diff --git a/.circleci/config.yml b/.circleci/config.yml index 018b8b18b4..b389cc3f6d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,104 +1,139 @@ --- version: 2.1 - orbs: - prometheus: prometheus/prometheus@0.4.0 - + prometheus: prometheus/prometheus@0.17.1 executors: # Whenever the Go version is updated here, .promu.yml and .promu-cgo.yml # should also be updated. golang: docker: - - image: circleci/golang:1.14 + - image: cimg/go:1.25 + arm: + docker: + - image: cimg/go:1.25 + resource_class: arm.medium jobs: test: executor: golang - steps: - - prometheus/setup_environment - - run: make - - prometheus/store_artifact: - file: node_exporter - - codespell: - docker: - - image: circleci/python - + - prometheus/setup_environment + - run: go mod download + - run: make + - prometheus/store_artifact: + file: node_exporter + test-arm: + executor: arm steps: - - checkout - - run: sudo pip install codespell - - run: codespell --skip=".git,./vendor,ttar,go.mod,go.sum,*pem,./collector/fixtures" -L uint,packages\',uptodate - + - prometheus/setup_environment + - run: uname -a + - run: make test-e2e + test_mixins: + executor: golang + steps: + - checkout + - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest + - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest + - run: go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest + - run: make promtool + - run: make -C docs/node-mixin clean + - run: make -C docs/node-mixin jb_install + - run: make -C docs/node-mixin + - run: git diff --exit-code build: machine: - image: ubuntu-1604:201903-01 + image: ubuntu-2404:current + parallelism: 3 + steps: + - prometheus/setup_environment + - run: docker run --privileged linuxkit/binfmt:af88a591f9cc896a52ce596b9cf7ca26a061ef97 + - run: promu crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX + - run: promu --config .promu-cgo.yml crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX + # sign the darwin build so it doesn't get SIGKILLed on start, see: https://github.com/prometheus/node_exporter/issues/2539 + - run: + command: | + if [[ -f "$(pwd)/.build/darwin-arm64/node_exporter" ]]; then + promu codesign "$(pwd)/.build/darwin-arm64/node_exporter" + fi + if [[ -f "$(pwd)/.build/darwin-amd64/node_exporter" ]]; then + promu codesign "$(pwd)/.build/darwin-amd64/node_exporter" + fi + - persist_to_workspace: + root: . + paths: + - .build + - store_artifacts: + path: .build + destination: /build + test_docker: + machine: + image: ubuntu-2404:current environment: - DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.14-base + DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.25-base REPO_PATH: github.com/prometheus/node_exporter - steps: - - checkout - - run: docker run --privileged linuxkit/binfmt:v0.7 - - run: make promu - - run: promu crossbuild - - run: promu --config .promu-cgo.yml crossbuild - - persist_to_workspace: - root: . - paths: - - .build - - store_artifacts: - path: .build - destination: /build - - run: - command: | - if [ -n "$CIRCLE_TAG" ]; then - make docker DOCKER_IMAGE_TAG=$CIRCLE_TAG - else - make docker - fi - - run: docker images - - run: docker run --rm -t -v "$(pwd):/app" "${DOCKER_TEST_IMAGE_NAME}" -i "${REPO_PATH}" -T - - run: - command: | - if [ -n "$CIRCLE_TAG" ]; then - make test-docker DOCKER_IMAGE_TAG=$CIRCLE_TAG - else - make test-docker - fi - + - prometheus/setup_environment + - attach_workspace: + at: . 
+ - run: + command: | + if [ -n "$CIRCLE_TAG" ]; then + make docker DOCKER_IMAGE_TAG=$CIRCLE_TAG + else + make docker + fi + - run: docker images + - run: docker run --rm -t -v "$(pwd):/app" "${DOCKER_TEST_IMAGE_NAME}" -i "${REPO_PATH}" -T + - run: + command: | + if [ -n "$CIRCLE_TAG" ]; then + make test-docker DOCKER_IMAGE_TAG=$CIRCLE_TAG + else + make test-docker + fi workflows: version: 2 node_exporter: jobs: - - test: - filters: - tags: - only: /.*/ - - build: - filters: - tags: - only: /.*/ - - codespell: - filters: - tags: - only: /.*/ - - prometheus/publish_master: - context: org-context - requires: - - test - - build - filters: - branches: - only: master - - prometheus/publish_release: - context: org-context - requires: - - test - - build - filters: - tags: - only: /^v.*/ - branches: - ignore: /.*/ + - test: + filters: + tags: + only: /.*/ + - test-arm: + filters: + tags: + only: /.*/ + - build: + filters: + tags: + only: /.*/ + - test_docker: + requires: + - test + - build + filters: + tags: + only: /.*/ + - test_mixins: + filters: + tags: + only: /.*/ + - prometheus/publish_master: + context: org-context + requires: + - test + - build + filters: + branches: + only: master + - prometheus/publish_release: + context: org-context + requires: + - test + - build + filters: + tags: + only: /^v.*/ + branches: + ignore: /.*/ diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index c76c887bf9..ff07009480 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,14 +1,12 @@ +### node_exporter log output + ### Are you running node_exporter in Docker? diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..c0b195d20f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "monthly" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" diff --git a/.github/workflows/bsd.yml b/.github/workflows/bsd.yml new file mode 100644 index 0000000000..667ed0430a --- /dev/null +++ b/.github/workflows/bsd.yml @@ -0,0 +1,312 @@ +name: bsd + +on: + push: + branches: + - master + pull_request: + branches: + - master + +permissions: + contents: read + +env: + GNU_TAR_VERSION: "1.35" + GO_VERSION_DRAGONFLY: "1.25.1" + GO_VERSION_FREEBSD: "125" + GO_VERSION_NETBSD: "1.25.1" + GO_VERSION_OPENBSD: "1.25.1" + GO_VERSION_SOLARIS: "1.25.1" + +# To spin up one of the VMs below, see the "Debug Shell" section here: https://github.com/vmactions +jobs: + test_freebsd: + name: Run end-to-end tests on FreeBSD + runs-on: ubuntu-latest + steps: + - name: Checkout the repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - name: test-e2e + uses: vmactions/freebsd-vm@487ce35b96fae3e60d45b521735f5aa436ecfade # v1.2.4 + with: + release: "14.3" + copyback: false + envs: 'GO_VERSION_FREEBSD GNU_TAR_VERSION' + usesh: true + prepare: | + pkg update -f + pkg install -y \ + bash \ + git \ + gmake \ + gnugrep \ + go${GO_VERSION_FREEBSD} \ + gsed \ + gtar \ + python \ + wget + run: | + echo "::group::Setup prerequisites" + set -eu + mkdir bin + ln -s $(which go${GO_VERSION_FREEBSD}) $(pwd)/bin/go + ln -s $(which ggrep) $(pwd)/bin/grep + ln -s $(which gmake) $(pwd)/bin/make + ln -s $(which gsed) $(pwd)/bin/sed + ln -s $(which gtar) $(pwd)/bin/tar + export PATH=$(pwd)/bin:$PATH + echo "::endgroup::" + + echo "::group::Print environment information" + uname -a + echo "GOOS: $(go env GOOS)" + echo 
"GOARCH: $(go env GOARCH)" + echo "::endgroup::" + + echo "::group::Run End-to-End Tests" + git config --global --add safe.directory $(pwd) + gmake test-e2e + echo "::endgroup::" + + test_openbsd: + name: Run end-to-end tests on OpenBSD + runs-on: ubuntu-latest + steps: + - name: Checkout the repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - name: test-e2e + uses: vmactions/openbsd-vm@a254d784d6fad46e22ef73c6ecbb1dc310b04777 # v1.2.3 + with: + copyback: false + envs: 'GO_VERSION_OPENBSD GNU_TAR_VERSION' + usesh: true + prepare: | + pkg_add -u + pkg_add \ + bash \ + ggrep \ + git \ + gmake \ + go-${GO_VERSION_OPENBSD} \ + gsed \ + gtar-${GNU_TAR_VERSION}p1-static \ + python \ + wget + run: | + echo "::group::Setup prerequisites" + set -eu + mkdir bin + ln -s $(which ggrep) $(pwd)/bin/grep + ln -s $(which gmake) $(pwd)/bin/make + ln -s $(which gsed) $(pwd)/bin/sed + ln -s $(which gtar) $(pwd)/bin/tar + export PATH=$(pwd)/bin:$PATH + echo "::endgroup::" + + echo "::group::Print environment information" + uname -a + echo "GOOS: $(go env GOOS)" + echo "GOARCH: $(go env GOARCH)" + echo "::endgroup::" + + echo "::group::Run End-to-End Tests" + git config --global --add safe.directory $(pwd) + make test-e2e + echo "::endgroup::" + + test_netbsd: + name: Run end-to-end tests on NetBSD + runs-on: ubuntu-latest + steps: + - name: Checkout the repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - name: test-e2e + uses: vmactions/netbsd-vm@7aea50f9eb16df034be5d71eb81fea9804505c28 # v1.2.0 + with: + copyback: false + envs: 'GO_VERSION_NETBSD GNU_TAR_VERSION' + usesh: true + prepare: | + /usr/sbin/pkg_add -u \ + git \ + gmake \ + grep \ + gsed \ + gtar-base-${GNU_TAR_VERSION}\ + python312 \ + wget + run: | + echo "::group::Setup prerequisites" + set -eu + mkdir bin + GOGZ="go${GO_VERSION_NETBSD}.netbsd-amd64.tar.gz" + wget https://go.dev/dl/${GOGZ} + gtar xzf ${GOGZ} + ln -s $(pwd)/go/bin/go $(pwd)/bin/go + ln -s $(which ggrep) $(pwd)/bin/grep + ln -s $(which gmake) $(pwd)/bin/make + ln -s $(which gsed) $(pwd)/bin/sed + ln -s $(which gtar) $(pwd)/bin/tar + export PATH=$(pwd)/bin:$PATH + echo "::endgroup::" + + echo "::group::Print environment information" + uname -a + echo "GOOS: $(go env GOOS)" + echo "GOARCH: $(go env GOARCH)" + echo "::endgroup::" + + echo "::group::Run End-to-End Tests" + git config --global --add safe.directory $(pwd) + make test-e2e + echo "::endgroup::" + + test_dragonfly: + name: Run end-to-end tests on DragonFly + runs-on: ubuntu-latest + steps: + - name: Checkout the repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - name: test-e2e + uses: vmactions/dragonflybsd-vm@ff1f01c32b9e82f2ba388d0ff270442bcd6ceddc # v1.1.1 + with: + copyback: false + envs: 'GO_VERSION_DRAGONFLY' + usesh: true + prepare: | + pkg update && pkg upgrade -y + pkg install -y \ + bash \ + git \ + gmake \ + gnugrep \ + gsed \ + gtar \ + python3 \ + wget + run: | + echo "::group::Setup prerequisites" + set -eu + mkdir bin + GOGZ="go${GO_VERSION_DRAGONFLY}.dragonfly-amd64.tar.gz" + wget https://go.dev/dl/${GOGZ} + gtar xzf ${GOGZ} + ln -s $(pwd)/go/bin/go $(pwd)/bin/go + ln -s $(which ggrep) $(pwd)/bin/grep + ln -s $(which gmake) $(pwd)/bin/make + ln -s $(which gsed) $(pwd)/bin/sed + ln -s $(which gtar) $(pwd)/bin/tar + ln -s $(which python3) $(pwd)/bin/python + export PATH=$(pwd)/bin:$PATH + echo "::endgroup::" + + echo "::group::Print environment information" + uname -a + echo "GOOS: $(go env 
GOOS)" + echo "GOARCH: $(go env GOARCH)" + echo "::endgroup::" + + echo "::group::Run End-to-End Tests" + git config --global --add safe.directory $(pwd) + gmake test-e2e + echo "::endgroup::" + + test_solaris: + name: Run end-to-end tests on Solaris + runs-on: ubuntu-latest + steps: + - name: Checkout the repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - name: test-e2e + uses: vmactions/solaris-vm@58cbd70c6e051860f9b8f65908cc582938fbbdba # v1.1.5 + with: + copyback: false + envs: 'GO_VERSION_SOLARIS' + usesh: true + prepare: | + pkg install \ + bash \ + curl \ + gcc \ + git \ + gnu-grep \ + gnu-make \ + gnu-sed \ + gnu-tar + run: | + echo "::group::Setup prerequisites" + set -eu + mkdir bin + GOGZ="go${GO_VERSION_SOLARIS}.solaris-amd64.tar.gz" + wget https://go.dev/dl/${GOGZ} + gtar xzf ${GOGZ} + ln -s $(pwd)/go/bin/go $(pwd)/bin/go + ln -s $(which ggrep) $(pwd)/bin/grep + ln -s $(which gmake) $(pwd)/bin/make + ln -s $(which gsed) $(pwd)/bin/sed + ln -s $(which gtar) $(pwd)/bin/tar + export PATH=$(pwd)/bin:$PATH + + echo ">> building promu as it is not shipped for Solaris" + git clone https://github.com/prometheus/promu.git + cd promu + go build . + cd - + + mkdir -p $(go env GOPATH)/bin + ln -s $(pwd)/promu/promu $(go env GOPATH)/bin/promu + export PATH=$(go env GOPATH)/bin:$PATH + echo "::endgroup::" + + echo "::group::Print environment information" + uname -a + echo "GOOS: $(go env GOOS)" + echo "GOARCH: $(go env GOARCH)" + echo "::endgroup::" + + echo "::group::Run End-to-End Tests" + git config --global --add safe.directory $(pwd) + make test-e2e + echo "::endgroup::" + + test_macos: + name: Run end-to-end tests on Darwin/MacOS + runs-on: macos-latest + steps: + - name: Checkout the repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - name: Install dependencies + run: | + brew install \ + bash \ + curl \ + git \ + grep \ + make \ + gnu-sed \ + gnu-tar \ + go \ + python3 + - name: test-e2e + run: | + echo "::group::Setup prerequisites" + set -eu + mkdir bin + ln -s $(which ggrep) $(pwd)/bin/grep + ln -s $(which gmake) $(pwd)/bin/make + ln -s $(which gsed) $(pwd)/bin/sed + ln -s $(which gtar) $(pwd)/bin/tar + export PATH=$(pwd)/bin:$PATH + echo "::endgroup::" + + echo "::group::Print environment information" + uname -a + echo "GOOS: $(go env GOOS)" + echo "GOARCH: $(go env GOARCH)" + echo "::endgroup::" + + echo "::group::Run End-to-End Tests" + git config --global --add safe.directory $(pwd) + make test-e2e E2E_EXTRA_FLAGS='--collector.diskstats.device-include=disk[04]' + echo "::endgroup::" diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml new file mode 100644 index 0000000000..7de8bb8da7 --- /dev/null +++ b/.github/workflows/container_description.yml @@ -0,0 +1,61 @@ +--- +name: Push README to Docker Hub +on: + push: + paths: + - "README.md" + - "README-containers.md" + - ".github/workflows/container_description.yml" + branches: [ main, master ] + +permissions: + contents: read + +jobs: + PushDockerHubReadme: + runs-on: ubuntu-latest + name: Push README to Docker Hub + if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
+ steps: + - name: git checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false + - name: Set docker hub repo name + run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV + - name: Push README to Dockerhub + uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 + env: + DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }} + DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }} + with: + destination_container_repo: ${{ env.DOCKER_REPO_NAME }} + provider: dockerhub + short_description: ${{ env.DOCKER_REPO_NAME }} + # Empty string results in README-containers.md being pushed if it + # exists. Otherwise, README.md is pushed. + readme_file: '' + + PushQuayIoReadme: + runs-on: ubuntu-latest + name: Push README to quay.io + if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. + steps: + - name: git checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false + - name: Set quay.io org name + run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV + - name: Set quay.io repo name + run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV + - name: Push README to quay.io + uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 + env: + DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }} + with: + destination_container_repo: ${{ env.DOCKER_REPO_NAME }} + provider: quay + # Empty string results in README-containers.md being pushed if it + # exists. Otherwise, README.md is pushed. + readme_file: '' diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000..75f886d546 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,44 @@ +--- +# This action is synced from https://github.com/prometheus/prometheus +name: golangci-lint +on: + push: + paths: + - "go.sum" + - "go.mod" + - "**.go" + - "scripts/errcheck_excludes.txt" + - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" + pull_request: + +permissions: # added using https://github.com/step-security/secure-repo + contents: read + +jobs: + golangci: + permissions: + contents: read # for actions/checkout to fetch code + pull-requests: read # for golangci/golangci-lint-action to fetch pull requests + name: lint + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false + - name: Install Go + uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + with: + go-version: 1.25.x + - name: Install snmp_exporter/generator dependencies + run: sudo apt-get update && sudo apt-get -y install libsnmp-dev + if: github.repository == 'prometheus/snmp_exporter' + - name: Get golangci-lint version + id: golangci-lint-version + run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT + - name: Lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + args: --verbose + version: ${{ steps.golangci-lint-version.outputs.version }} diff --git a/.gitignore b/.gitignore index 6f86022719..c1318db36f 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,8 @@ dependencies-stamp /.release /.tarballs +tools/tools + # Intellij /.idea @@ -34,3 +36,6 @@ 
dependencies-stamp # Test files extracted from ttar collector/fixtures/sys/ +collector/fixtures/udev/ + +/vendor diff --git a/.golangci.yml b/.golangci.yml index c53f64cc0e..16bec0de7f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,16 +1,44 @@ -run: - modules-download-mode: vendor - +version: "2" linters: enable: - - golint - -issues: - exclude-rules: - - path: _test.go - linters: - - errcheck - -linters-settings: - errcheck: - exclude: scripts/errcheck_excludes.txt + - depguard + - misspell + - revive + settings: + depguard: + rules: + no_exec_policy: + files: + - '!$test' + deny: + - pkg: os/exec + desc: Using os/exec to run sub processes it not allowed by policy + errcheck: + # Used in HTTP handlers, any error is handled by the server itself. + exclude-functions: + - (net/http.ResponseWriter).Write + revive: + rules: + - name: unused-parameter + severity: warning + disabled: true + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - errcheck + path: _test.go +formatters: + enable: + - goimports + exclusions: + generated: lax + settings: + goimports: + local-prefixes: + - github.com/prometheus/node_exporter diff --git a/.promu-cgo.yml b/.promu-cgo.yml index 00bfed4540..aa40105c4f 100644 --- a/.promu-cgo.yml +++ b/.promu-cgo.yml @@ -1,14 +1,13 @@ go: # Whenever the Go version is updated here, .circle/config.yml and # .promu.yml should also be updated. - version: 1.14 + version: 1.25 cgo: true repository: path: github.com/prometheus/node_exporter build: binaries: - name: node_exporter - flags: -mod=vendor -a -tags 'netgo static_build' ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} @@ -22,6 +21,6 @@ tarball: crossbuild: platforms: - darwin/amd64 - - darwin/386 + - darwin/arm64 - netbsd/amd64 - netbsd/386 diff --git a/.promu.yml b/.promu.yml index 5df5e7a754..a7076d5ba2 100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,13 +1,12 @@ go: # Whenever the Go version is updated here, .circle/config.yml and # .promu-cgo.yml should also be updated. - version: 1.14 + version: 1.25 repository: path: github.com/prometheus/node_exporter build: binaries: - name: node_exporter - flags: -mod=vendor -a -tags 'netgo static_build' ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} @@ -20,14 +19,5 @@ tarball: - NOTICE crossbuild: platforms: - - linux/amd64 - - linux/386 - - linux/arm - - linux/arm64 - - linux/mips - - linux/mipsle - - linux/mips64 - - linux/mips64le - - linux/ppc64 - - linux/ppc64le - - linux/s390x + - linux + - openbsd/amd64 diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000000..8d09c375fd --- /dev/null +++ b/.yamllint @@ -0,0 +1,25 @@ +--- +extends: default +ignore: | + **/node_modules + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + commas: disable + comments: disable + comments-indentation: disable + document-start: disable + indentation: + spaces: consistent + indent-sequences: consistent + key-duplicates: + ignore: | + config/testdata/section_key_dup.bad.yml + line-length: disable + truthy: + check-keys: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d45de48e9..e4e07db753 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,340 @@ ## master / unreleased -* [CHANGE] Improve filter flag names. 
* [CHANGE] * [FEATURE] -* [ENHANCEMENT] Include TCP OutRsts in netstat metrics +* [ENHANCEMENT] * [BUGFIX] +## 1.10.2 / 2025-10-25 + +* [BUGFIX] meminfo: Fix typo in Zswap metric name #3455 + +## 1.10.1 / 2025-10-25 + +* [BUGFIX] filesystem: Fix mount points being collected multiple times #3376 +* [BUGFIX] filesystem: Refactor mountinfo parsing #3452 +* [BUGFIX] meminfo: Add Zswap/Zswapped metrics #3453 + +## 1.10.0 / 2025-10-24 + +* [CHANGE] mdadm: Use sysfs for RAID metrics #3031 +* [CHANGE] filesystem: Add erofs in default excluded fs #3313 +* [CHANGE] tcpstat: Use std lib binary.NativeEndian #3386 +* [FEATURE] pcidevice: Add new collector for PCIe devices #3339 +* [FEATURE] AIX: Add more metrics #3338 +* [FEATURE] systemd: Add Virtualization metrics #3254 +* [FEATURE] swaps: Add new collector #3428 +* [ENHANCEMENT] wifi: Add packet received and transmitted metrics #3382 +* [ENHANCEMENT] filesystem: Take super options into account for read-only #3387 +* [ENHANCEMENT] pcidevice: Add additional metrics #3425 +* [ENHANCEMENT] perf: Add `tlb_data` metrics #3436 +* [BUGFIX] interrupts: Fix OpenBSD interrupt device parsing #3288 +* [BUGFIX] diskstats: Simplify condition #3290 +* [BUGFIX] thermal: Sanitize darwin thermal strings #3294 +* [BUGFIX] filesystem: Fix Darwin collector cgo memory leak #3315 +* [BUGFIX] cpufreq: Fix: collector enable #3318 +* [BUGFIX] ethtool: Fix returning 0 for sanitized metrics #3335 +* [BUGFIX] netdev: Fix Darwin netdev i/o bytes metric #3336 +* [BUGFIX] systemd: Fix logging race #3364 +* [BUGFIX] filesystem: Fix duplicate Darwin CGO import #3391 + +## 1.9.1 / 2025-04-01 + +* [BUGFIX] pressure: Fix missing IRQ on older kernels #3263 +* [BUGFIX] Fix Darwin memory leak #3277 + +## 1.9.0 / 2025-02-17 + +* [CHANGE] meminfo: Convert linux implementation to use procfs lib #3049 +* [CHANGE] Update logging to use Go log/slog #3097 +* [FEATURE] filesystem: Add `node_filesystem_mount_info` metric #2970 +* [FEATURE] btrfs: Add metrics for commit statistics #3010 +* [FEATURE] interrupts: Add collector include/exclude filtering #3028 +* [FEATURE] interrupts: Add "exclude zeros" filtering #3028 +* [FEATURE] slabinfo: Add filters for slab name. #3041 +* [FEATURE] pressure: add IRQ PSI metrics #3048 +* [FEATURE] hwmon: Add include and exclude filter for sensors #3072 +* [FEATURE] filesystem: Add NetBSD support #3082 +* [FEATURE] netdev: Add ifAlias label #3087 +* [FEATURE] hwmon: Add Support for GPU Clock Frequencies #3093 +* [FEATURE] Add `exclude[]` URL parameter #3116 +* [FEATURE] Add AIX support #3136 +* [FEATURE] filesystem: Add fs-types/mount-points include flags #3171 +* [FEATURE] netstat: Add collector for tcp packet counters for FreeBSD. 
#3177 +* [ENHANCEMENT] ethtool: Add logging for filtering flags #2979 +* [ENHANCEMENT] netstat: Add TCPRcvQDrop to default metrics #3021 +* [ENHANCEMENT] diskstats: Add block device rotational #3022 +* [ENHANCEMENT] cpu: Support CPU online status #3032 +* [ENHANCEMENT] arp: optimize interface name resolution #3133 +* [ENHANCEMENT] textfile: Allow specifying multiple directory globs #3135 +* [ENHANCEMENT] filesystem: Add reporting of purgeable space on MacOS #3206 +* [ENHANCEMENT] ethtool: Skip full scan of NetClass directories #3239 +* [BUGFIX] zfs: Prevent `procfs` integer underflow #2961 +* [BUGFIX] pressure: Fix collection on systems that do not expose a full CPU stat #3054 +* [BUGFIX] cpu: Fix FreeBSD 32-bit host support and plug memory leak #3083 +* [BUGFIX] hwmon: Add safety check to hwmon read #3134 +* [BUGFIX] zfs: Allow space in dataset name #3186 + +## 1.8.1 / 2024-05-16 + +* [BUGFIX] Fix CPU seconds on Solaris #2963 +* [BUGFIX] Sign Darwin/MacOS binaries #3008 +* [BUGFIX] Fix pressure collector nil reference #3016 + +## 1.8.0 / 2024-04-24 + +* [CHANGE] exec_bsd: Fix labels for `vm.stats.sys.v_syscall` sysctl #2895 +* [CHANGE] diskstats: Ignore zram devices on linux systems #2898 +* [CHANGE] textfile: Avoid inconsistent help-texts #2962 +* [CHANGE] os: Removed caching of modtime/filename of os-release file #2987 +* [FEATURE] xfrm: Add new collector #2866 +* [FEATURE] watchdog: Add new collector #2880 +* [ENHANCEMENT] cpu_vulnerabilities: Add mitigation information label #2806 +* [ENHANCEMENT] nfsd: Handle new `wdeleg_getattr` attribute #2810 +* [ENHANCEMENT] netstat: Add TCPOFOQueue to default netstat metrics #2867 +* [ENHANCEMENT] filesystem: surface device errors #2923 +* [ENHANCEMENT] os: Add support end parsing #2982 +* [ENHANCEMENT] zfs: Log mib when sysctl read fails on FreeBSD #2975 +* [ENHANCEMENT] fibre_channel: update procfs to take into account optional attributes #2933 +* [BUGFIX] cpu: Fix debug log in cpu collector #2857 +* [BUGFIX] hwmon: Fix hwmon nil ptr #2873 +* [BUGFIX] hwmon: Fix hwmon error capture #2915 +* [BUGFIX] zfs: Revert "Add ZFS freebsd per dataset stats" #2925 +* [BUGFIX] ethtool: Sanitize ethtool metric name keys #2940 +* [BUGFIX] fix: data race of NetClassCollector metrics initialization #2995 + +## 1.7.0 / 2023-11-11 + +* [FEATURE] Add ZFS freebsd per dataset stats #2753 +* [FEATURE] Add cpu vulnerabilities reporting from sysfs #2721 +* [ENHANCEMENT] Parallelize stat calls in Linux filesystem collector #1772 +* [ENHANCEMENT] Add missing linkspeeds to ethtool collector #2711 +* [ENHANCEMENT] Add CPU MHz as the value for `node_cpu_info` metric #2778 +* [ENHANCEMENT] Improve qdisc collector performance #2779 +* [ENHANCEMENT] Add include and exclude filter for hwmon collector #2699 +* [ENHANCEMENT] Optionally fetch ARP stats via rtnetlink instead of procfs #2777 +* [BUGFIX] Fix ZFS arcstats on FreeBSD 14.0+ #2754 +* [BUGFIX] Fallback to 32-bit stats in netdev #2757 +* [BUGFIX] Close btrfs.FS handle after use #2780 +* [BUGFIX] Move RO status before error return #2807 +* [BUGFIX] Fix `promhttp_metric_handler_errors_total` being always active #2808 +* [BUGFIX] Fix nfsd v4 index miss #2824 + +## 1.6.1 / 2023-06-17 + +Rebuild with latest Go compiler bugfix release.
+ +## 1.6.0 / 2023-05-27 + +* [CHANGE] Fix cpustat when some cpus are offline #2318 +* [CHANGE] Remove metrics of offline CPUs in CPU collector #2605 +* [CHANGE] Deprecate ntp collector #2603 +* [CHANGE] Remove bcache `cache_readaheads_totals` metrics #2583 +* [CHANGE] Deprecate supervisord collector #2685 +* [FEATURE] Enable uname collector on NetBSD #2559 +* [FEATURE] NetBSD support for the meminfo collector #2570 +* [FEATURE] NetBSD support for CPU collector #2626 +* [FEATURE] Add FreeBSD collector for netisr subsystem #2668 +* [FEATURE] Add softirqs collector #2669 +* [ENHANCEMENT] Add suspended as a `node_zfs_zpool_state` #2449 +* [ENHANCEMENT] Add administrative state of Linux network interfaces #2515 +* [ENHANCEMENT] Log current value of GOMAXPROCS #2537 +* [ENHANCEMENT] Add profiler options for perf collector #2542 +* [ENHANCEMENT] Allow root path as metrics path #2590 +* [ENHANCEMENT] Add cpu frequency governor metrics #2569 +* [ENHANCEMENT] Add new landing page #2622 +* [ENHANCEMENT] Reduce privileges needed for btrfs device stats #2634 +* [ENHANCEMENT] Add ZFS `memory_available_bytes` #2687 +* [ENHANCEMENT] Use `SCSI_IDENT_SERIAL` as serial in diskstats #2612 +* [ENHANCEMENT] Read missing from netlink netclass attributes from sysfs #2669 +* [BUGFIX] perf: fixes for automatically detecting the correct tracefs mountpoints #2553 +* [BUGFIX] Fix `thermal_zone` collector noise #2554 +* [BUGFIX] Fix a problem fetching the user wire count on FreeBSD #2584 +* [BUGFIX] interrupts: Fix fields on linux aarch64 #2631 +* [BUGFIX] Remove metrics of offline CPUs in CPU collector #2605 +* [BUGFIX] Fix OpenBSD filesystem collector string parsing #2637 +* [BUGFIX] Fix bad reporting of `node_cpu_seconds_total` in OpenBSD #2663 + +## 1.5.0 / 2022-11-29 + +NOTE: This changes the Go runtime "GOMAXPROCS" to 1. This is done to limit the + concurrency of the exporter to 1 CPU thread at a time in order to avoid a + race condition problem in the Linux kernel (#2500) and parallel IO issues + on nodes with high numbers of CPUs/CPU threads (#1880). + +NOTE: A command line arg has been changed from `--web.config` to `--web.config.file`. 
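For illustration, a minimal sketch of the renamed flag; the TLS/auth config filename below is only an example, not something mandated by the exporter:

```bash
# Deprecated spelling (before this release):
node_exporter --web.config=web-config.yml
# New spelling introduced in 1.5.0:
node_exporter --web.config.file=web-config.yml
```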
+ +* [CHANGE] Default GOMAXPROCS to 1 #2530 +* [FEATURE] Add multiple listeners and systemd socket listener activation #2393 +* [ENHANCEMENT] Add RTNL version of netclass collector #2492, #2528 +* [BUGFIX] Fix diskstats exclude flags #2487 +* [BUGFIX] Bump go/x/crypt and go/x/net #2488 +* [BUGFIX] Fix hwmon label sanitizer #2504 +* [BUGFIX] Use native endianness when encoding InetDiagMsg #2508 +* [BUGFIX] Fix btrfs device stats always being zero #2516 +* [BUGFIX] Security: Update exporter-toolkit (CVE-2022-46146) #2531 + +## 1.4.1 / 2022-11-29 + +* [BUGFIX] Fix diskstats exclude flags #2487 +* [BUGFIX] Security: Update go/x/crypto and go/x/net (CVE-2022-27191 CVE-2022-27664) #2488 +* [BUGFIX] Security: Update exporter-toolkit (CVE-2022-46146) #2531 + +## 1.4.0 / 2022-09-24 + +* [CHANGE] Merge metrics descriptions in textfile collector #2475 +* [FEATURE] [node-mixin] Add darwin dashboard to mixin #2351 +* [FEATURE] Add "isolated" metric on cpu collector on linux #2251 +* [FEATURE] Add cgroup summary collector #2408 +* [FEATURE] Add selinux collector #2205 +* [FEATURE] Add slab info collector #2376 +* [FEATURE] Add sysctl collector #2425 +* [FEATURE] Also track the CPU Spin time for OpenBSD systems #1971 +* [FEATURE] Add support for MacOS version #2471 +* [ENHANCEMENT] [node-mixin] Add missing selectors #2426 +* [ENHANCEMENT] [node-mixin] Change current datasource to grafana's default #2281 +* [ENHANCEMENT] [node-mixin] Change disk graph to disk table #2364 +* [ENHANCEMENT] [node-mixin] Change io time units to %util #2375 +* [ENHANCEMENT] Ad user_wired_bytes and laundry_bytes on *bsd #2266 +* [ENHANCEMENT] Add additional vm_stat memory metrics for darwin #2240 +* [ENHANCEMENT] Add device filter flags to arp collector #2254 +* [ENHANCEMENT] Add diskstats include and exclude device flags #2417 +* [ENHANCEMENT] Add node_softirqs_total metric #2221 +* [ENHANCEMENT] Add rapl zone name label option #2401 +* [ENHANCEMENT] Add slabinfo collector #1799 +* [ENHANCEMENT] Allow user to select port on NTP server to query #2270 +* [ENHANCEMENT] collector/diskstats: Add labels and metrics from udev #2404 +* [ENHANCEMENT] Enable builds against older macOS SDK #2327 +* [ENHANCEMENT] qdisk-linux: Add exclude and include flags for interface name #2432 +* [ENHANCEMENT] systemd: Expose systemd minor version #2282 +* [ENHANCEMENT] Use netlink for tcpstat collector #2322 +* [ENHANCEMENT] Use netlink to get netdev stats #2074 +* [ENHANCEMENT] Add additional perf counters for stalled frontend/backend cycles #2191 +* [ENHANCEMENT] Add btrfs device error stats #2193 +* [BUGFIX] [node-mixin] Fix fsSpaceAvailableCriticalThreshold and fsSpaceAvailableWarning #2352 +* [BUGFIX] Fix concurrency issue in ethtool collector #2289 +* [BUGFIX] Fix concurrency issue in netdev collector #2267 +* [BUGFIX] Fix diskstat reads and write metrics for disks with different sector sizes #2311 +* [BUGFIX] Fix iostat on macos broken by deprecation warning #2292 +* [BUGFIX] Fix NodeFileDescriptorLimit alerts #2340 +* [BUGFIX] Sanitize rapl zone names #2299 +* [BUGFIX] Add file descriptor close safely in test #2447 +* [BUGFIX] Fix race condition in os_release.go #2454 +* [BUGFIX] Skip ZFS IO metrics if their paths are missing #2451 + +## 1.3.1 / 2021-12-01 + +* [BUGFIX] Handle nil CPU thermal power status on M1 #2218 +* [BUGFIX] bsd: Ignore filesystems flagged as MNT_IGNORE. 
#2227 +* [BUGFIX] Sanitize UTF-8 in dmi collector #2229 + +## 1.3.0 / 2021-10-20 + +NOTE: In order to support globs in the textfile collector path, filenames exposed by + `node_textfile_mtime_seconds` now contain the full path name. + +* [CHANGE] Add path label to rapl collector #2146 +* [CHANGE] Exclude filesystems under /run/credentials #2157 +* [CHANGE] Add TCPTimeouts to netstat default filter #2189 +* [FEATURE] Add lnstat collector for metrics from /proc/net/stat/ #1771 +* [FEATURE] Add darwin powersupply collector #1777 +* [FEATURE] Add support for monitoring GPUs on Linux #1998 +* [FEATURE] Add Darwin thermal collector #2032 +* [FEATURE] Add os release collector #2094 +* [FEATURE] Add netdev.address-info collector #2105 +* [FEATURE] Add clocksource metrics to time collector #2197 +* [ENHANCEMENT] Support glob textfile collector directories #1985 +* [ENHANCEMENT] ethtool: Expose node_ethtool_info metric #2080 +* [ENHANCEMENT] Use include/exclude flags for ethtool filtering #2165 +* [ENHANCEMENT] Add flag to disable guest CPU metrics #2123 +* [ENHANCEMENT] Add DMI collector #2131 +* [ENHANCEMENT] Add threads metrics to processes collector #2164 +* [ENHANCEMENT] Reduce timer GC delays in the Linux filesystem collector #2169 +* [ENHANCEMENT] Add TCPTimeouts to netstat default filter #2189 +* [ENHANCEMENT] Use SysctlTimeval for boottime collector on BSD #2208 +* [BUGFIX] ethtool: Sanitize metric names #2093 +* [BUGFIX] Fix ethtool collector for multiple interfaces #2126 +* [BUGFIX] Fix possible panic on macOS #2133 +* [BUGFIX] Collect flag_info and bug_info only for one core #2156 +* [BUGFIX] Prevent duplicate ethtool metric names #2187 + +## 1.2.2 / 2021-08-06 + +* [BUGFIX] Fix processes collector long int parsing #2112 + +## 1.2.1 / 2021-07-23 + +* [BUGFIX] Fix zoneinfo parsing prometheus/procfs#386 +* [BUGFIX] Fix nvme collector log noise #2091 +* [BUGFIX] Fix rapl collector log noise #2092 + +## 1.2.0 / 2021-07-15 + +NOTE: Ignoring invalid network speed will be the default in 2.x +NOTE: Filesystem collector flags have been renamed. `--collector.filesystem.ignored-mount-points` is now `--collector.filesystem.mount-points-exclude` and `--collector.filesystem.ignored-fs-types` is now `--collector.filesystem.fs-types-exclude`. The old flags will be removed in 2.x. 
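A quick sketch of the rename in practice; the exclude patterns are only examples (the mount-point pattern mirrors the one used in the README section of this change):

```bash
# Deprecated flags, to be removed in 2.x:
node_exporter \
  --collector.filesystem.ignored-mount-points='^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)' \
  --collector.filesystem.ignored-fs-types='^tmpfs$'
# Replacement flags:
node_exporter \
  --collector.filesystem.mount-points-exclude='^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)' \
  --collector.filesystem.fs-types-exclude='^tmpfs$'
```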
+ +* [CHANGE] Rename filesystem collector flags to match other collectors #2012 +* [CHANGE] Make node_exporter print usage to STDOUT #2039 +* [FEATURE] Add conntrack statistics metrics #1155 +* [FEATURE] Add ethtool stats collector #1832 +* [FEATURE] Add flag to ignore network speed if it is unknown #1989 +* [FEATURE] Add tapestats collector for Linux #2044 +* [FEATURE] Add nvme collector #2062 +* [ENHANCEMENT] Add ErrorLog plumbing to promhttp #1887 +* [ENHANCEMENT] Add more Infiniband counters #2019 +* [ENHANCEMENT] netclass: retrieve interface names and filter before parsing #2033 +* [ENHANCEMENT] Add time zone offset metric #2060 +* [BUGFIX] Handle errors from disabled PSI subsystem #1983 +* [BUGFIX] Fix panic when using backwards compatible flags #2000 +* [BUGFIX] Fix wrong value for OpenBSD memory buffer cache #2015 +* [BUGFIX] Only initiate collectors once #2048 +* [BUGFIX] Handle small backwards jumps in CPU idle #2067 + +## 1.1.2 / 2021-03-05 + +* [BUGFIX] Handle errors from disabled PSI subsystem #1983 +* [BUGFIX] Sanitize strings from /sys/class/power_supply #1984 +* [BUGFIX] Silence missing netclass errors #1986 + +## 1.1.1 / 2021-02-12 + +* [BUGFIX] Fix ineffassign issue #1957 +* [BUGFIX] Fix some noisy log lines #1962 + +## 1.1.0 / 2021-02-05 + +NOTE: We have improved some of the flag naming conventions (PR #1743). The old names are + deprecated and will be removed in 2.0. They will continue to work for backwards + compatibility. + +* [CHANGE] Improve filter flag names #1743 +* [CHANGE] Add btrfs and powersupplyclass to list of exporters enabled by default #1897 +* [FEATURE] Add fibre channel collector #1786 +* [FEATURE] Expose cpu bugs and flags as info metrics. #1788 +* [FEATURE] Add network_route collector #1811 +* [FEATURE] Add zoneinfo collector #1922 +* [ENHANCEMENT] Add more InfiniBand counters #1694 +* [ENHANCEMENT] Add flag to aggr ipvs metrics to avoid high cardinality metrics #1709 +* [ENHANCEMENT] Adding backlog/current queue length to qdisc collector #1732 +* [ENHANCEMENT] Include TCP OutRsts in netstat metrics #1733 +* [ENHANCEMENT] Add pool size to entropy collector #1753 +* [ENHANCEMENT] Remove CGO dependencies for OpenBSD amd64 #1774 +* [ENHANCEMENT] bcache: add writeback_rate_debug stats #1658 +* [ENHANCEMENT] Add check state for mdadm arrays via node_md_state metric #1810 +* [ENHANCEMENT] Expose XFS inode statistics #1870 +* [ENHANCEMENT] Expose zfs zpool state #1878 +* [ENHANCEMENT] Added an ability to pass collector.supervisord.url via SUPERVISORD_URL environment variable #1947 +* [BUGFIX] filesystem_freebsd: Fix label values #1728 +* [BUGFIX] Fix various procfs parsing errors #1735 +* [BUGFIX] Handle no data from powersupplyclass #1747 +* [BUGFIX] udp_queues_linux.go: change upd to udp in two error strings #1769 +* [BUGFIX] Fix node_scrape_collector_success behaviour #1816 +* [BUGFIX] Fix NodeRAIDDegraded to not use a string rule expressions #1827 +* [BUGFIX] Fix node_md_disks state label from fail to failed #1862 +* [BUGFIX] Handle EPERM for syscall in timex collector #1938 +* [BUGFIX] bcache: fix typo in a metric name #1943 +* [BUGFIX] Fix XFS read/write stats (https://github.com/prometheus/procfs/pull/343) + ## 1.0.1 / 2020-06-15 * [BUGFIX] filesystem_freebsd: Fix label values #1728 @@ -21,7 +350,7 @@ * The label named `state` on `node_systemd_service_restart_total` metrics was changed to `name` to better describe the metric. 
#1393 * Refactoring of the mdadm collector changes several metrics - `node_md_disks_active` is removed - - `node_md_disks` now has a `state` label for "fail", "spare", "active" disks. + - `node_md_disks` now has a `state` label for "failed", "spare", "active" disks. - `node_md_is_active` is replaced by `node_md_state` with a state set of "active", "inactive", "recovering", "resync". * Additional label `mountaddr` added to NFS device metrics to distinguish mounts from the same URL, but different IP addresses. #1417 * Metrics node_cpu_scaling_frequency_min_hrts and node_cpu_scaling_frequency_max_hrts of the cpufreq collector were renamed to node_cpu_scaling_frequency_min_hertz and node_cpu_scaling_frequency_max_hertz. #1510 @@ -207,7 +536,7 @@ Other breaking changes: * [CHANGE] Greatly reduce the metrics vmstat returns by default. #874 * [CHANGE] Greatly trim what netstat collector exposes by default #876 * [CHANGE] Drop `exec_` prefix and move `node_boot_time_seconds` from `exec` to new `boottime` collector and enable for Darwin/Dragonfly/FreeBSD/NetBSD/OpenBSD. #839, #901 -* [CHANGE] Remove depreated gmond collector #852 +* [CHANGE] Remove deprecated gmond collector #852 * [CHANGE] align Darwin disk stat names with Linux #930 * [FEATURE] Add `collect[]` parameter #699 * [FEATURE] Add text collector conversion for ipmitool output. #746 @@ -252,7 +581,7 @@ Other breaking changes: **Breaking changes** This release contains major breaking changes to flag handling. -* The flag library has been changed, all flags now require double-dashs. (`-foo` becomes `--foo`). +* The flag library has been changed, all flags now require double-dashes. (`-foo` becomes `--foo`). * The collector selection flag has been replaced by individual boolean flags. * The `-collector.procfs` and `-collector.sysfs` flags have been renamed to `--path.procfs` and `--path.sysfs` respectively. @@ -268,14 +597,14 @@ Windows support is now removed, the [wmi_exporter](https://github.com/martinlind * [CHANGE] Switch to kingpin flags #639 * [CHANGE] Replace --collectors.enabled with per-collector flags #640 * [FEATURE] Add ARP collector for Linux #540 -* [FEATURE] Add XFS colector for Linux #568, #575 +* [FEATURE] Add XFS collector for Linux #568, #575 * [FEATURE] Add qdisc collector for Linux #580 * [FEATURE] Add cpufreq stats for Linux #548 * [FEATURE] Add diskstats for Darwin #593 * [FEATURE] Add bcache collector for Linux #597 * [FEATURE] Add parsing /proc/net/snmp6 file for Linux #615 * [FEATURE] Add timex collector for Linux #664 -* [ENHANCEMENT] Include overal health status in smartmon.sh example script #546 +* [ENHANCEMENT] Include overall health status in smartmon.sh example script #546 * [ENHANCEMENT] Include `guest_nice` in CPU collector #554 * [ENHANCEMENT] Add exec_boot_time for freebsd, dragonfly #550 * [ENHANCEMENT] Get full resolution for node_time #555 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..d325872bdf --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Prometheus Community Code of Conduct + +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). 
diff --git a/Dockerfile b/Dockerfile index 57844bd8f8..4ba28f97d6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ ARG ARCH="amd64" ARG OS="linux" -FROM quay.io/prometheus/busybox-${OS}-${ARCH}:glibc +FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest LABEL maintainer="The Prometheus Authors " ARG ARCH="amd64" diff --git a/Makefile b/Makefile index 0efbe25504..7ba6bf2aa1 100644 --- a/Makefile +++ b/Makefile @@ -19,9 +19,10 @@ DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x include Makefile.common -PROMTOOL_VERSION ?= 2.18.1 +PROMTOOL_VERSION ?= 2.30.0 PROMTOOL_URL ?= https://github.com/prometheus/prometheus/releases/download/v$(PROMTOOL_VERSION)/prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM).tar.gz PROMTOOL ?= $(FIRST_GOPATH)/bin/promtool +E2E_EXTRA_FLAGS ?= DOCKER_IMAGE_NAME ?= node-exporter MACH ?= $(shell uname -m) @@ -45,18 +46,28 @@ else PROMU_CONF ?= .promu-cgo.yml endif else - PROMU_CONF ?= .promu-cgo.yml + # Do not use CGO for openbsd/amd64 builds + ifeq ($(GOOS), openbsd) + ifeq ($(GOARCH), amd64) + PROMU_CONF ?= .promu.yml + else + PROMU_CONF ?= .promu-cgo.yml + endif + else + PROMU_CONF ?= .promu-cgo.yml + endif endif endif PROMU := $(FIRST_GOPATH)/bin/promu --config $(PROMU_CONF) +e2e-out-64k-page = collector/fixtures/e2e-64k-page-output.txt e2e-out = collector/fixtures/e2e-output.txt ifeq ($(MACH), ppc64le) - e2e-out = collector/fixtures/e2e-64k-page-output.txt + e2e-out = $(e2e-out-64k-page) endif ifeq ($(MACH), aarch64) - e2e-out = collector/fixtures/e2e-64k-page-output.txt + e2e-out = $(e2e-out-64k-page) endif # 64bit -> 32bit mapping for cross-checking. At least for amd64/386, the 64bit CPU can execute 32bit code but not the other way around, so we don't support cross-testing upwards. @@ -78,12 +89,12 @@ $(eval $(call goarch_pair,mips64el,mipsel)) all:: vet checkmetrics checkrules common-all $(cross-test) $(test-e2e) .PHONY: test -test: collector/fixtures/sys/.unpacked +test: collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked @echo ">> running tests" $(GO) test -short $(test-flags) $(pkgs) .PHONY: test-32bit -test-32bit: collector/fixtures/sys/.unpacked +test-32bit: collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked @echo ">> running tests in 32-bit mode" @env GOARCH=$(GOARCH_CROSS) $(GO) test $(pkgs) @@ -93,20 +104,27 @@ skip-test-32bit: %/.unpacked: %.ttar @echo ">> extracting fixtures" - if [ -d $(dir $@) ] ; then rm -r $(dir $@) ; fi + if [ -d $(dir $@) ] ; then rm -rf $(dir $@) ; fi ./ttar -C $(dir $*) -x -f $*.ttar touch $@ update_fixtures: rm -vf collector/fixtures/sys/.unpacked ./ttar -C collector/fixtures -c -f collector/fixtures/sys.ttar sys + rm -vf collector/fixtures/udev/.unpacked + ./ttar -C collector/fixtures -c -f collector/fixtures/udev.ttar udev + +.PHONY: tools +tools: + @rm ./tools/tools >/dev/null 2>&1 || true + @$(GO) build -o tools/tools ./tools/... 
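As a usage sketch for the `E2E_EXTRA_FLAGS` hook added above (it is threaded into the `test-e2e` target that follows); the collector flag shown is the one the macOS workflow in this change passes:

```bash
# Forward extra node_exporter flags into the end-to-end test run.
make test-e2e E2E_EXTRA_FLAGS='--collector.diskstats.device-include=disk[04]'
```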
.PHONY: test-e2e -test-e2e: build collector/fixtures/sys/.unpacked +test-e2e: build collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked tools @echo ">> running end-to-end tests" - ./end-to-end-test.sh + ./end-to-end-test.sh -e "$(E2E_EXTRA_FLAGS)" @echo ">> running end-to-end tests with unix socket" - ./end-to-end-test.sh -s + ./end-to-end-test.sh -e "$(E2E_EXTRA_FLAGS)" -s .PHONY: skip-test-e2e skip-test-e2e: @@ -116,6 +134,7 @@ skip-test-e2e: checkmetrics: $(PROMTOOL) @echo ">> checking metrics for correctness" ./checkmetrics.sh $(PROMTOOL) $(e2e-out) + ./checkmetrics.sh $(PROMTOOL) $(e2e-out-64k-page) .PHONY: checkrules checkrules: $(PROMTOOL) @@ -132,4 +151,4 @@ promtool: $(PROMTOOL) $(PROMTOOL): mkdir -p $(FIRST_GOPATH)/bin - curl -fsS -L $(PROMTOOL_URL) | tar -xvzf - -C $(FIRST_GOPATH)/bin --no-anchored --strip 1 promtool + curl -fsS -L $(PROMTOOL_URL) | tar -xvzf - -C $(FIRST_GOPATH)/bin --strip 1 "prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM)/promtool" diff --git a/Makefile.common b/Makefile.common index 9320176ca2..143bf03fbc 100644 --- a/Makefile.common +++ b/Makefile.common @@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... @@ -72,23 +49,33 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) +ifneq ($(shell command -v gotestsum 2> /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.5.0 +PROMU_VERSION ?= 0.17.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.18.0 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +GOLANGCI_LINT_VERSION ?= v2.6.0 +GOLANGCI_FMT_OPTS ?= +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. 
+ ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif endif endif @@ -105,6 +92,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) +SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) + ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 @@ -118,7 +107,7 @@ endif %: common-% ; .PHONY: common-all -common-all: precheck style check_license lint unused build test +common-all: precheck style check_license lint yamllint unused build test .PHONY: common-style common-style: @@ -144,11 +133,7 @@ common-check_license: .PHONY: common-deps common-deps: @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif + $(GO) mod download .PHONY: update-go-deps update-go-deps: @@ -156,46 +141,56 @@ update-go-deps: @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ $(GO) get $$m; \ done - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifneq (,$(wildcard vendor)) - GO111MODULE=$(GO111MODULE) $(GO) mod vendor -endif + $(GO) mod tidy .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format -common-format: +common-format: $(GOLANGCI_LINT) @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + $(GO) fmt $(pkgs) +ifdef GOLANGCI_LINT + @echo ">> formatting code with golangci-lint" + $(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS) +endif .PHONY: common-vet common-vet: @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" -ifdef GO111MODULE -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) + $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif + +.PHONY: common-lint-fix +common-lint-fix: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint fix" + $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) +endif + +.PHONY: common-yamllint +common-yamllint: + @echo ">> running yamllint on all YAML files in the repository" +ifeq (, $(shell command -v yamllint 2> /dev/null)) + @echo "yamllint not installed so skipping" +else + yamllint . endif # For backward-compatibility. 
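For context on the `SANITIZED_DOCKER_IMAGE_TAG` substitution added above: Docker tags may not contain `+`, so any build-metadata suffix is mapped to `-` before tagging. A small shell sketch of the same transformation; the version string is only an example:

```bash
DOCKER_IMAGE_TAG='v1.10.2+dirty'
# Equivalent of the Makefile's $(subst +,-,$(DOCKER_IMAGE_TAG))
SANITIZED_DOCKER_IMAGE_TAG="${DOCKER_IMAGE_TAG//+/-}"
echo "$SANITIZED_DOCKER_IMAGE_TAG"   # -> v1.10.2-dirty
```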
@@ -203,38 +198,29 @@ endif common-staticcheck: lint .PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE +common-unused: @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) + $(GO) mod tidy @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif .PHONY: common-build common-build: promu @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) +.PHONY: common-docker-repo-name +common-docker-repo-name: + @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ @@ -243,17 +229,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%: .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" +DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) @@ -265,8 +253,8 @@ $(PROMU): cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) -.PHONY: proto -proto: +.PHONY: common-proto +common-proto: @echo ">> generating code from proto files" 
@./scripts/genproto.sh @@ -278,11 +266,9 @@ $(GOLANGCI_LINT): | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif +.PHONY: common-print-golangci-lint-version +common-print-golangci-lint-version: + @echo $(GOLANGCI_LINT_VERSION) .PHONY: precheck precheck:: @@ -298,3 +284,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... + +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/README.md b/README.md index effca34c03..ec5adbf7ac 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,8 @@ # Node exporter [![CircleCI](https://circleci.com/gh/prometheus/node_exporter/tree/master.svg?style=shield)][circleci] -[![Buildkite status](https://badge.buildkite.com/94a0c1fb00b1f46883219c256efe9ce01d63b6505f3a942f9b.svg)](https://buildkite.com/prometheus/node-exporter) +![bsd workflow](https://github.com/prometheus/node_exporter/actions/workflows/bsd.yml/badge.svg) +![golangci-lint workflow](https://github.com/prometheus/node_exporter/actions/workflows/golangci-lint.yml/badge.svg) [![Docker Repository on Quay](https://quay.io/repository/prometheus/node-exporter/status)][quay] [![Docker Pulls](https://img.shields.io/docker/pulls/prom/node-exporter.svg?maxAge=604800)][hub] [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/node_exporter)][goreportcard] @@ -11,9 +12,65 @@ in Go with pluggable metric collectors. The [Windows exporter](https://github.com/prometheus-community/windows_exporter) is recommended for Windows users. To expose NVIDIA GPU metrics, [prometheus-dcgm -](https://github.com/NVIDIA/gpu-monitoring-tools/tree/master/exporters/prometheus-dcgm) +](https://github.com/NVIDIA/dcgm-exporter) can be used. +## Installation and Usage + +If you are new to Prometheus and `node_exporter` there is a [simple step-by-step guide](https://prometheus.io/docs/guides/node-exporter/). + +The `node_exporter` listens on HTTP port 9100 by default. See the `--help` output for more options. + +### Ansible + +For automated installs with [Ansible](https://www.ansible.com/), there is the [Prometheus Community role](https://github.com/prometheus-community/ansible). + +### Docker + +The `node_exporter` is designed to monitor the host system. Deploying in containers requires +extra care in order to avoid monitoring the container itself. + +For situations where containerized deployment is needed, some extra flags must be used to allow +the `node_exporter` access to the host namespaces. + +Be aware that any non-root mount points you want to monitor will need to be bind-mounted +into the container. + +If you start container for host monitoring, specify `path.rootfs` argument. +This argument must match path in bind-mount of host root. The node\_exporter will use +`path.rootfs` as prefix to access host filesystem. + +```bash +docker run -d \ + --net="host" \ + --pid="host" \ + -v "/:/host:ro,rslave" \ + quay.io/prometheus/node-exporter:latest \ + --path.rootfs=/host +``` + +For Docker compose, similar flag changes are needed. 
+ +```yaml +--- +version: '3.8' + +services: + node_exporter: + image: quay.io/prometheus/node-exporter:latest + container_name: node_exporter + command: + - '--path.rootfs=/host' + network_mode: host + pid: host + restart: unless-stopped + volumes: + - '/:/host:ro,rslave' +``` + +On some systems, the `timex` collector requires an additional Docker flag, +`--cap-add=SYS_TIME`, in order to access the required syscalls. + ## Collectors There is varying support for collectors on each operating system. The tables @@ -21,6 +78,38 @@ below list all existing collectors and the supported systems. Collectors are enabled by providing a `--collector.` flag. Collectors that are enabled by default can be disabled by providing a `--no-collector.` flag. +To enable only some specific collector(s), use `--collector.disable-defaults --collector. ...`. + +### Include & Exclude flags + +A few collectors can be configured to include or exclude certain patterns using dedicated flags. The exclude flags are used to indicate "all except", while the include flags are used to say "none except". Note that these flags are mutually exclusive on collectors that support both. + +Example: + +```txt +--collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) +``` + +List: + +Collector | Scope | Include Flag | Exclude Flag +--- | --- | --- | --- +arp | device | --collector.arp.device-include | --collector.arp.device-exclude +cpu | bugs | --collector.cpu.info.bugs-include | N/A +cpu | flags | --collector.cpu.info.flags-include | N/A +diskstats | device | --collector.diskstats.device-include | --collector.diskstats.device-exclude +ethtool | device | --collector.ethtool.device-include | --collector.ethtool.device-exclude +ethtool | metrics | --collector.ethtool.metrics-include | N/A +filesystem | fs-types | --collector.filesystem.fs-types-include | --collector.filesystem.fs-types-exclude +filesystem | mount-points | --collector.filesystem.mount-points-include | --collector.filesystem.mount-points-exclude +hwmon | chip | --collector.hwmon.chip-include | --collector.hwmon.chip-exclude +hwmon | sensor | --collector.hwmon.sensor-include | --collector.hwmon.sensor-exclude +interrupts | name | --collector.interrupts.name-include | --collector.interrupts.name-exclude +netdev | device | --collector.netdev.device-include | --collector.netdev.device-exclude +qdisk | device | --collector.qdisk.device-include | --collector.qdisk.device-exclude +slabinfo | slab-names | --collector.slabinfo.slabs-include | --collector.slabinfo.slabs-exclude +sysctl | all | --collector.sysctl.include | N/A +systemd | unit | --collector.systemd.unit-include | --collector.systemd.unit-exclude ### Enabled by default @@ -29,14 +118,17 @@ Name | Description | OS arp | Exposes ARP statistics from `/proc/net/arp`. | Linux bcache | Exposes bcache statistics from `/sys/fs/bcache/`. | Linux bonding | Exposes the number of configured and active slaves of Linux bonding interfaces. | Linux +btrfs | Exposes btrfs statistics | Linux boottime | Exposes system boot time derived from the `kern.boottime` sysctl. | Darwin, Dragonfly, FreeBSD, NetBSD, OpenBSD, Solaris conntrack | Shows conntrack statistics (does nothing if no `/proc/sys/net/netfilter/` present). | Linux -cpu | Exposes CPU statistics | Darwin, Dragonfly, FreeBSD, Linux, Solaris +cpu | Exposes CPU statistics | Darwin, Dragonfly, FreeBSD, Linux, Solaris, OpenBSD cpufreq | Exposes CPU frequency statistics | Linux, Solaris diskstats | Exposes disk I/O statistics. 
| Darwin, Linux, OpenBSD +dmi | Expose Desktop Management Interface (DMI) info from `/sys/class/dmi/id/` | Linux edac | Exposes error detection and correction statistics. | Linux entropy | Exposes available entropy. | Linux exec | Exposes execution statistics. | Dragonfly, FreeBSD +fibrechannel | Exposes fibre channel information and statistics from `/sys/class/fc_host/`. | Linux filefd | Exposes file descriptor statistics from `/proc/sys/fs/file-nr`. | Linux filesystem | Exposes filesystem statistics, such as disk space used. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD hwmon | Expose hardware monitoring and sensor data from `/sys/class/hwmon/`. | Linux @@ -47,29 +139,97 @@ mdadm | Exposes statistics about devices in `/proc/mdstat` (does nothing if no ` meminfo | Exposes memory statistics. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD netclass | Exposes network interface info from `/sys/class/net/` | Linux netdev | Exposes network interface statistics such as bytes transferred. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD +netisr | Exposes netisr statistics | FreeBSD netstat | Exposes network statistics from `/proc/net/netstat`. This is the same information as `netstat -s`. | Linux nfs | Exposes NFS client statistics from `/proc/net/rpc/nfs`. This is the same information as `nfsstat -c`. | Linux nfsd | Exposes NFS kernel server statistics from `/proc/net/rpc/nfsd`. This is the same information as `nfsstat -s`. | Linux -pressure | Exposes pressure stall statistics from `/proc/pressure/`. | Linux (kernel 4.20+ and/or [CONFIG\_PSI](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt)) +nvme | Exposes NVMe info from `/sys/class/nvme/` | Linux +os | Expose OS release info from `/etc/os-release` or `/usr/lib/os-release` | _any_ +powersupplyclass | Exposes Power Supply statistics from `/sys/class/power_supply` | Linux +pressure | Exposes pressure stall statistics from `/proc/pressure/`. | Linux (kernel 4.20+ and/or [CONFIG\_PSI](https://www.kernel.org/doc/html/latest/accounting/psi.html)) rapl | Exposes various statistics from `/sys/class/powercap`. | Linux schedstat | Exposes task scheduler statistics from `/proc/schedstat`. | Linux +selinux | Exposes SELinux statistics. | Linux sockstat | Exposes various statistics from `/proc/net/sockstat`. | Linux softnet | Exposes statistics from `/proc/net/softnet_stat`. | Linux stat | Exposes various statistics from `/proc/stat`. This includes boot time, forks and interrupts. | Linux +tapestats | Exposes statistics from `/sys/class/scsi_tape`. | Linux textfile | Exposes statistics read from local disk. The `--collector.textfile.directory` flag must be set. | _any_ +thermal | Exposes thermal statistics like `pmset -g therm`. | Darwin thermal\_zone | Exposes thermal zone & cooling device statistics from `/sys/class/thermal`. | Linux time | Exposes the current system time. | _any_ timex | Exposes selected adjtimex(2) system call stats. | Linux udp_queues | Exposes UDP total lengths of the rx_queue and tx_queue from `/proc/net/udp` and `/proc/net/udp6`. | Linux uname | Exposes system information as provided by the uname system call. | Darwin, FreeBSD, Linux, OpenBSD vmstat | Exposes statistics from `/proc/vmstat`. | Linux +watchdog | Exposes statistics from `/sys/class/watchdog` | Linux xfs | Exposes XFS runtime statistics. | Linux (kernel 4.4+) -zfs | Exposes [ZFS](http://open-zfs.org/) performance statistics. 
| [Linux](http://zfsonlinux.org/), Solaris +zfs | Exposes [ZFS](http://open-zfs.org/) performance statistics. | FreeBSD, [Linux](http://zfsonlinux.org/), Solaris ### Disabled by default -The perf collector may not work by default on all Linux systems due to kernel -configuration and security settings. To allow access, set the following sysctl +`node_exporter` also implements a number of collectors that are disabled by default. Reasons for this vary by +collector, and may include: +* High cardinality +* Prolonged runtime that exceeds the Prometheus `scrape_interval` or `scrape_timeout` +* Significant resource demands on the host + +You can enable additional collectors as desired by adding them to your +init system's or service supervisor's startup configuration for +`node_exporter` but caution is advised. Enable at most one at a time, +testing first on a non-production system, then by hand on a single +production node. When enabling additional collectors, you should +carefully monitor the change by observing the ` +scrape_duration_seconds` metric to ensure that collection completes +and does not time out. In addition, monitor the +`scrape_samples_post_metric_relabeling` metric to see the changes in +cardinality. + +Name | Description | OS +---------|-------------|---- +buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux +cgroups | A summary of the number of active and enabled cgroups | Linux +cpu\_vulnerabilities | Exposes CPU vulnerability information from sysfs. | Linux +devstat | Exposes device statistics | Dragonfly, FreeBSD +drm | Expose GPU metrics using sysfs / DRM, `amdgpu` is the only driver which exposes this information through DRM | Linux +drbd | Exposes Distributed Replicated Block Device statistics (to version 8.4) | Linux +ethtool | Exposes network interface information and network driver statistics equivalent to `ethtool`, `ethtool -S`, and `ethtool -i`. | Linux +interrupts | Exposes detailed interrupts statistics. | Linux, OpenBSD +kernel_hung | Exposes number of tasks that have been detected as hung from `/proc/sys/kernel/hung_task_detect_count`. | Linux +ksmd | Exposes kernel and system statistics from `/sys/kernel/mm/ksm`. | Linux +lnstat | Exposes stats from `/proc/net/stat/`. | Linux +logind | Exposes session counts from [logind](http://www.freedesktop.org/wiki/Software/systemd/logind/). | Linux +meminfo\_numa | Exposes memory statistics from `/sys/devices/system/node/node[0-9]*/meminfo`, `/sys/devices/system/node/node[0-9]*/numastat`. | Linux +mountstats | Exposes filesystem statistics from `/proc/self/mountstats`. Exposes detailed NFS client statistics. | Linux +network_route | Exposes the routing table as metrics | Linux +pcidevice | Exposes pci devices' information including their link status and parent devices. | Linux +perf | Exposes perf based metrics (Warning: Metrics are dependent on kernel configuration and settings). | Linux +processes | Exposes aggregate process statistics from `/proc`. | Linux +qdisc | Exposes [queuing discipline](https://en.wikipedia.org/wiki/Network_scheduler#Linux_kernel) statistics | Linux +slabinfo | Exposes slab statistics from `/proc/slabinfo`. Note that permission of `/proc/slabinfo` is usually 0400, so set it appropriately. | Linux +softirqs | Exposes detailed softirq statistics from `/proc/softirqs`. | Linux +sysctl | Expose sysctl values from `/proc/sys`. Use `--collector.sysctl.include(-info)` to configure. | Linux +swap | Expose swap information from `/proc/swaps`. 
| Linux +systemd | Exposes service and system status from [systemd](http://www.freedesktop.org/wiki/Software/systemd/). | Linux +tcpstat | Exposes TCP connection status information from `/proc/net/tcp` and `/proc/net/tcp6`. (Warning: the current version has potential performance issues in high load situations.) | Linux +wifi | Exposes WiFi device and station statistics. | Linux +xfrm | Exposes statistics from `/proc/net/xfrm_stat` | Linux +zoneinfo | Exposes NUMA memory zone metrics. | Linux + +### Deprecated + +These collectors are deprecated and will be removed in the next major release. + +Name | Description | OS +---------|-------------|---- +ntp | Exposes local NTP daemon health to check [time](./docs/TIME.md) | _any_ +runit | Exposes service status from [runit](http://smarden.org/runit/). | _any_ +supervisord | Exposes service status from [supervisord](http://supervisord.org/). | _any_ + +### Perf Collector + +The `perf` collector may not work out of the box on some Linux systems due to kernel +configuration and security settings. To allow access, set the following `sysctl` parameter: ``` @@ -85,7 +245,7 @@ Depending on the configured value different metrics will be available, for most cases `0` will provide the most complete set. For more information see [`man 2 perf_event_open`](http://man7.org/linux/man-pages/man2/perf_event_open.2.html). -By default, the perf collector will only collect metrics of the CPUs that +By default, the `perf` collector will only collect metrics of the CPUs that `node_exporter` is running on (ie [`runtime.NumCPU`](https://golang.org/pkg/runtime/#NumCPU). If this is insufficient (e.g. if you run `node_exporter` with its CPU affinity set to @@ -96,43 +256,73 @@ configuration is zero indexed and can also take a stride value; e.g. `--collector.perf --collector.perf.cpus=1-10:5` would collect on CPUs 1, 5, and 10. -The perf collector is also able to collect +The `perf` collector is also able to collect [tracepoint](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html) counts when using the `--collector.perf.tracepoint` flag. Tracepoints can be found using [`perf list`](http://man7.org/linux/man-pages/man1/perf.1.html) or from debugfs. And example usage of this would be `--collector.perf.tracepoint="sched:sched_process_exec"`. +### Sysctl Collector -Name | Description | OS ----------|-------------|---- -buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux -devstat | Exposes device statistics | Dragonfly, FreeBSD -drbd | Exposes Distributed Replicated Block Device statistics (to version 8.4) | Linux -interrupts | Exposes detailed interrupts statistics. | Linux, OpenBSD -ksmd | Exposes kernel and system statistics from `/sys/kernel/mm/ksm`. | Linux -logind | Exposes session counts from [logind](http://www.freedesktop.org/wiki/Software/systemd/logind/). | Linux -meminfo\_numa | Exposes memory statistics from `/proc/meminfo_numa`. | Linux -mountstats | Exposes filesystem statistics from `/proc/self/mountstats`. Exposes detailed NFS client statistics. | Linux -ntp | Exposes local NTP daemon health to check [time](./docs/TIME.md) | _any_ -processes | Exposes aggregate process statistics from `/proc`. | Linux -qdisc | Exposes [queuing discipline](https://en.wikipedia.org/wiki/Network_scheduler#Linux_kernel) statistics | Linux -runit | Exposes service status from [runit](http://smarden.org/runit/). | _any_ -supervisord | Exposes service status from [supervisord](http://supervisord.org/). 
| _any_ -systemd | Exposes service and system status from [systemd](http://www.freedesktop.org/wiki/Software/systemd/). | Linux -tcpstat | Exposes TCP connection status information from `/proc/net/tcp` and `/proc/net/tcp6`. (Warning: the current version has potential performance issues in high load situations.) | Linux -wifi | Exposes WiFi device and station statistics. | Linux -perf | Exposes perf based metrics (Warning: Metrics are dependent on kernel configuration and settings). | Linux +The `sysctl` collector can be enabled with `--collector.sysctl`. It supports exposing numeric sysctl values +as metrics using the `--collector.sysctl.include` flag and string values as info metrics by using the +`--collector.sysctl.include-info` flag. The flags can be repeated. For sysctl with multiple numeric values, +an optional mapping can be given to expose each value as its own metric. Otherwise an `index` label is used +to identify the different fields. + +#### Examples +##### Numeric values +###### Single values +Using `--collector.sysctl.include=vm.user_reserve_kbytes`: +`vm.user_reserve_kbytes = 131072` -> `node_sysctl_vm_user_reserve_kbytes 131072` + +###### Multiple values +A sysctl can contain multiple values, for example: +``` +net.ipv4.tcp_rmem = 4096 131072 6291456 +``` +Using `--collector.sysctl.include=net.ipv4.tcp_rmem` the collector will expose: +``` +node_sysctl_net_ipv4_tcp_rmem{index="0"} 4096 +node_sysctl_net_ipv4_tcp_rmem{index="1"} 131072 +node_sysctl_net_ipv4_tcp_rmem{index="2"} 6291456 +``` +If the indexes have defined meaning like in this case, the values can be mapped to multiple metrics by appending the mapping to the --collector.sysctl.include flag: +Using `--collector.sysctl.include=net.ipv4.tcp_rmem:min,default,max` the collector will expose: +``` +node_sysctl_net_ipv4_tcp_rmem_min 4096 +node_sysctl_net_ipv4_tcp_rmem_default 131072 +node_sysctl_net_ipv4_tcp_rmem_max 6291456 +``` + +##### String values +String values need to be exposed as info metric. The user selects them by using the `--collector.sysctl.include-info` flag. + +###### Single values +`kernel.core_pattern = core` -> `node_sysctl_info{key="kernel.core_pattern_info", value="core"} 1` + +###### Multiple values +Given the following sysctl: +``` +kernel.seccomp.actions_avail = kill_process kill_thread trap errno trace log allow +``` +Setting `--collector.sysctl.include-info=kernel.seccomp.actions_avail` will yield: +``` +node_sysctl_info{key="kernel.seccomp.actions_avail", index="0", value="kill_process"} 1 +node_sysctl_info{key="kernel.seccomp.actions_avail", index="1", value="kill_thread"} 1 +... +``` ### Textfile Collector -The textfile collector is similar to the [Pushgateway](https://github.com/prometheus/pushgateway), +The `textfile` collector is similar to the [Pushgateway](https://github.com/prometheus/pushgateway), in that it allows exporting of statistics from batch jobs. It can also be used to export static metrics, such as what role a machine has. The Pushgateway -should be used for service-level metrics. The textfile module is for metrics +should be used for service-level metrics. The `textfile` module is for metrics that are tied to a machine. -To use it, set the `--collector.textfile.directory` flag on the Node exporter. The +To use it, set the `--collector.textfile.directory` flag on the `node_exporter` commandline. The collector will parse all files in that directory matching the glob `*.prom` using the [text format](http://prometheus.io/docs/instrumenting/exposition_formats/). 
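For example, a static role metric could be published from a hypothetical `role.prom` file in that directory (the metric name and label below are illustrative, not something `node_exporter` ships):

```
# HELP role Role assigned to this machine.
# TYPE role gauge
role{role="application_server"} 1
```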
**Note:** Timestamps are not supported. @@ -153,18 +343,26 @@ mv /path/to/directory/role.prom.$$ /path/to/directory/role.prom The `node_exporter` will expose all metrics from enabled collectors by default. This is the recommended way to collect metrics to avoid errors when comparing metrics of different families. -For advanced use the `node_exporter` can be passed an optional list of collectors to filter metrics. The `collect[]` parameter may be used multiple times. In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#). +For advanced use the `node_exporter` can be passed an optional list of collectors to filter metrics. The parameters `collect[]` and `exclude[]` can be used multiple times (but cannot be combined). In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#). +Collect only `cpu` and `meminfo` collector metrics: ``` params: collect[]: - - foo - - bar + - cpu + - meminfo +``` + +Collect all enabled collector metrics but exclude `netdev`: +``` + params: + exclude[]: + - netdev ``` This can be useful for having different Prometheus servers collect specific metrics from nodes. -## Building and running +## Development building and running Prerequisites: @@ -173,9 +371,9 @@ Prerequisites: Building: - go get github.com/prometheus/node_exporter - cd ${GOPATH-$HOME/go}/src/github.com/prometheus/node_exporter - make + git clone https://github.com/prometheus/node_exporter.git + cd node_exporter + make build ./node_exporter To see all available configuration flags: @@ -188,40 +386,15 @@ To see all available configuration flags: ## TLS endpoint -** EXPERIMENTAL ** +**EXPERIMENTAL** The exporter supports TLS via a new web configuration file. ```console -./node_exporter --web.config=web-config.yml +./node_exporter --web.config.file=web-config.yml ``` -See the [https package](https/README.md) for more details. - -## Using Docker -The `node_exporter` is designed to monitor the host system. It's not recommended -to deploy it as a Docker container because it requires access to the host system. -Be aware that any non-root mount points you want to monitor will need to be bind-mounted -into the container. -If you start container for host monitoring, specify `path.rootfs` argument. -This argument must match path in bind-mount of host root. The node\_exporter will use -`path.rootfs` as prefix to access host filesystem. - -```bash -docker run -d \ - --net="host" \ - --pid="host" \ - -v "/:/host:ro,rslave" \ - quay.io/prometheus/node-exporter \ - --path.rootfs=/host -``` - -On some systems, the `timex` collector requires an additional Docker flag, -`--cap-add=SYS_TIME`, in order to access the required syscalls. - -## Using a third-party repository for RHEL/CentOS/Fedora - -There is a [community-supplied COPR repository](https://copr.fedorainfracloud.org/coprs/ibotty/prometheus-exporters/) which closely follows upstream releases. +See the [exporter-toolkit web-configuration](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md) for more details. 
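As a minimal sketch, a `web-config.yml` that only enables TLS might look like the following (the certificate and key file names are placeholders; see the exporter-toolkit documentation linked above for the full set of options):

```yaml
tls_server_config:
  cert_file: node_exporter.crt
  key_file: node_exporter.key
```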
[travis]: https://travis-ci.org/prometheus/node_exporter [hub]: https://hub.docker.com/r/prom/node-exporter/ diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..fed02d85c7 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,6 @@ +# Reporting a security issue + +The Prometheus security policy, including how to report vulnerabilities, can be +found here: + + diff --git a/VERSION b/VERSION index 7dea76edb3..5ad2491cf8 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.0.1 +1.10.2 diff --git a/collector/arp_linux.go b/collector/arp_linux.go index 86cb78a117..c3d4715163 100644 --- a/collector/arp_linux.go +++ b/collector/arp_linux.go @@ -11,24 +11,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noarp +//go:build !noarp package collector import ( - "bufio" "fmt" - "io" - "os" - "strings" + "log/slog" - "github.com/go-kit/kit/log" + "github.com/alecthomas/kingpin/v2" + "github.com/jsimonetti/rtnetlink/v2/rtnl" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" + "golang.org/x/sys/unix" +) + +var ( + arpDeviceInclude = kingpin.Flag("collector.arp.device-include", "Regexp of arp devices to include (mutually exclusive to device-exclude).").String() + arpDeviceExclude = kingpin.Flag("collector.arp.device-exclude", "Regexp of arp devices to exclude (mutually exclusive to device-include).").String() + arpNetlink = kingpin.Flag("collector.arp.netlink", "Use netlink to gather stats instead of /proc/net/arp.").Default("true").Bool() ) type arpCollector struct { - entries *prometheus.Desc - logger log.Logger + fs procfs.FS + deviceFilter deviceFilter + entries *prometheus.Desc + logger *slog.Logger } func init() { @@ -36,8 +44,15 @@ func init() { } // NewARPCollector returns a new Collector exposing ARP stats. -func NewARPCollector(logger log.Logger) (Collector, error) { +func NewARPCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + return &arpCollector{ + fs: fs, + deviceFilter: newDeviceFilter(*arpDeviceExclude, *arpDeviceInclude), entries: prometheus.NewDesc( prometheus.BuildFQName(namespace, "arp", "entries"), "ARP entries by device", @@ -47,56 +62,66 @@ func NewARPCollector(logger log.Logger) (Collector, error) { }, nil } -func getARPEntries() (map[string]uint32, error) { - file, err := os.Open(procFilePath("net/arp")) +func getTotalArpEntries(deviceEntries []procfs.ARPEntry) map[string]uint32 { + entries := make(map[string]uint32) + + for _, device := range deviceEntries { + entries[device.Device]++ + } + + return entries +} + +func getTotalArpEntriesRTNL() (map[string]uint32, error) { + conn, err := rtnl.Dial(nil) if err != nil { return nil, err } - defer file.Close() + defer conn.Close() - entries, err := parseARPEntries(file) + // Neighbors will also contain IPv6 neighbors, but since this is purely an ARP collector, + // restrict to AF_INET. + neighbors, err := conn.Neighbours(nil, unix.AF_INET) if err != nil { return nil, err } - return entries, nil -} - -// TODO: This should get extracted to the github.com/prometheus/procfs package -// to support more complete parsing of /proc/net/arp. Instead of adding -// more fields to this function's return values it should get moved and -// changed to support each field. 
-func parseARPEntries(data io.Reader) (map[string]uint32, error) { - scanner := bufio.NewScanner(data) + // Map of interface name to ARP neighbor count. entries := make(map[string]uint32) - for scanner.Scan() { - columns := strings.Fields(scanner.Text()) - - if len(columns) < 6 { - return nil, fmt.Errorf("unexpected ARP table format") - } - - if columns[0] != "IP" { - deviceIndex := len(columns) - 1 - entries[columns[deviceIndex]]++ + for _, n := range neighbors { + // Skip entries which have state NUD_NOARP to conform to output of /proc/net/arp. + if n.State&unix.NUD_NOARP == 0 { + entries[n.Interface.Name]++ } } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("failed to parse ARP info: %w", err) - } - return entries, nil } func (c *arpCollector) Update(ch chan<- prometheus.Metric) error { - entries, err := getARPEntries() - if err != nil { - return fmt.Errorf("could not get ARP entries: %w", err) + var enumeratedEntry map[string]uint32 + + if *arpNetlink { + var err error + + enumeratedEntry, err = getTotalArpEntriesRTNL() + if err != nil { + return fmt.Errorf("could not get ARP entries: %w", err) + } + } else { + entries, err := c.fs.GatherARPEntries() + if err != nil { + return fmt.Errorf("could not get ARP entries: %w", err) + } + + enumeratedEntry = getTotalArpEntries(entries) } - for device, entryCount := range entries { + for device, entryCount := range enumeratedEntry { + if c.deviceFilter.ignored(device) { + continue + } ch <- prometheus.MustNewConstMetric( c.entries, prometheus.GaugeValue, float64(entryCount), device) } diff --git a/collector/bcache_linux.go b/collector/bcache_linux.go index 20995c7608..90366cc32a 100644 --- a/collector/bcache_linux.go +++ b/collector/bcache_linux.go @@ -11,18 +11,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nobcache +//go:build !nobcache package collector import ( "fmt" + "log/slog" - "github.com/go-kit/kit/log" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/bcache" ) +var ( + priorityStats = kingpin.Flag("collector.bcache.priorityStats", "Expose expensive priority stats.").Bool() +) + func init() { registerCollector("bcache", defaultEnabled, NewBcacheCollector) } @@ -30,12 +35,12 @@ func init() { // A bcacheCollector is a Collector which gathers metrics from Linux bcache. type bcacheCollector struct { fs bcache.FS - logger log.Logger + logger *slog.Logger } // NewBcacheCollector returns a newly allocated bcacheCollector. // It exposes a number of Linux bcache statistics. -func NewBcacheCollector(logger log.Logger) (Collector, error) { +func NewBcacheCollector(logger *slog.Logger) (Collector, error) { fs, err := bcache.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) @@ -50,7 +55,13 @@ func NewBcacheCollector(logger log.Logger) (Collector, error) { // Update reads and exposes bcache stats. // It implements the Collector interface. 
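// When the --collector.bcache.priorityStats flag is not set, the expensive
// per-cache priority_stats files are skipped by calling StatsWithoutPriority
// instead of Stats.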
func (c *bcacheCollector) Update(ch chan<- prometheus.Metric) error { - stats, err := c.fs.Stats() + var stats []*bcache.Stats + var err error + if *priorityStats { + stats, err = c.fs.Stats() + } else { + stats, err = c.fs.StatsWithoutPriority() + } if err != nil { return fmt.Errorf("failed to retrieve bcache stats: %w", err) } @@ -122,14 +133,19 @@ func bcachePeriodStatsToMetric(ps *bcache.PeriodStats, labelValue string) []bcac extraLabel: label, extraLabelValue: labelValue, }, - { - name: "cache_readaheads_total", - desc: "Count of times readahead occurred.", - value: float64(ps.CacheReadaheads), - metricType: prometheus.CounterValue, - extraLabel: label, - extraLabelValue: labelValue, - }, + } + if ps.CacheReadaheads != 0 { + bcacheReadaheadMetrics := []bcacheMetric{ + { + name: "cache_readaheads_total", + desc: "Count of times readahead occurred.", + value: float64(ps.CacheReadaheads), + metricType: prometheus.CounterValue, + extraLabel: label, + extraLabelValue: labelValue, + }, + } + metrics = append(metrics, bcacheReadaheadMetrics...) } return metrics } @@ -223,6 +239,46 @@ func (c *bcacheCollector) updateBcacheStats(ch chan<- prometheus.Metric, s *bcac extraLabel: []string{"backing_device"}, extraLabelValue: bdev.Name, }, + { + name: "dirty_target_bytes", + desc: "Current dirty data target threshold for this backing device in bytes.", + value: float64(bdev.WritebackRateDebug.Target), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, + { + name: "writeback_rate", + desc: "Current writeback rate for this backing device in bytes.", + value: float64(bdev.WritebackRateDebug.Rate), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, + { + name: "writeback_rate_proportional_term", + desc: "Current result of proportional controller, part of writeback rate", + value: float64(bdev.WritebackRateDebug.Proportional), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, + { + name: "writeback_rate_integral_term", + desc: "Current result of integral controller, part of writeback rate", + value: float64(bdev.WritebackRateDebug.Integral), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, + { + name: "writeback_change", + desc: "Last writeback rate change step for this backing device.", + value: float64(bdev.WritebackRateDebug.Change), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, } allMetrics = append(allMetrics, metrics...) 
@@ -259,23 +315,28 @@ func (c *bcacheCollector) updateBcacheStats(ch chan<- prometheus.Metric, s *bcac extraLabel: []string{"cache_device"}, extraLabelValue: cache.Name, }, + } + if *priorityStats { // metrics in /sys/fs/bcache///priority_stats - { - name: "priority_stats_unused_percent", - desc: "The percentage of the cache that doesn't contain any data.", - value: float64(cache.Priority.UnusedPercent), - metricType: prometheus.GaugeValue, - extraLabel: []string{"cache_device"}, - extraLabelValue: cache.Name, - }, - { - name: "priority_stats_metadata_percent", - desc: "Bcache's metadata overhead.", - value: float64(cache.Priority.MetadataPercent), - metricType: prometheus.GaugeValue, - extraLabel: []string{"cache_device"}, - extraLabelValue: cache.Name, - }, + priorityStatsMetrics := []bcacheMetric{ + { + name: "priority_stats_unused_percent", + desc: "The percentage of the cache that doesn't contain any data.", + value: float64(cache.Priority.UnusedPercent), + metricType: prometheus.GaugeValue, + extraLabel: []string{"cache_device"}, + extraLabelValue: cache.Name, + }, + { + name: "priority_stats_metadata_percent", + desc: "Bcache's metadata overhead.", + value: float64(cache.Priority.MetadataPercent), + metricType: prometheus.GaugeValue, + extraLabel: []string{"cache_device"}, + extraLabelValue: cache.Name, + }, + } + metrics = append(metrics, priorityStatsMetrics...) } allMetrics = append(allMetrics, metrics...) } diff --git a/collector/bonding_linux.go b/collector/bonding_linux.go index 863f62c2ea..a1e0a98735 100644 --- a/collector/bonding_linux.go +++ b/collector/bonding_linux.go @@ -11,26 +11,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nobonding +//go:build !nobonding package collector import ( "errors" "fmt" - "io/ioutil" + "log/slog" "os" "path/filepath" "strings" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" ) type bondingCollector struct { slaves, active typedDesc - logger log.Logger + logger *slog.Logger } func init() { @@ -39,7 +37,7 @@ func init() { // NewBondingCollector returns a newly allocated bondingCollector. // It exposes the number of configured and active slave of linux bonding interfaces. 
-func NewBondingCollector(logger log.Logger) (Collector, error) { +func NewBondingCollector(logger *slog.Logger) (Collector, error) { return &bondingCollector{ slaves: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "bonding", "slaves"), @@ -61,7 +59,7 @@ func (c *bondingCollector) Update(ch chan<- prometheus.Metric) error { bondingStats, err := readBondingStats(statusfile) if err != nil { if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "Not collecting bonding, file does not exist", "file", statusfile) + c.logger.Debug("Not collecting bonding, file does not exist", "file", statusfile) return ErrNoData } return err @@ -75,21 +73,21 @@ func (c *bondingCollector) Update(ch chan<- prometheus.Metric) error { func readBondingStats(root string) (status map[string][2]int, err error) { status = map[string][2]int{} - masters, err := ioutil.ReadFile(filepath.Join(root, "bonding_masters")) + masters, err := os.ReadFile(filepath.Join(root, "bonding_masters")) if err != nil { return nil, err } - for _, master := range strings.Fields(string(masters)) { - slaves, err := ioutil.ReadFile(filepath.Join(root, master, "bonding", "slaves")) + for master := range strings.FieldsSeq(string(masters)) { + slaves, err := os.ReadFile(filepath.Join(root, master, "bonding", "slaves")) if err != nil { return nil, err } sstat := [2]int{0, 0} - for _, slave := range strings.Fields(string(slaves)) { - state, err := ioutil.ReadFile(filepath.Join(root, master, fmt.Sprintf("lower_%s", slave), "bonding_slave", "mii_status")) + for slave := range strings.FieldsSeq(string(slaves)) { + state, err := os.ReadFile(filepath.Join(root, master, fmt.Sprintf("lower_%s", slave), "bonding_slave", "mii_status")) if errors.Is(err, os.ErrNotExist) { // some older? kernels use slave_ prefix - state, err = ioutil.ReadFile(filepath.Join(root, master, fmt.Sprintf("slave_%s", slave), "bonding_slave", "mii_status")) + state, err = os.ReadFile(filepath.Join(root, master, fmt.Sprintf("slave_%s", slave), "bonding_slave", "mii_status")) } if err != nil { return nil, err diff --git a/collector/bonding_linux_test.go b/collector/bonding_linux_test.go index 564cf01e25..874819225c 100644 --- a/collector/bonding_linux_test.go +++ b/collector/bonding_linux_test.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nobonding + package collector import ( diff --git a/collector/boot_time_bsd.go b/collector/boot_time_bsd.go index 74d4bcee36..fe1ddcac89 100644 --- a/collector/boot_time_bsd.go +++ b/collector/boot_time_bsd.go @@ -11,19 +11,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build freebsd dragonfly openbsd netbsd darwin -// +build !noboottime +//go:build (freebsd || dragonfly || openbsd || netbsd || darwin) && !noboottime package collector import ( - "github.com/go-kit/kit/log" + "log/slog" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" ) type bootTimeCollector struct { - boottime bsdSysctl - logger log.Logger + logger *slog.Logger } func init() { @@ -31,29 +31,27 @@ func init() { } // newBootTimeCollector returns a new Collector exposing system boot time on BSD systems. 
-func newBootTimeCollector(logger log.Logger) (Collector, error) { +func newBootTimeCollector(logger *slog.Logger) (Collector, error) { return &bootTimeCollector{ - boottime: bsdSysctl{ - name: "boot_time_seconds", - description: "Unix time of last boot, including microseconds.", - mib: "kern.boottime", - dataType: bsdSysctlTypeStructTimeval, - }, logger: logger, }, nil } // Update pushes boot time onto ch func (c *bootTimeCollector) Update(ch chan<- prometheus.Metric) error { - v, err := c.boottime.Value() + tv, err := unix.SysctlTimeval("kern.boottime") if err != nil { return err } + // This conversion maintains the usec precision. Using the time + // package did not. + v := float64(tv.Sec) + (float64(tv.Usec) / float64(1000*1000)) + ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", c.boottime.name), - c.boottime.description, + prometheus.BuildFQName(namespace, "", "boot_time_seconds"), + "Unix time of last boot, including microseconds.", nil, nil, ), prometheus.GaugeValue, v) diff --git a/collector/boot_time_solaris.go b/collector/boot_time_solaris.go index eb270347ff..4b2ef4577c 100644 --- a/collector/boot_time_solaris.go +++ b/collector/boot_time_solaris.go @@ -11,27 +11,27 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build solaris -// +build !noboottime +//go:build !noboottime package collector import ( - "github.com/go-kit/kit/log" + "log/slog" + + "github.com/illumos/go-kstat" "github.com/prometheus/client_golang/prometheus" - "github.com/siebenmann/go-kstat" ) type bootTimeCollector struct { boottime typedDesc - logger log.Logger + logger *slog.Logger } func init() { registerCollector("boottime", defaultEnabled, newBootTimeCollector) } -func newBootTimeCollector(logger log.Logger) (Collector, error) { +func newBootTimeCollector(logger *slog.Logger) (Collector, error) { return &bootTimeCollector{ boottime: typedDesc{ prometheus.NewDesc( diff --git a/collector/btrfs_linux.go b/collector/btrfs_linux.go index 4d4857b8f2..bc6fb6048d 100644 --- a/collector/btrfs_linux.go +++ b/collector/btrfs_linux.go @@ -11,14 +11,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nobtrfs +//go:build !nobtrfs package collector import ( "fmt" + "log/slog" + "path" + "strings" + "syscall" - "github.com/go-kit/kit/log" + dennwc "github.com/dennwc/btrfs" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/btrfs" ) @@ -26,7 +30,7 @@ import ( // A btrfsCollector is a Collector which gathers metrics from Btrfs filesystems. type btrfsCollector struct { fs btrfs.FS - logger log.Logger + logger *slog.Logger } func init() { @@ -34,7 +38,7 @@ func init() { } // NewBtrfsCollector returns a new Collector exposing Btrfs statistics. 
-func NewBtrfsCollector(logger log.Logger) (Collector, error) { +func NewBtrfsCollector(logger *slog.Logger) (Collector, error) { fs, err := btrfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) @@ -51,19 +55,166 @@ func NewBtrfsCollector(logger log.Logger) (Collector, error) { func (c *btrfsCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.Stats() if err != nil { - return fmt.Errorf("failed to retrieve Btrfs stats: %w", err) + return fmt.Errorf("failed to retrieve Btrfs stats from procfs: %w", err) + } + + ioctlStatsMap, err := c.getIoctlStats() + if err != nil { + c.logger.Debug( + "Error querying btrfs device stats with ioctl", + "err", err) + ioctlStatsMap = make(map[string]*btrfsIoctlFsStats) } for _, s := range stats { - c.updateBtrfsStats(ch, s) + // match up procfs and ioctl info by filesystem UUID (without dashes) + var fsUUID = strings.ReplaceAll(s.UUID, "-", "") + ioctlStats := ioctlStatsMap[fsUUID] + c.updateBtrfsStats(ch, s, ioctlStats) } return nil } +type btrfsIoctlFsDevStats struct { + path string + uuid string + + bytesUsed uint64 + totalBytes uint64 + + // The error stats below match the following upstream lists: + // https://github.com/dennwc/btrfs/blob/b3db0b2dedac3bf580f412034d77e0bf4b420167/btrfs.go#L132-L140 + // https://github.com/torvalds/linux/blob/70d605cbeecb408dd884b1f0cd3963eeeaac144c/include/uapi/linux/btrfs.h#L680-L692 + writeErrs uint64 + readErrs uint64 + flushErrs uint64 + corruptionErrs uint64 + generationErrs uint64 +} + +type btrfsIoctlFsStats struct { + uuid string + devices []btrfsIoctlFsDevStats +} + +func (c *btrfsCollector) getIoctlStats() (map[string]*btrfsIoctlFsStats, error) { + // Instead of introducing more ioctl calls to scan for all btrfs + // filesystems re-use our mount point utils to find known mounts + mountsList, err := mountPointDetails(c.logger) + if err != nil { + return nil, err + } + + // Track devices we have successfully scanned, by device path. + devicesDone := make(map[string]struct{}) + // Filesystems scann results by UUID. + fsStats := make(map[string]*btrfsIoctlFsStats) + + for _, mount := range mountsList { + if mount.fsType != "btrfs" { + continue + } + + if _, found := devicesDone[mount.device]; found { + // We already found this filesystem by another mount point. + continue + } + + mountPath := rootfsFilePath(mount.mountPoint) + + fs, err := dennwc.Open(mountPath, true) + if err != nil { + // Failed to open this mount point, maybe we didn't have permission + // maybe we'll find another mount point for this FS later. 
+ c.logger.Debug( + "Error inspecting btrfs mountpoint", + "mountPoint", mountPath, + "err", err) + continue + } + defer fs.Close() + + fsInfo, err := fs.Info() + if err != nil { + // Failed to get the FS info for some reason, + // perhaps it'll work with a different mount point + c.logger.Debug( + "Error querying btrfs filesystem", + "mountPoint", mountPath, + "err", err) + continue + } + + fsID := fsInfo.FSID.String() + if _, found := fsStats[fsID]; found { + // We already found this filesystem by another mount point + continue + } + + deviceStats, err := c.getIoctlDeviceStats(fs, &fsInfo) + if err != nil { + c.logger.Debug( + "Error querying btrfs device stats", + "mountPoint", mountPath, + "err", err) + continue + } + + devicesDone[mount.device] = struct{}{} + fsStats[fsID] = &btrfsIoctlFsStats{ + uuid: fsID, + devices: deviceStats, + } + } + + return fsStats, nil +} + +func (c *btrfsCollector) getIoctlDeviceStats(fs *dennwc.FS, fsInfo *dennwc.Info) ([]btrfsIoctlFsDevStats, error) { + devices := make([]btrfsIoctlFsDevStats, 0, fsInfo.NumDevices) + + for i := uint64(0); i <= fsInfo.MaxID; i++ { + deviceInfo, err := fs.GetDevInfo(i) + + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ENODEV { + // Device IDs do not consistently start at 0, nor are ranges contiguous, so we expect this. + continue + } + return nil, err + } + + deviceStats, err := fs.GetDevStats(i) + if err != nil { + return nil, err + } + + devices = append(devices, btrfsIoctlFsDevStats{ + path: deviceInfo.Path, + uuid: deviceInfo.UUID.String(), + bytesUsed: deviceInfo.BytesUsed, + totalBytes: deviceInfo.TotalBytes, + + writeErrs: deviceStats.WriteErrs, + readErrs: deviceStats.ReadErrs, + flushErrs: deviceStats.FlushErrs, + corruptionErrs: deviceStats.CorruptionErrs, + generationErrs: deviceStats.GenerationErrs, + }) + + if uint64(len(devices)) == fsInfo.NumDevices { + break + } + } + + return devices, nil +} + // btrfsMetric represents a single Btrfs metric that is converted into a Prometheus Metric. type btrfsMetric struct { name string + metricType prometheus.ValueType desc string value float64 extraLabel []string @@ -71,14 +222,14 @@ type btrfsMetric struct { } // updateBtrfsStats collects statistics for one bcache ID. -func (c *btrfsCollector) updateBtrfsStats(ch chan<- prometheus.Metric, s *btrfs.Stats) { +func (c *btrfsCollector) updateBtrfsStats(ch chan<- prometheus.Metric, s *btrfs.Stats, ioctlStats *btrfsIoctlFsStats) { const subsystem = "btrfs" // Basic information about the filesystem. devLabels := []string{"uuid"} // Retrieve the metrics. - metrics := c.getMetrics(s) + metrics := c.getMetrics(s, ioctlStats) // Convert all gathered metrics to Prometheus Metrics and add to channel. for _, m := range metrics { @@ -98,7 +249,7 @@ func (c *btrfsCollector) updateBtrfsStats(ch chan<- prometheus.Metric, s *btrfs. ch <- prometheus.MustNewConstMetric( desc, - prometheus.GaugeValue, + m.metricType, m.value, labelValues..., ) @@ -106,31 +257,46 @@ func (c *btrfsCollector) updateBtrfsStats(ch chan<- prometheus.Metric, s *btrfs. } // getMetrics returns metrics for the given Btrfs statistics. 
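// Note: ioctlStats may be nil when the ioctl-based scan in getIoctlStats
// failed (for example due to insufficient permissions). In that case only the
// procfs-derived per-device size metrics are emitted.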
-func (c *btrfsCollector) getMetrics(s *btrfs.Stats) []btrfsMetric { +func (c *btrfsCollector) getMetrics(s *btrfs.Stats, ioctlStats *btrfsIoctlFsStats) []btrfsMetric { metrics := []btrfsMetric{ { name: "info", desc: "Filesystem information", value: 1, + metricType: prometheus.GaugeValue, extraLabel: []string{"label"}, extraLabelValue: []string{s.Label}, }, { - name: "global_rsv_size_bytes", - desc: "Size of global reserve.", - value: float64(s.Allocation.GlobalRsvSize), + name: "global_rsv_size_bytes", + desc: "Size of global reserve.", + metricType: prometheus.GaugeValue, + value: float64(s.Allocation.GlobalRsvSize), + }, + { + name: "commits_total", + desc: "The total number of commits that have occurred.", + metricType: prometheus.CounterValue, + value: float64(s.CommitStats.Commits), + }, + { + name: "last_commit_seconds", + desc: "Duration of the most recent commit, in seconds.", + metricType: prometheus.GaugeValue, + value: float64(s.CommitStats.LastCommitMs) / 1000, + }, + { + name: "max_commit_seconds", + desc: "Duration of the slowest commit, in seconds.", + metricType: prometheus.GaugeValue, + value: float64(s.CommitStats.MaxCommitMs) / 1000, + }, + { + name: "commit_seconds_total", + desc: "Sum of the duration of all commits, in seconds.", + metricType: prometheus.CounterValue, + value: float64(s.CommitStats.TotalCommitMs) / 1000, }, - } - - // Information about devices. - for n, dev := range s.Devices { - metrics = append(metrics, btrfsMetric{ - name: "device_size_bytes", - desc: "Size of a device that is part of the filesystem.", - value: float64(dev.Size), - extraLabel: []string{"device"}, - extraLabelValue: []string{n}, - }) } // Information about data, metadata and system data. @@ -138,6 +304,81 @@ func (c *btrfsCollector) getMetrics(s *btrfs.Stats) []btrfsMetric { metrics = append(metrics, c.getAllocationStats("metadata", s.Allocation.Metadata)...) metrics = append(metrics, c.getAllocationStats("system", s.Allocation.System)...) + // Information about devices. + if ioctlStats == nil { + for n, dev := range s.Devices { + metrics = append(metrics, btrfsMetric{ + name: "device_size_bytes", + desc: "Size of a device that is part of the filesystem.", + metricType: prometheus.GaugeValue, + value: float64(dev.Size), + extraLabel: []string{"device"}, + extraLabelValue: []string{n}, + }) + } + return metrics + } + + for _, dev := range ioctlStats.devices { + // trim the path prefix from the device name so the value should match + // the value used in the fallback branch above. + // e.g. 
/dev/sda -> sda, /rootfs/dev/md1 -> md1 + _, device := path.Split(dev.path) + + extraLabels := []string{"device", "btrfs_dev_uuid"} + extraLabelValues := []string{device, dev.uuid} + + metrics = append(metrics, + btrfsMetric{ + name: "device_size_bytes", + desc: "Size of a device that is part of the filesystem.", + metricType: prometheus.GaugeValue, + value: float64(dev.totalBytes), + extraLabel: extraLabels, + extraLabelValue: extraLabelValues, + }, + // A bytes available metric is probably more useful than a + // bytes used metric, because large numbers of bytes will + // suffer from floating point representation issues + // and we probably care more about the number when it's low anyway + btrfsMetric{ + name: "device_unused_bytes", + desc: "Unused bytes unused on a device that is part of the filesystem.", + metricType: prometheus.GaugeValue, + value: float64(dev.totalBytes - dev.bytesUsed), + extraLabel: extraLabels, + extraLabelValue: extraLabelValues, + }) + + errorLabels := append([]string{"type"}, extraLabels...) + values := []uint64{ + dev.writeErrs, + dev.readErrs, + dev.flushErrs, + dev.corruptionErrs, + dev.generationErrs, + } + btrfsErrorTypeNames := []string{ + "write", + "read", + "flush", + "corruption", + "generation", + } + + for i, errorType := range btrfsErrorTypeNames { + metrics = append(metrics, + btrfsMetric{ + name: "device_errors_total", + desc: "Errors reported for the device", + metricType: prometheus.CounterValue, + value: float64(values[i]), + extraLabel: errorLabels, + extraLabelValue: append([]string{errorType}, extraLabelValues...), + }) + } + } + return metrics } @@ -147,6 +388,7 @@ func (c *btrfsCollector) getAllocationStats(a string, s *btrfs.AllocationStats) { name: "reserved_bytes", desc: "Amount of space reserved for a data type", + metricType: prometheus.GaugeValue, value: float64(s.ReservedBytes), extraLabel: []string{"block_group_type"}, extraLabelValue: []string{a}, @@ -167,6 +409,7 @@ func (c *btrfsCollector) getLayoutStats(a, l string, s *btrfs.LayoutUsage) []btr { name: "used_bytes", desc: "Amount of used space by a layout/data type", + metricType: prometheus.GaugeValue, value: float64(s.UsedBytes), extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{a, l}, @@ -174,6 +417,7 @@ func (c *btrfsCollector) getLayoutStats(a, l string, s *btrfs.LayoutUsage) []btr { name: "size_bytes", desc: "Amount of space allocated for a layout/data type", + metricType: prometheus.GaugeValue, value: float64(s.TotalBytes), extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{a, l}, @@ -181,6 +425,7 @@ func (c *btrfsCollector) getLayoutStats(a, l string, s *btrfs.LayoutUsage) []btr { name: "allocation_ratio", desc: "Data allocation ratio for a layout/data type", + metricType: prometheus.GaugeValue, value: s.Ratio, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{a, l}, diff --git a/collector/btrfs_linux_test.go b/collector/btrfs_linux_test.go index 9db74196ad..7be7bc39bb 100644 --- a/collector/btrfs_linux_test.go +++ b/collector/btrfs_linux_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !nobtrfs +//go:build !nobtrfs package collector @@ -26,8 +26,10 @@ var expectedBtrfsMetrics = [][]btrfsMetric{ { {name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{"fixture"}}, {name: "global_rsv_size_bytes", value: 1.6777216e+07}, - {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop25"}}, - {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop26"}}, + {name: "commits_total", value: 258051, metricType: 1}, + {name: "last_commit_seconds", value: 1.0}, + {name: "max_commit_seconds", value: 51.462}, + {name: "commit_seconds_total", value: 47836.090, metricType: 1}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}}, {name: "used_bytes", value: 8.08189952e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, {name: "size_bytes", value: 2.147483648e+09, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, @@ -40,14 +42,16 @@ var expectedBtrfsMetrics = [][]btrfsMetric{ {name: "used_bytes", value: 16384, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid1"}}, {name: "size_bytes", value: 8.388608e+06, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid1"}}, {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid1"}}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop25"}}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop26"}}, }, { {name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{""}}, {name: "global_rsv_size_bytes", value: 1.6777216e+07}, - {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop22"}}, - {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop23"}}, - {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop24"}}, - {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop25"}}, + {name: "commits_total", value: 0, metricType: 1}, + {name: "last_commit_seconds", value: 0}, + {name: "max_commit_seconds", value: 0}, + {name: "commit_seconds_total", value: 0, metricType: 1}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}}, {name: "used_bytes", value: 0, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, {name: "size_bytes", value: 6.44087808e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, @@ -60,6 +64,10 @@ var expectedBtrfsMetrics = [][]btrfsMetric{ {name: "used_bytes", value: 16384, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid6"}}, {name: "size_bytes", value: 1.6777216e+07, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid6"}}, {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid6"}}, + {name: 
"device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop22"}}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop23"}}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop24"}}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop25"}}, }, } @@ -91,7 +99,10 @@ func checkMetric(exp, got *btrfsMetric) bool { } func TestBtrfs(t *testing.T) { - fs, _ := btrfs.NewFS("fixtures/sys") + fs, err := btrfs.NewFS("fixtures/sys") + if err != nil { + t.Fatal(err) + } collector := &btrfsCollector{fs: fs} stats, err := collector.fs.Stats() @@ -103,7 +114,7 @@ func TestBtrfs(t *testing.T) { } for i, s := range stats { - metrics := collector.getMetrics(s) + metrics := collector.getMetrics(s, nil) if len(metrics) != len(expectedBtrfsMetrics[i]) { t.Fatalf("Unexpected number of Btrfs metrics: expected %v, got %v", len(expectedBtrfsMetrics[i]), len(metrics)) } diff --git a/collector/buddyinfo.go b/collector/buddyinfo.go index 579c3e4cfc..e0b54fb36b 100644 --- a/collector/buddyinfo.go +++ b/collector/buddyinfo.go @@ -11,17 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nobuddyinfo -// +build !netbsd +//go:build !nobuddyinfo && !netbsd package collector import ( "fmt" + "log/slog" "strconv" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) @@ -33,7 +31,7 @@ const ( type buddyinfoCollector struct { fs procfs.FS desc *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -41,7 +39,7 @@ func init() { } // NewBuddyinfoCollector returns a new Collector exposing buddyinfo stats. -func NewBuddyinfoCollector(logger log.Logger) (Collector, error) { +func NewBuddyinfoCollector(logger *slog.Logger) (Collector, error) { desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, buddyInfoSubsystem, "blocks"), "Count of free blocks according to size.", @@ -62,7 +60,7 @@ func (c *buddyinfoCollector) Update(ch chan<- prometheus.Metric) error { return fmt.Errorf("couldn't get buddyinfo: %w", err) } - level.Debug(c.logger).Log("msg", "Set node_buddy", "buddyInfo", buddyInfo) + c.logger.Debug("Set node_buddy", "buddyInfo", buddyInfo) for _, entry := range buddyInfo { for size, value := range entry.Sizes { ch <- prometheus.MustNewConstMetric( diff --git a/collector/cgroups_linux.go b/collector/cgroups_linux.go new file mode 100644 index 0000000000..f8da892ee4 --- /dev/null +++ b/collector/cgroups_linux.go @@ -0,0 +1,72 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nostat + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +const cgroupsCollectorSubsystem = "cgroups" + +type cgroupSummaryCollector struct { + fs procfs.FS + cgroups *prometheus.Desc + enabled *prometheus.Desc + logger *slog.Logger +} + +func init() { + registerCollector(cgroupsCollectorSubsystem, defaultDisabled, NewCgroupSummaryCollector) +} + +// NewCgroupSummaryCollector returns a new Collector exposing a summary of cgroups. +func NewCgroupSummaryCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + return &cgroupSummaryCollector{ + fs: fs, + cgroups: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cgroupsCollectorSubsystem, "cgroups"), + "Current cgroup number of the subsystem.", + []string{"subsys_name"}, nil, + ), + enabled: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cgroupsCollectorSubsystem, "enabled"), + "Current cgroup number of the subsystem.", + []string{"subsys_name"}, nil, + ), + logger: logger, + }, nil +} + +// Update implements Collector and exposes cgroup statistics. +func (c *cgroupSummaryCollector) Update(ch chan<- prometheus.Metric) error { + cgroupSummarys, err := c.fs.CgroupSummarys() + if err != nil { + return err + } + for _, cs := range cgroupSummarys { + ch <- prometheus.MustNewConstMetric(c.cgroups, prometheus.GaugeValue, float64(cs.Cgroups), cs.SubsysName) + ch <- prometheus.MustNewConstMetric(c.enabled, prometheus.GaugeValue, float64(cs.Enabled), cs.SubsysName) + } + return nil +} diff --git a/collector/collector.go b/collector/collector.go index e9ea14c0c5..18ff7388ca 100644 --- a/collector/collector.go +++ b/collector/collector.go @@ -17,13 +17,12 @@ package collector import ( "errors" "fmt" + "log/slog" "sync" "time" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" - kingpin "gopkg.in/alecthomas/kingpin.v2" ) // Namespace defines the common namespace to be used by all metrics. @@ -50,12 +49,14 @@ const ( ) var ( - factories = make(map[string]func(logger log.Logger) (Collector, error)) - collectorState = make(map[string]*bool) - forcedCollectors = map[string]bool{} // collectors which have been explicitly enabled or disabled + factories = make(map[string]func(logger *slog.Logger) (Collector, error)) + initiatedCollectorsMtx = sync.Mutex{} + initiatedCollectors = make(map[string]Collector) + collectorState = make(map[string]*bool) + forcedCollectors = map[string]bool{} // collectors which have been explicitly enabled or disabled ) -func registerCollector(collector string, isDefaultEnabled bool, factory func(logger log.Logger) (Collector, error)) { +func registerCollector(collector string, isDefaultEnabled bool, factory func(logger *slog.Logger) (Collector, error)) { var helpDefaultState string if isDefaultEnabled { helpDefaultState = "enabled" @@ -76,7 +77,7 @@ func registerCollector(collector string, isDefaultEnabled bool, factory func(log // NodeCollector implements the prometheus.Collector interface. 
type NodeCollector struct { Collectors map[string]Collector - logger log.Logger + logger *slog.Logger } // DisableDefaultCollectors sets the collector state to false for all collectors which @@ -102,7 +103,7 @@ func collectorFlagAction(collector string) func(ctx *kingpin.ParseContext) error } // NewNodeCollector creates a new NodeCollector. -func NewNodeCollector(logger log.Logger, filters ...string) (*NodeCollector, error) { +func NewNodeCollector(logger *slog.Logger, filters ...string) (*NodeCollector, error) { f := make(map[string]bool) for _, filter := range filters { enabled, exist := collectorState[filter] @@ -115,15 +116,21 @@ func NewNodeCollector(logger log.Logger, filters ...string) (*NodeCollector, err f[filter] = true } collectors := make(map[string]Collector) + initiatedCollectorsMtx.Lock() + defer initiatedCollectorsMtx.Unlock() for key, enabled := range collectorState { - if *enabled { - collector, err := factories[key](log.With(logger, "collector", key)) + if !*enabled || (len(f) > 0 && !f[key]) { + continue + } + if collector, ok := initiatedCollectors[key]; ok { + collectors[key] = collector + } else { + collector, err := factories[key](logger.With("collector", key)) if err != nil { return nil, err } - if len(f) == 0 || f[key] { - collectors[key] = collector - } + collectors[key] = collector + initiatedCollectors[key] = collector } } return &NodeCollector{Collectors: collectors, logger: logger}, nil @@ -148,7 +155,7 @@ func (n NodeCollector) Collect(ch chan<- prometheus.Metric) { wg.Wait() } -func execute(name string, c Collector, ch chan<- prometheus.Metric, logger log.Logger) { +func execute(name string, c Collector, ch chan<- prometheus.Metric, logger *slog.Logger) { begin := time.Now() err := c.Update(ch) duration := time.Since(begin) @@ -156,13 +163,13 @@ func execute(name string, c Collector, ch chan<- prometheus.Metric, logger log.L if err != nil { if IsNoDataError(err) { - level.Debug(logger).Log("msg", "collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err) + logger.Debug("collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err) } else { - level.Error(logger).Log("msg", "collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err) + logger.Error("collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err) } success = 0 } else { - level.Debug(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds()) + logger.Debug("collector succeeded", "name", name, "duration_seconds", duration.Seconds()) success = 1 } ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name) @@ -190,3 +197,49 @@ var ErrNoData = errors.New("collector returned no data") func IsNoDataError(err error) bool { return err == ErrNoData } + +// pushMetric helps construct and convert a variety of value types into Prometheus float64 metrics. 
+func pushMetric(ch chan<- prometheus.Metric, fieldDesc *prometheus.Desc, name string, value any, valueType prometheus.ValueType, labelValues ...string) { + var fVal float64 + switch val := value.(type) { + case uint8: + fVal = float64(val) + case uint16: + fVal = float64(val) + case uint32: + fVal = float64(val) + case uint64: + fVal = float64(val) + case int64: + fVal = float64(val) + case *uint8: + if val == nil { + return + } + fVal = float64(*val) + case *uint16: + if val == nil { + return + } + fVal = float64(*val) + case *uint32: + if val == nil { + return + } + fVal = float64(*val) + case *uint64: + if val == nil { + return + } + fVal = float64(*val) + case *int64: + if val == nil { + return + } + fVal = float64(*val) + default: + return + } + + ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, fVal, labelValues...) +} diff --git a/collector/conntrack_linux.go b/collector/conntrack_linux.go index 9dc8114f1a..376ab05af5 100644 --- a/collector/conntrack_linux.go +++ b/collector/conntrack_linux.go @@ -11,19 +11,43 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noconntrack +//go:build !noconntrack package collector import ( - "github.com/go-kit/kit/log" + "errors" + "fmt" + "log/slog" + "os" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" ) type conntrackCollector struct { - current *prometheus.Desc - limit *prometheus.Desc - logger log.Logger + current *prometheus.Desc + limit *prometheus.Desc + found *prometheus.Desc + invalid *prometheus.Desc + ignore *prometheus.Desc + insert *prometheus.Desc + insertFailed *prometheus.Desc + drop *prometheus.Desc + earlyDrop *prometheus.Desc + searchRestart *prometheus.Desc + logger *slog.Logger +} + +type conntrackStatistics struct { + found uint64 // Number of searched entries which were successful + invalid uint64 // Number of packets seen which can not be tracked + ignore uint64 // Number of packets seen which are already connected to a conntrack entry + insert uint64 // Number of entries inserted into the list + insertFailed uint64 // Number of entries for which list insertion was attempted but failed (happens if the same entry is already present) + drop uint64 // Number of packets dropped due to conntrack failure. Either new conntrack entry allocation failed, or protocol helper dropped the packet + earlyDrop uint64 // Number of dropped conntrack entries to make room for new ones, if maximum table size was reached + searchRestart uint64 // Number of conntrack table lookups which had to be restarted due to hashtable resizes } func init() { @@ -31,7 +55,7 @@ func init() { } // NewConntrackCollector returns a new Collector exposing conntrack stats. 
-func NewConntrackCollector(logger log.Logger) (Collector, error) { +func NewConntrackCollector(logger *slog.Logger) (Collector, error) { return &conntrackCollector{ current: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_entries"), @@ -43,6 +67,46 @@ func NewConntrackCollector(logger log.Logger) (Collector, error) { "Maximum size of connection tracking table.", nil, nil, ), + found: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_found"), + "Number of searched entries which were successful.", + nil, nil, + ), + invalid: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_invalid"), + "Number of packets seen which can not be tracked.", + nil, nil, + ), + ignore: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_ignore"), + "Number of packets seen which are already connected to a conntrack entry.", + nil, nil, + ), + insert: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_insert"), + "Number of entries inserted into the list.", + nil, nil, + ), + insertFailed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_insert_failed"), + "Number of entries for which list insertion was attempted but failed.", + nil, nil, + ), + drop: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_drop"), + "Number of packets dropped due to conntrack failure.", + nil, nil, + ), + earlyDrop: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_early_drop"), + "Number of dropped conntrack entries to make room for new ones, if maximum table size was reached.", + nil, nil, + ), + searchRestart: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_search_restart"), + "Number of conntrack table lookups which had to be restarted due to hashtable resizes.", + nil, nil, + ), logger: logger, }, nil } @@ -50,18 +114,73 @@ func NewConntrackCollector(logger log.Logger) (Collector, error) { func (c *conntrackCollector) Update(ch chan<- prometheus.Metric) error { value, err := readUintFromFile(procFilePath("sys/net/netfilter/nf_conntrack_count")) if err != nil { - // Conntrack probably not loaded into the kernel. 
- return nil + return c.handleErr(err) } ch <- prometheus.MustNewConstMetric( c.current, prometheus.GaugeValue, float64(value)) value, err = readUintFromFile(procFilePath("sys/net/netfilter/nf_conntrack_max")) if err != nil { - return nil + return c.handleErr(err) } ch <- prometheus.MustNewConstMetric( c.limit, prometheus.GaugeValue, float64(value)) + conntrackStats, err := getConntrackStatistics() + if err != nil { + return c.handleErr(err) + } + + ch <- prometheus.MustNewConstMetric( + c.found, prometheus.GaugeValue, float64(conntrackStats.found)) + ch <- prometheus.MustNewConstMetric( + c.invalid, prometheus.GaugeValue, float64(conntrackStats.invalid)) + ch <- prometheus.MustNewConstMetric( + c.ignore, prometheus.GaugeValue, float64(conntrackStats.ignore)) + ch <- prometheus.MustNewConstMetric( + c.insert, prometheus.GaugeValue, float64(conntrackStats.insert)) + ch <- prometheus.MustNewConstMetric( + c.insertFailed, prometheus.GaugeValue, float64(conntrackStats.insertFailed)) + ch <- prometheus.MustNewConstMetric( + c.drop, prometheus.GaugeValue, float64(conntrackStats.drop)) + ch <- prometheus.MustNewConstMetric( + c.earlyDrop, prometheus.GaugeValue, float64(conntrackStats.earlyDrop)) + ch <- prometheus.MustNewConstMetric( + c.searchRestart, prometheus.GaugeValue, float64(conntrackStats.searchRestart)) return nil } + +func (c *conntrackCollector) handleErr(err error) error { + if errors.Is(err, os.ErrNotExist) { + c.logger.Debug("conntrack probably not loaded") + return ErrNoData + } + return fmt.Errorf("failed to retrieve conntrack stats: %w", err) +} + +func getConntrackStatistics() (*conntrackStatistics, error) { + c := conntrackStatistics{} + + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + connStats, err := fs.ConntrackStat() + if err != nil { + return nil, err + } + + for _, connStat := range connStats { + c.found += connStat.Found + c.invalid += connStat.Invalid + c.ignore += connStat.Ignore + c.insert += connStat.Insert + c.insertFailed += connStat.InsertFailed + c.drop += connStat.Drop + c.earlyDrop += connStat.EarlyDrop + c.searchRestart += connStat.SearchRestart + } + + return &c, nil +} diff --git a/collector/cpu_aix.go b/collector/cpu_aix.go new file mode 100644 index 0000000000..1368f1b368 --- /dev/null +++ b/collector/cpu_aix.go @@ -0,0 +1,131 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
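+
+// This file adds an AIX CPU collector backed by the perfstat library and
+// cgo's sysconf(_SC_CLK_TCK). A rough sketch of the unit conversions done in
+// Update below (identifiers are the ones defined in this file; series names
+// follow from the metric descriptors, assuming namespace "node"):
+//
+//	node_cpu_seconds_total          = stat.User / tickPerSecond       // likewise Sys, Idle, Wait
+//	node_cpu_physical_seconds_total = stat.PUser / purrTicksPerSecond // likewise PSys, PIdle, PWait
+//
+// where tickPerSecond comes from sysconf(_SC_CLK_TCK) and purrTicksPerSecond
+// is derived from the partition's ProcessorMhz reported by perfstat.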
+ +//go:build !nocpu + +package collector + +/* +#include // Include the standard Unix header +#include // For errno +*/ +import "C" +import ( + "fmt" + "log/slog" + "strconv" + + "github.com/power-devops/perfstat" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + nodeCPUPhysicalSecondsDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "physical_seconds_total"), + "Seconds the physical CPUs spent in each mode.", + []string{"cpu", "mode"}, nil, + ) + nodeCPUSRunQueueDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "runqueue"), + "Length of the run queue.", []string{"cpu"}, nil, + ) + nodeCPUFlagsDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "flags"), + "CPU flags.", + []string{"cpu", "flag"}, nil, + ) + nodeCPUContextSwitchDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "context_switches_total"), + "Number of context switches.", + []string{"cpu"}, nil, + ) +) + +type cpuCollector struct { + cpu typedDesc + cpuPhysical typedDesc + cpuRunQueue typedDesc + cpuFlags typedDesc + cpuContextSwitch typedDesc + + logger *slog.Logger + tickPerSecond float64 + purrTicksPerSecond float64 +} + +func init() { + registerCollector("cpu", defaultEnabled, NewCpuCollector) +} + +func tickPerSecond() (float64, error) { + ticks, err := C.sysconf(C._SC_CLK_TCK) + if ticks == -1 || err != nil { + return 0, fmt.Errorf("failed to get clock ticks per second: %v", err) + } + return float64(ticks), nil +} + +func NewCpuCollector(logger *slog.Logger) (Collector, error) { + ticks, err := tickPerSecond() + if err != nil { + return nil, err + } + + pconfig, err := perfstat.PartitionStat() + + if err != nil { + return nil, err + } + + return &cpuCollector{ + cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, + cpuPhysical: typedDesc{nodeCPUPhysicalSecondsDesc, prometheus.CounterValue}, + cpuRunQueue: typedDesc{nodeCPUSRunQueueDesc, prometheus.GaugeValue}, + cpuFlags: typedDesc{nodeCPUFlagsDesc, prometheus.GaugeValue}, + cpuContextSwitch: typedDesc{nodeCPUContextSwitchDesc, prometheus.CounterValue}, + logger: logger, + tickPerSecond: ticks, + purrTicksPerSecond: float64(pconfig.ProcessorMhz * 1e6), + }, nil +} + +func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error { + stats, err := perfstat.CpuStat() + if err != nil { + return err + } + + for n, stat := range stats { + // LPAR metrics + ch <- c.cpu.mustNewConstMetric(float64(stat.User)/c.tickPerSecond, strconv.Itoa(n), "user") + ch <- c.cpu.mustNewConstMetric(float64(stat.Sys)/c.tickPerSecond, strconv.Itoa(n), "system") + ch <- c.cpu.mustNewConstMetric(float64(stat.Idle)/c.tickPerSecond, strconv.Itoa(n), "idle") + ch <- c.cpu.mustNewConstMetric(float64(stat.Wait)/c.tickPerSecond, strconv.Itoa(n), "wait") + + // Physical CPU metrics + ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PIdle)/c.purrTicksPerSecond, strconv.Itoa(n), "pidle") + ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PUser)/c.purrTicksPerSecond, strconv.Itoa(n), "puser") + ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PSys)/c.purrTicksPerSecond, strconv.Itoa(n), "psys") + ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PWait)/c.purrTicksPerSecond, strconv.Itoa(n), "pwait") + + // Run queue length + ch <- c.cpuRunQueue.mustNewConstMetric(float64(stat.RunQueue), strconv.Itoa(n)) + + // Flags + ch <- c.cpuFlags.mustNewConstMetric(float64(stat.SpurrFlag), strconv.Itoa(n), "spurr") + + // Context switches + ch 
<- c.cpuContextSwitch.mustNewConstMetric(float64(stat.CSwitches), strconv.Itoa(n)) + } + return nil +} diff --git a/collector/cpu_common.go b/collector/cpu_common.go index 1a786076c9..cb34facbfa 100644 --- a/collector/cpu_common.go +++ b/collector/cpu_common.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nocpu +//go:build !nocpu package collector @@ -26,7 +26,7 @@ const ( var ( nodeCPUSecondsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "seconds_total"), - "Seconds the cpus spent in each mode.", + "Seconds the CPUs spent in each mode.", []string{"cpu", "mode"}, nil, ) ) diff --git a/collector/cpu_darwin.go b/collector/cpu_darwin.go index ba4ba809e6..e4e312aeb3 100644 --- a/collector/cpu_darwin.go +++ b/collector/cpu_darwin.go @@ -14,7 +14,7 @@ // Based on gopsutil/cpu/cpu_darwin_cgo.go @ ae251eb which is licensed under // BSD. See https://github.com/shirou/gopsutil/blob/master/LICENSE for details. -// +build !nocpu +//go:build !nocpu package collector @@ -22,10 +22,10 @@ import ( "bytes" "encoding/binary" "fmt" + "log/slog" "strconv" "unsafe" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -38,6 +38,7 @@ import ( #include #include #include +#include #if TARGET_OS_MAC #include #endif @@ -51,7 +52,7 @@ const ClocksPerSec = float64(C.CLK_TCK) type statCollector struct { cpu *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -59,7 +60,7 @@ func init() { } // NewCPUCollector returns a new Collector exposing CPU stats. -func NewCPUCollector(logger log.Logger) (Collector, error) { +func NewCPUCollector(logger *slog.Logger) (Collector, error) { return &statCollector{ cpu: nodeCPUSecondsDesc, logger: logger, diff --git a/collector/cpu_dragonfly.go b/collector/cpu_dragonfly.go index 4ccb729d35..3571f436d5 100644 --- a/collector/cpu_dragonfly.go +++ b/collector/cpu_dragonfly.go @@ -11,16 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nocpu +//go:build !nocpu package collector import ( "errors" + "log/slog" "strconv" "unsafe" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -77,7 +77,7 @@ const maxCPUTimesLen = C.MAXCPU * C.CPUSTATES type statCollector struct { cpu *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -85,7 +85,7 @@ func init() { } // NewStatCollector returns a new Collector exposing CPU stats. -func NewStatCollector(logger log.Logger) (Collector, error) { +func NewStatCollector(logger *slog.Logger) (Collector, error) { return &statCollector{ cpu: nodeCPUSecondsDesc, logger: logger, @@ -93,7 +93,7 @@ func NewStatCollector(logger log.Logger) (Collector, error) { } func getDragonFlyCPUTimes() ([]float64, error) { - // We want time spent per-cpu per CPUSTATE. + // We want time spent per-CPU per CPUSTATE. // CPUSTATES (number of CPUSTATES) is defined as 5U. // States: CP_USER | CP_NICE | CP_SYS | CP_IDLE | CP_INTR // diff --git a/collector/cpu_dragonfly_test.go b/collector/cpu_dragonfly_test.go index 4be0d5bbbd..c5d006fa9e 100644 --- a/collector/cpu_dragonfly_test.go +++ b/collector/cpu_dragonfly_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !nocpu +//go:build !nocpu package collector diff --git a/collector/cpu_freebsd.go b/collector/cpu_freebsd.go index 41573e40e3..6532029ec1 100644 --- a/collector/cpu_freebsd.go +++ b/collector/cpu_freebsd.go @@ -11,18 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nocpu +//go:build !nocpu package collector import ( "fmt" + "log/slog" "math" "strconv" "unsafe" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) @@ -84,7 +83,7 @@ func getCPUTimes() ([]cputime, error) { type statCollector struct { cpu typedDesc temp typedDesc - logger log.Logger + logger *slog.Logger } func init() { @@ -92,7 +91,7 @@ func init() { } // NewStatCollector returns a new Collector exposing CPU stats. -func NewStatCollector(logger log.Logger) (Collector, error) { +func NewStatCollector(logger *slog.Logger) (Collector, error) { return &statCollector{ cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, temp: typedDesc{prometheus.NewDesc( @@ -133,11 +132,11 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) error { if err != nil { if err == unix.ENOENT { // No temperature information for this CPU - level.Debug(c.logger).Log("msg", "no temperature information for CPU", "cpu", cpu) + c.logger.Debug("no temperature information for CPU", "cpu", cpu) } else { // Unexpected error ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu) - level.Error(c.logger).Log("msg", "failed to query CPU temperature for CPU", "cpu", cpu, "err", err) + c.logger.Error("failed to query CPU temperature for CPU", "cpu", cpu, "err", err) } continue } diff --git a/collector/cpu_linux.go b/collector/cpu_linux.go index dfa4d4afc4..1fef28908e 100644 --- a/collector/cpu_linux.go +++ b/collector/cpu_linux.go @@ -11,37 +11,60 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nocpu +//go:build !nocpu package collector import ( + "errors" "fmt" + "log/slog" + "os" "path/filepath" + "regexp" + "slices" "strconv" "sync" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "golang.org/x/exp/maps" + + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" - "gopkg.in/alecthomas/kingpin.v2" + "github.com/prometheus/procfs/sysfs" ) type cpuCollector struct { - fs procfs.FS + procfs procfs.FS + sysfs sysfs.FS cpu *prometheus.Desc cpuInfo *prometheus.Desc + cpuFrequencyHz *prometheus.Desc + cpuFlagsInfo *prometheus.Desc + cpuBugsInfo *prometheus.Desc cpuGuest *prometheus.Desc cpuCoreThrottle *prometheus.Desc cpuPackageThrottle *prometheus.Desc - logger log.Logger - cpuStats []procfs.CPUStat + cpuIsolated *prometheus.Desc + logger *slog.Logger + cpuOnline *prometheus.Desc + cpuStats map[int64]procfs.CPUStat cpuStatsMutex sync.Mutex + isolatedCpus []uint16 + + cpuFlagsIncludeRegexp *regexp.Regexp + cpuBugsIncludeRegexp *regexp.Regexp } +// Idle jump back limit in seconds. 
+const jumpBackSeconds = 3.0 + var ( - enableCPUInfo = kingpin.Flag("collector.cpu.info", "Enables metric cpu_info").Bool() + enableCPUGuest = kingpin.Flag("collector.cpu.guest", "Enables metric node_cpu_guest_seconds_total").Default("true").Bool() + enableCPUInfo = kingpin.Flag("collector.cpu.info", "Enables metric cpu_info").Bool() + flagsInclude = kingpin.Flag("collector.cpu.info.flags-include", "Filter the `flags` field in cpuInfo with a value that must be a regular expression").String() + bugsInclude = kingpin.Flag("collector.cpu.info.bugs-include", "Filter the `bugs` field in cpuInfo with a value that must be a regular expression").String() + jumpBackDebugMessage = fmt.Sprintf("CPU Idle counter jumped backwards more than %f seconds, possible hotplug event, resetting CPU stats", jumpBackSeconds) ) func init() { @@ -49,36 +72,105 @@ func init() { } // NewCPUCollector returns a new Collector exposing kernel/system statistics. -func NewCPUCollector(logger log.Logger) (Collector, error) { - fs, err := procfs.NewFS(*procPath) +func NewCPUCollector(logger *slog.Logger) (Collector, error) { + pfs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } - return &cpuCollector{ - fs: fs, - cpu: nodeCPUSecondsDesc, + + sfs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + isolcpus, err := sfs.IsolatedCPUs() + if err != nil { + if !os.IsNotExist(err) { + return nil, fmt.Errorf("unable to get isolated cpus: %w", err) + } + logger.Debug("couldn't open isolated file", "error", err) + } + + c := &cpuCollector{ + procfs: pfs, + sysfs: sfs, + cpu: nodeCPUSecondsDesc, cpuInfo: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "info"), "CPU information from /proc/cpuinfo.", []string{"package", "core", "cpu", "vendor", "family", "model", "model_name", "microcode", "stepping", "cachesize"}, nil, ), + cpuFrequencyHz: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"), + "CPU frequency in hertz from /proc/cpuinfo.", + []string{"package", "core", "cpu"}, nil, + ), + cpuFlagsInfo: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "flag_info"), + "The `flags` field of CPU information from /proc/cpuinfo taken from the first core.", + []string{"flag"}, nil, + ), + cpuBugsInfo: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "bug_info"), + "The `bugs` field of CPU information from /proc/cpuinfo taken from the first core.", + []string{"bug"}, nil, + ), cpuGuest: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "guest_seconds_total"), - "Seconds the cpus spent in guests (VMs) for each mode.", + "Seconds the CPUs spent in guests (VMs) for each mode.", []string{"cpu", "mode"}, nil, ), cpuCoreThrottle: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "core_throttles_total"), - "Number of times this cpu core has been throttled.", + "Number of times this CPU core has been throttled.", []string{"package", "core"}, nil, ), cpuPackageThrottle: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "package_throttles_total"), - "Number of times this cpu package has been throttled.", + "Number of times this CPU package has been throttled.", []string{"package"}, nil, ), - logger: logger, - }, nil + cpuIsolated: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "isolated"), + 
"Whether each core is isolated, information from /sys/devices/system/cpu/isolated.", + []string{"cpu"}, nil, + ), + cpuOnline: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "online"), + "CPUs that are online and being scheduled.", + []string{"cpu"}, nil, + ), + logger: logger, + isolatedCpus: isolcpus, + cpuStats: make(map[int64]procfs.CPUStat), + } + err = c.compileIncludeFlags(flagsInclude, bugsInclude) + if err != nil { + return nil, fmt.Errorf("fail to compile --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include, the values of them must be regular expressions: %w", err) + } + return c, nil +} + +func (c *cpuCollector) compileIncludeFlags(flagsIncludeFlag, bugsIncludeFlag *string) error { + if (*flagsIncludeFlag != "" || *bugsIncludeFlag != "") && !*enableCPUInfo { + *enableCPUInfo = true + c.logger.Info("--collector.cpu.info has been set to `true` because you set the following flags, like --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include") + } + + var err error + if *flagsIncludeFlag != "" { + c.cpuFlagsIncludeRegexp, err = regexp.Compile(*flagsIncludeFlag) + if err != nil { + return err + } + } + if *bugsIncludeFlag != "" { + c.cpuBugsIncludeRegexp, err = regexp.Compile(*bugsIncludeFlag) + if err != nil { + return err + } + } + return nil } // Update implements Collector and exposes cpu related metrics from /proc/stat and /sys/.../cpu/. @@ -91,15 +183,24 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error { if err := c.updateStat(ch); err != nil { return err } - if err := c.updateThermalThrottle(ch); err != nil { + if c.isolatedCpus != nil { + c.updateIsolated(ch) + } + err := c.updateThermalThrottle(ch) + if err != nil { + return err + } + err = c.updateOnline(ch) + if err != nil { return err } + return nil } // updateInfo reads /proc/cpuinfo func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error { - info, err := c.fs.CPUInfo() + info, err := c.procfs.CPUInfo() if err != nil { return err } @@ -118,6 +219,49 @@ func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error { cpu.Stepping, cpu.CacheSize) } + + cpuFreqEnabled, ok := collectorState["cpufreq"] + if !ok || cpuFreqEnabled == nil { + c.logger.Debug("cpufreq key missing or nil value in collectorState map") + } else if *cpuFreqEnabled { + for _, cpu := range info { + ch <- prometheus.MustNewConstMetric(c.cpuFrequencyHz, + prometheus.GaugeValue, + cpu.CPUMHz*1e6, + cpu.PhysicalID, + cpu.CoreID, + strconv.Itoa(int(cpu.Processor))) + } + } + + if len(info) != 0 { + cpu := info[0] + if err := updateFieldInfo(cpu.Flags, c.cpuFlagsIncludeRegexp, c.cpuFlagsInfo, ch); err != nil { + return err + } + if err := updateFieldInfo(cpu.Bugs, c.cpuBugsIncludeRegexp, c.cpuBugsInfo, ch); err != nil { + return err + } + } + + return nil +} + +func updateFieldInfo(valueList []string, filter *regexp.Regexp, desc *prometheus.Desc, ch chan<- prometheus.Metric) error { + if filter == nil { + return nil + } + + for _, val := range valueList { + if !filter.MatchString(val) { + continue + } + ch <- prometheus.MustNewConstMetric(desc, + prometheus.GaugeValue, + 1, + val, + ) + } return nil } @@ -142,19 +286,19 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error // topology/physical_package_id if physicalPackageID, err = readUintFromFile(filepath.Join(cpu, "topology", "physical_package_id")); err != nil { - level.Debug(c.logger).Log("msg", "CPU is missing physical_package_id", "cpu", cpu) + c.logger.Debug("CPU is 
missing physical_package_id", "cpu", cpu) continue } // topology/core_id if coreID, err = readUintFromFile(filepath.Join(cpu, "topology", "core_id")); err != nil { - level.Debug(c.logger).Log("msg", "CPU is missing core_id", "cpu", cpu) + c.logger.Debug("CPU is missing core_id", "cpu", cpu) continue } // metric node_cpu_core_throttles_total // // We process this metric before the package throttles as there - // are cpu+kernel combinations that only present core throttles + // are CPU+kernel combinations that only present core throttles // but no package throttles. // Seen e.g. on an Intel Xeon E5472 system with RHEL 6.9 kernel. if _, present := packageCoreThrottles[physicalPackageID]; !present { @@ -165,7 +309,7 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error if coreThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "core_throttle_count")); err == nil { packageCoreThrottles[physicalPackageID][coreID] = coreThrottleCount } else { - level.Debug(c.logger).Log("msg", "CPU is missing core_throttle_count", "cpu", cpu) + c.logger.Debug("CPU is missing core_throttle_count", "cpu", cpu) } } @@ -175,7 +319,7 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error if packageThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "package_throttle_count")); err == nil { packageThrottles[physicalPackageID] = packageThrottleCount } else { - level.Debug(c.logger).Log("msg", "CPU is missing package_throttle_count", "cpu", cpu) + c.logger.Debug("CPU is missing package_throttle_count", "cpu", cpu) } } } @@ -199,9 +343,39 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error return nil } -// updateStat reads /proc/stat through procfs and exports cpu related metrics. +// updateIsolated reads /sys/devices/system/cpu/isolated through sysfs and exports isolation level metrics. +func (c *cpuCollector) updateIsolated(ch chan<- prometheus.Metric) { + for _, cpu := range c.isolatedCpus { + cpuNum := strconv.Itoa(int(cpu)) + ch <- prometheus.MustNewConstMetric(c.cpuIsolated, prometheus.GaugeValue, 1.0, cpuNum) + } +} + +// updateOnline reads /sys/devices/system/cpu/cpu*/online through sysfs and exports online status metrics. +func (c *cpuCollector) updateOnline(ch chan<- prometheus.Metric) error { + cpus, err := c.sysfs.CPUs() + if err != nil { + return err + } + // No-op if the system does not support CPU online stats. + cpu0 := cpus[0] + if _, err := cpu0.Online(); err != nil && errors.Is(err, os.ErrNotExist) { + return nil + } + for _, cpu := range cpus { + setOnline := float64(0) + if online, _ := cpu.Online(); online { + setOnline = 1 + } + ch <- prometheus.MustNewConstMetric(c.cpuOnline, prometheus.GaugeValue, setOnline, cpu.Number()) + } + + return nil +} + +// updateStat reads /proc/stat through procfs and exports CPU-related metrics. 
 func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error {
-	stats, err := c.fs.Stat()
+	stats, err := c.procfs.Stat()
 	if err != nil {
 		return err
 	}
@@ -212,7 +386,7 @@ func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error {
 	c.cpuStatsMutex.Lock()
 	defer c.cpuStatsMutex.Unlock()
 	for cpuID, cpuStat := range c.cpuStats {
-		cpuNum := strconv.Itoa(cpuID)
+		cpuNum := strconv.Itoa(int(cpuID))
 		ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.User, cpuNum, "user")
 		ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Nice, cpuNum, "nice")
 		ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.System, cpuNum, "system")
@@ -222,85 +396,101 @@ func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error {
 		ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.SoftIRQ, cpuNum, "softirq")
 		ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Steal, cpuNum, "steal")
 
-		// Guest CPU is also accounted for in cpuStat.User and cpuStat.Nice, expose these as separate metrics.
-		ch <- prometheus.MustNewConstMetric(c.cpuGuest, prometheus.CounterValue, cpuStat.Guest, cpuNum, "user")
-		ch <- prometheus.MustNewConstMetric(c.cpuGuest, prometheus.CounterValue, cpuStat.GuestNice, cpuNum, "nice")
+		if *enableCPUGuest {
+			// Guest CPU is also accounted for in cpuStat.User and cpuStat.Nice, expose these as separate metrics.
+			ch <- prometheus.MustNewConstMetric(c.cpuGuest, prometheus.CounterValue, cpuStat.Guest, cpuNum, "user")
+			ch <- prometheus.MustNewConstMetric(c.cpuGuest, prometheus.CounterValue, cpuStat.GuestNice, cpuNum, "nice")
+		}
 	}
 
 	return nil
 }
 
 // updateCPUStats updates the internal cache of CPU stats.
-func (c *cpuCollector) updateCPUStats(newStats []procfs.CPUStat) {
+func (c *cpuCollector) updateCPUStats(newStats map[int64]procfs.CPUStat) {
+	// Acquire a lock to update the stats.
 	c.cpuStatsMutex.Lock()
 	defer c.cpuStatsMutex.Unlock()
 
 	// Reset the cache if the list of CPUs has changed.
-	if len(c.cpuStats) != len(newStats) {
-		c.cpuStats = make([]procfs.CPUStat, len(newStats))
-	}
-	for i, n := range newStats {
-		// If idle jumps backwards, assume we had a hotplug event and reset the stats for this CPU.
-		if n.Idle < c.cpuStats[i].Idle {
-			level.Warn(c.logger).Log("msg", "CPU Idle counter jumped backwards, possible hotplug event, resetting CPU stats", "cpu", i, "old_value", c.cpuStats[i].Idle, "new_value", n.Idle)
-			c.cpuStats[i] = procfs.CPUStat{}
+	for i, n := range newStats {
+		cpuStats := c.cpuStats[i]
+
+		// If idle jumps backwards by more than X seconds, assume we had a hotplug event and reset the stats for this CPU.
+ if (cpuStats.Idle - n.Idle) >= jumpBackSeconds { + c.logger.Debug(jumpBackDebugMessage, "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle) + cpuStats = procfs.CPUStat{} + } + + if n.Idle >= cpuStats.Idle { + cpuStats.Idle = n.Idle + } else { + c.logger.Debug("CPU Idle counter jumped backwards", "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle) } - c.cpuStats[i].Idle = n.Idle - if n.User >= c.cpuStats[i].User { - c.cpuStats[i].User = n.User + if n.User >= cpuStats.User { + cpuStats.User = n.User } else { - level.Warn(c.logger).Log("msg", "CPU User counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].User, "new_value", n.User) + c.logger.Debug("CPU User counter jumped backwards", "cpu", i, "old_value", cpuStats.User, "new_value", n.User) } - if n.Nice >= c.cpuStats[i].Nice { - c.cpuStats[i].Nice = n.Nice + if n.Nice >= cpuStats.Nice { + cpuStats.Nice = n.Nice } else { - level.Warn(c.logger).Log("msg", "CPU Nice counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Nice, "new_value", n.Nice) + c.logger.Debug("CPU Nice counter jumped backwards", "cpu", i, "old_value", cpuStats.Nice, "new_value", n.Nice) } - if n.System >= c.cpuStats[i].System { - c.cpuStats[i].System = n.System + if n.System >= cpuStats.System { + cpuStats.System = n.System } else { - level.Warn(c.logger).Log("msg", "CPU System counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].System, "new_value", n.System) + c.logger.Debug("CPU System counter jumped backwards", "cpu", i, "old_value", cpuStats.System, "new_value", n.System) } - if n.Iowait >= c.cpuStats[i].Iowait { - c.cpuStats[i].Iowait = n.Iowait + if n.Iowait >= cpuStats.Iowait { + cpuStats.Iowait = n.Iowait } else { - level.Warn(c.logger).Log("msg", "CPU Iowait counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Iowait, "new_value", n.Iowait) + c.logger.Debug("CPU Iowait counter jumped backwards", "cpu", i, "old_value", cpuStats.Iowait, "new_value", n.Iowait) } - if n.IRQ >= c.cpuStats[i].IRQ { - c.cpuStats[i].IRQ = n.IRQ + if n.IRQ >= cpuStats.IRQ { + cpuStats.IRQ = n.IRQ } else { - level.Warn(c.logger).Log("msg", "CPU IRQ counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].IRQ, "new_value", n.IRQ) + c.logger.Debug("CPU IRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.IRQ, "new_value", n.IRQ) } - if n.SoftIRQ >= c.cpuStats[i].SoftIRQ { - c.cpuStats[i].SoftIRQ = n.SoftIRQ + if n.SoftIRQ >= cpuStats.SoftIRQ { + cpuStats.SoftIRQ = n.SoftIRQ } else { - level.Warn(c.logger).Log("msg", "CPU SoftIRQ counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].SoftIRQ, "new_value", n.SoftIRQ) + c.logger.Debug("CPU SoftIRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.SoftIRQ, "new_value", n.SoftIRQ) } - if n.Steal >= c.cpuStats[i].Steal { - c.cpuStats[i].Steal = n.Steal + if n.Steal >= cpuStats.Steal { + cpuStats.Steal = n.Steal } else { - level.Warn(c.logger).Log("msg", "CPU Steal counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Steal, "new_value", n.Steal) + c.logger.Debug("CPU Steal counter jumped backwards", "cpu", i, "old_value", cpuStats.Steal, "new_value", n.Steal) } - if n.Guest >= c.cpuStats[i].Guest { - c.cpuStats[i].Guest = n.Guest + if n.Guest >= cpuStats.Guest { + cpuStats.Guest = n.Guest } else { - level.Warn(c.logger).Log("msg", "CPU Guest counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Guest, "new_value", n.Guest) + c.logger.Debug("CPU Guest counter jumped backwards", "cpu", i, "old_value", cpuStats.Guest, "new_value", n.Guest) 
} - if n.GuestNice >= c.cpuStats[i].GuestNice { - c.cpuStats[i].GuestNice = n.GuestNice + if n.GuestNice >= cpuStats.GuestNice { + cpuStats.GuestNice = n.GuestNice } else { - level.Warn(c.logger).Log("msg", "CPU GuestNice counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].GuestNice, "new_value", n.GuestNice) + c.logger.Debug("CPU GuestNice counter jumped backwards", "cpu", i, "old_value", cpuStats.GuestNice, "new_value", n.GuestNice) } + + c.cpuStats[i] = cpuStats + } + + // Remove offline CPUs. + if len(newStats) != len(c.cpuStats) { + onlineCPUIds := maps.Keys(newStats) + maps.DeleteFunc(c.cpuStats, func(key int64, item procfs.CPUStat) bool { + return !slices.Contains(onlineCPUIds, key) + }) } } diff --git a/collector/cpu_linux_test.go b/collector/cpu_linux_test.go new file mode 100644 index 0000000000..6d4bc4506d --- /dev/null +++ b/collector/cpu_linux_test.go @@ -0,0 +1,202 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nocpu + +package collector + +import ( + "io" + "log/slog" + "reflect" + "testing" + + "github.com/prometheus/procfs" +) + +func copyStats(d, s map[int64]procfs.CPUStat) { + for k := range s { + v := s[k] + d[k] = v + } +} + +func makeTestCPUCollector(s map[int64]procfs.CPUStat) *cpuCollector { + dup := make(map[int64]procfs.CPUStat, len(s)) + copyStats(dup, s) + return &cpuCollector{ + logger: slog.New(slog.NewTextHandler(io.Discard, nil)), + cpuStats: dup, + } +} + +func TestCPU(t *testing.T) { + firstCPUStat := map[int64]procfs.CPUStat{ + 0: { + User: 100.0, + Nice: 100.0, + System: 100.0, + Idle: 100.0, + Iowait: 100.0, + IRQ: 100.0, + SoftIRQ: 100.0, + Steal: 100.0, + Guest: 100.0, + GuestNice: 100.0, + }} + + c := makeTestCPUCollector(firstCPUStat) + want := map[int64]procfs.CPUStat{ + 0: { + User: 101.0, + Nice: 101.0, + System: 101.0, + Idle: 101.0, + Iowait: 101.0, + IRQ: 101.0, + SoftIRQ: 101.0, + Steal: 101.0, + Guest: 101.0, + GuestNice: 101.0, + }} + c.updateCPUStats(want) + got := c.cpuStats + if !reflect.DeepEqual(want, got) { + t.Fatalf("should have %v CPU Stat: got %v", want, got) + } + + c = makeTestCPUCollector(firstCPUStat) + jumpBack := map[int64]procfs.CPUStat{ + 0: { + User: 99.9, + Nice: 99.9, + System: 99.9, + Idle: 99.9, + Iowait: 99.9, + IRQ: 99.9, + SoftIRQ: 99.9, + Steal: 99.9, + Guest: 99.9, + GuestNice: 99.9, + }} + c.updateCPUStats(jumpBack) + got = c.cpuStats + if reflect.DeepEqual(jumpBack, got) { + t.Fatalf("should have %v CPU Stat: got %v", firstCPUStat, got) + } + + c = makeTestCPUCollector(firstCPUStat) + resetIdle := map[int64]procfs.CPUStat{ + 0: { + User: 102.0, + Nice: 102.0, + System: 102.0, + Idle: 1.0, + Iowait: 102.0, + IRQ: 102.0, + SoftIRQ: 102.0, + Steal: 102.0, + Guest: 102.0, + GuestNice: 102.0, + }} + c.updateCPUStats(resetIdle) + got = c.cpuStats + if !reflect.DeepEqual(resetIdle, got) { + t.Fatalf("should have %v CPU Stat: got %v", resetIdle, got) + } +} + +func TestCPUOffline(t *testing.T) { + // CPU 1 goes offline. 
+ firstCPUStat := map[int64]procfs.CPUStat{ + 0: { + User: 100.0, + Nice: 100.0, + System: 100.0, + Idle: 100.0, + Iowait: 100.0, + IRQ: 100.0, + SoftIRQ: 100.0, + Steal: 100.0, + Guest: 100.0, + GuestNice: 100.0, + }, + 1: { + User: 101.0, + Nice: 101.0, + System: 101.0, + Idle: 101.0, + Iowait: 101.0, + IRQ: 101.0, + SoftIRQ: 101.0, + Steal: 101.0, + Guest: 101.0, + GuestNice: 101.0, + }, + } + + c := makeTestCPUCollector(firstCPUStat) + want := map[int64]procfs.CPUStat{ + 0: { + User: 100.0, + Nice: 100.0, + System: 100.0, + Idle: 100.0, + Iowait: 100.0, + IRQ: 100.0, + SoftIRQ: 100.0, + Steal: 100.0, + Guest: 100.0, + GuestNice: 100.0, + }, + } + c.updateCPUStats(want) + got := c.cpuStats + if !reflect.DeepEqual(want, got) { + t.Fatalf("should have %v CPU Stat: got %v", want, got) + } + + // CPU 1 comes back online. + want = map[int64]procfs.CPUStat{ + 0: { + User: 100.0, + Nice: 100.0, + System: 100.0, + Idle: 100.0, + Iowait: 100.0, + IRQ: 100.0, + SoftIRQ: 100.0, + Steal: 100.0, + Guest: 100.0, + GuestNice: 100.0, + }, + 1: { + User: 101.0, + Nice: 101.0, + System: 101.0, + Idle: 101.0, + Iowait: 101.0, + IRQ: 101.0, + SoftIRQ: 101.0, + Steal: 101.0, + Guest: 101.0, + GuestNice: 101.0, + }, + } + c.updateCPUStats(want) + got = c.cpuStats + if !reflect.DeepEqual(want, got) { + t.Fatalf("should have %v CPU Stat: got %v", want, got) + } + +} diff --git a/collector/cpu_netbsd.go b/collector/cpu_netbsd.go new file mode 100644 index 0000000000..49051a6efd --- /dev/null +++ b/collector/cpu_netbsd.go @@ -0,0 +1,278 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
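+
+// This file adds a NetBSD CPU collector. In outline (see the code below for
+// details):
+//
+//	- Per-CPU times come from the kern.cp_time sysctl: five counters per CPU
+//	  (user, nice, sys, intr, idle), each divided by kern.clockrate's stathz
+//	  (falling back to hz) to obtain seconds.
+//	- CPU temperatures come from /dev/sysmon via an ioctl returning a binary
+//	  plist; "coretemp" readings are converted to degrees Celsius as
+//	  curValue/1e6 - 273.15 (i.e. the raw value is treated as microkelvin).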
+ +//go:build !nocpu + +package collector + +import ( + "errors" + "log/slog" + "math" + "regexp" + "sort" + "strconv" + "strings" + "unsafe" + + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" + + "howett.net/plist" +) + +const ( + _IOC_OUT = uint(0x40000000) + _IOC_IN = uint(0x80000000) + _IOC_INOUT = (_IOC_IN | _IOC_OUT) + _IOCPARM_MASK = uint(0x1fff) + _IOCPARM_SHIFT = uint(16) + _IOCGROUP_SHIFT = uint(8) +) + +type clockinfo struct { + hz int32 // clock frequency + tick int32 // micro-seconds per hz tick + spare int32 + stathz int32 // statistics clock frequency + profhz int32 // profiling clock frequency +} + +type cputime struct { + user float64 + nice float64 + sys float64 + intr float64 + idle float64 +} + +type plistref struct { + pref_plist unsafe.Pointer + pref_len uint +} + +type sysmonValues struct { + CurValue int `plist:"cur-value"` + Description string `plist:"description"` + State string `plist:"state"` + Type string `plist:"type"` +} + +type sysmonProperty []sysmonValues + +type sysmonProperties map[string]sysmonProperty + +func _IOC(inout uint, group byte, num uint, len uintptr) uint { + return ((inout) | ((uint(len) & _IOCPARM_MASK) << _IOCPARM_SHIFT) | (uint(group) << _IOCGROUP_SHIFT) | (num)) +} + +func _IOWR(group byte, num uint, len uintptr) uint { + return _IOC(_IOC_INOUT, group, num, len) +} + +func ioctl(fd int, nr uint, typ byte, size uintptr, retptr unsafe.Pointer) error { + _, _, errno := unix.Syscall( + unix.SYS_IOCTL, + uintptr(fd), + uintptr(_IOWR(typ, nr, size)), + uintptr(retptr), + ) + if errno != 0 { + return errno + } + return nil +} + +func readSysmonProperties() (sysmonProperties, error) { + fd, err := unix.Open(rootfsFilePath("/dev/sysmon"), unix.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer unix.Close(fd) + + var retptr plistref + + if err = ioctl(fd, 0, 'E', unsafe.Sizeof(retptr), unsafe.Pointer(&retptr)); err != nil { + return nil, err + } + defer unix.Syscall(unix.SYS_MUNMAP, uintptr(retptr.pref_plist), uintptr(retptr.pref_len), uintptr(0)) + bytes := unsafe.Slice((*byte)(unsafe.Pointer(retptr.pref_plist)), retptr.pref_len-1) + + var props sysmonProperties + if _, err = plist.Unmarshal(bytes, &props); err != nil { + return nil, err + } + return props, nil +} + +func sortFilterSysmonProperties(props sysmonProperties, prefix string) []string { + var keys []string + for key := range props { + if !strings.HasPrefix(key, prefix) { + continue + } + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func convertTemperatures(prop sysmonProperty, res map[int]float64) error { + + for _, val := range prop { + if val.State == "invalid" || val.State == "unknown" || val.State == "" { + continue + } + + re := regexp.MustCompile("^cpu([0-9]+) temperature$") + core := re.FindStringSubmatch(val.Description)[1] + ncore, _ := strconv.Atoi(core) + temperature := ((float64(uint64(val.CurValue))) / 1000000) - 273.15 + res[ncore] = temperature + } + return nil +} + +func getCPUTemperatures() (map[int]float64, error) { + + res := make(map[int]float64) + + // Read all properties + props, err := readSysmonProperties() + if err != nil { + return res, err + } + + keys := sortFilterSysmonProperties(props, "coretemp") + for idx := range keys { + convertTemperatures(props[keys[idx]], res) + } + + return res, nil +} + +func getCPUTimes() ([]cputime, error) { + const states = 5 + + clockb, err := unix.SysctlRaw("kern.clockrate") + if err != nil { + return nil, err + } + clock := 
*(*clockinfo)(unsafe.Pointer(&clockb[0])) + + var cpufreq float64 + if clock.stathz > 0 { + cpufreq = float64(clock.stathz) + } else { + cpufreq = float64(clock.hz) + } + + ncpusb, err := unix.SysctlRaw("hw.ncpu") + if err != nil { + return nil, err + } + ncpus := int(*(*uint32)(unsafe.Pointer(&ncpusb[0]))) + + if ncpus < 1 { + return nil, errors.New("Invalid cpu number") + } + + var times []float64 + for ncpu := 0; ncpu < ncpus; ncpu++ { + cpb, err := unix.SysctlRaw("kern.cp_time", ncpu) + if err != nil { + return nil, err + } + for len(cpb) >= int(unsafe.Sizeof(uint64(0))) { + t := *(*uint64)(unsafe.Pointer(&cpb[0])) + times = append(times, float64(t)/cpufreq) + cpb = cpb[unsafe.Sizeof(uint64(0)):] + } + } + + cpus := make([]cputime, len(times)/states) + for i := 0; i < len(times); i += states { + cpu := &cpus[i/states] + cpu.user = times[i] + cpu.nice = times[i+1] + cpu.sys = times[i+2] + cpu.intr = times[i+3] + cpu.idle = times[i+4] + } + return cpus, nil +} + +type statCollector struct { + cpu typedDesc + temp typedDesc + logger *slog.Logger +} + +func init() { + registerCollector("cpu", defaultEnabled, NewStatCollector) +} + +// NewStatCollector returns a new Collector exposing CPU stats. +func NewStatCollector(logger *slog.Logger) (Collector, error) { + return &statCollector{ + cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, + temp: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "temperature_celsius"), + "CPU temperature", + []string{"cpu"}, nil, + ), prometheus.GaugeValue}, + logger: logger, + }, nil +} + +// Expose CPU stats using sysctl. +func (c *statCollector) Update(ch chan<- prometheus.Metric) error { + // We want time spent per-cpu per CPUSTATE. + // CPUSTATES (number of CPUSTATES) is defined as 5U. + // Order: CP_USER | CP_NICE | CP_SYS | CP_IDLE | CP_INTR + // sysctl kern.cp_time.x provides CPUSTATES long integers: + // (space-separated list of the above variables, where + // x stands for the number of the CPU core) + // + // Each value is a counter incremented at frequency + // kern.clockrate.(stathz | hz) + // + // Look into sys/kern/kern_clock.c for details. + + cpuTimes, err := getCPUTimes() + if err != nil { + return err + } + + cpuTemperatures, err := getCPUTemperatures() + if err != nil { + return err + } + + for cpu, t := range cpuTimes { + lcpu := strconv.Itoa(cpu) + ch <- c.cpu.mustNewConstMetric(float64(t.user), lcpu, "user") + ch <- c.cpu.mustNewConstMetric(float64(t.nice), lcpu, "nice") + ch <- c.cpu.mustNewConstMetric(float64(t.sys), lcpu, "system") + ch <- c.cpu.mustNewConstMetric(float64(t.intr), lcpu, "interrupt") + ch <- c.cpu.mustNewConstMetric(float64(t.idle), lcpu, "idle") + + if temp, ok := cpuTemperatures[cpu]; ok { + ch <- c.temp.mustNewConstMetric(temp, lcpu) + } else { + c.logger.Debug("no temperature information for CPU", "cpu", cpu) + ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu) + } + } + return err +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/collector/cpu_netbsd_test.go similarity index 51% rename from vendor/github.com/prometheus/procfs/proc_environ.go rename to collector/cpu_netbsd_test.go index 6134b3580c..4a872bffa9 100644 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/collector/cpu_netbsd_test.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -11,27 +11,33 @@ // See the License for the specific language governing permissions and // limitations under the License. -package procfs +//go:build !nocpu -import ( - "strings" +package collector - "github.com/prometheus/procfs/internal/util" +import ( + "runtime" + "testing" ) -// Environ reads process environments from /proc//environ -func (p Proc) Environ() ([]string, error) { - environments := make([]string, 0) - - data, err := util.ReadFileNoStat(p.path("environ")) +func TestCPUTimes(t *testing.T) { + times, err := getCPUTimes() if err != nil { - return environments, err + t.Fatalf("getCPUTimes returned error: %v", err) + } + + if len(times) == 0 { + t.Fatalf("no CPU times found") } - environments = strings.Split(string(data), "\000") - if len(environments) > 0 { - environments = environments[:len(environments)-1] + if got, want := len(times), runtime.NumCPU(); got != want { + t.Fatalf("unexpected # of CPU times; got %d want %d", got, want) } +} - return environments, nil +func TestCPUTemperatures(t *testing.T) { + _, err := getCPUTemperatures() + if err != nil { + t.Fatalf("getCPUTemperatures returned error: %v", err) + } } diff --git a/collector/cpu_openbsd.go b/collector/cpu_openbsd.go index b7d7688722..2c8ce9a81f 100644 --- a/collector/cpu_openbsd.go +++ b/collector/cpu_openbsd.go @@ -11,35 +11,47 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nocpu +//go:build !nocpu package collector import ( + "log/slog" "strconv" "unsafe" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) -/* -#include -#include -*/ -import "C" +const ( + CP_USER = iota + CP_NICE + CP_SYS + CP_SPIN + CP_INTR + CP_IDLE + CPUSTATES +) +const ( + CP_USER_O63 = iota + CP_NICE_O63 + CP_SYS_O63 + CP_INTR_O63 + CP_IDLE_O63 + CPUSTATES_O63 +) type cpuCollector struct { cpu typedDesc - logger log.Logger + logger *slog.Logger } func init() { registerCollector("cpu", defaultEnabled, NewCPUCollector) } -func NewCPUCollector(logger log.Logger) (Collector, error) { +func NewCPUCollector(logger *slog.Logger) (Collector, error) { return &cpuCollector{ cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, logger: logger, @@ -51,32 +63,41 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) (err error) { if err != nil { return err } - clock := *(*C.struct_clockinfo)(unsafe.Pointer(&clockb[0])) - hz := float64(clock.stathz) + clock := *(*unix.Clockinfo)(unsafe.Pointer(&clockb[0])) + hz := float64(clock.Stathz) ncpus, err := unix.SysctlUint32("hw.ncpu") if err != nil { return err } - var cpTime [][C.CPUSTATES]C.int64_t + var cpTime [][CPUSTATES]uint64 for i := 0; i < int(ncpus); i++ { cpb, err := unix.SysctlRaw("kern.cp_time2", i) if err != nil && err != unix.ENODEV { return err } if err != unix.ENODEV { - cpTime = append(cpTime, *(*[C.CPUSTATES]C.int64_t)(unsafe.Pointer(&cpb[0]))) + var times [CPUSTATES]uint64 + for n := 0; n < len(cpb); n += 8 { + times[n/8] = *(*uint64)(unsafe.Pointer(&cpb[n])) + } + if len(cpb)/8 == CPUSTATES_O63 { + copy(times[CP_INTR:], times[CP_INTR_O63:]) + times[CP_SPIN] = 0 + } + cpTime = append(cpTime, times) } } for cpu, time := range cpTime { lcpu := strconv.Itoa(cpu) - ch <- c.cpu.mustNewConstMetric(float64(time[C.CP_USER])/hz, lcpu, "user") - ch <- c.cpu.mustNewConstMetric(float64(time[C.CP_NICE])/hz, lcpu, "nice") - ch <- c.cpu.mustNewConstMetric(float64(time[C.CP_SYS])/hz, lcpu, "system") - ch <- 
c.cpu.mustNewConstMetric(float64(time[C.CP_INTR])/hz, lcpu, "interrupt") - ch <- c.cpu.mustNewConstMetric(float64(time[C.CP_IDLE])/hz, lcpu, "idle") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_USER])/hz, lcpu, "user") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_NICE])/hz, lcpu, "nice") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_SYS])/hz, lcpu, "system") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_SPIN])/hz, lcpu, "spin") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_INTR])/hz, lcpu, "interrupt") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_IDLE])/hz, lcpu, "idle") } return err } diff --git a/collector/cpu_solaris.go b/collector/cpu_solaris.go index 0624454f49..471d29f03c 100644 --- a/collector/cpu_solaris.go +++ b/collector/cpu_solaris.go @@ -11,17 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build solaris -// +build !nocpu +//go:build !nocpu package collector import ( + "log/slog" "strconv" - "github.com/go-kit/kit/log" + "github.com/illumos/go-kstat" "github.com/prometheus/client_golang/prometheus" - kstat "github.com/siebenmann/go-kstat" ) // #include @@ -29,14 +28,14 @@ import "C" type cpuCollector struct { cpu typedDesc - logger log.Logger + logger *slog.Logger } func init() { registerCollector("cpu", defaultEnabled, NewCpuCollector) } -func NewCpuCollector(logger log.Logger) (Collector, error) { +func NewCpuCollector(logger *slog.Logger) (Collector, error) { return &cpuCollector{ cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, logger: logger, @@ -60,17 +59,17 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error { } for k, v := range map[string]string{ - "idle": "cpu_ticks_idle", - "kernel": "cpu_ticks_kernel", - "user": "cpu_ticks_user", - "wait": "cpu_ticks_wait", + "idle": "cpu_nsec_idle", + "kernel": "cpu_nsec_kernel", + "user": "cpu_nsec_user", + "wait": "cpu_nsec_wait", } { kstatValue, err := ksCPU.GetNamed(v) if err != nil { return err } - ch <- c.cpu.mustNewConstMetric(float64(kstatValue.UintVal), strconv.Itoa(cpu), k) + ch <- c.cpu.mustNewConstMetric(float64(kstatValue.UintVal)/1e9, strconv.Itoa(cpu), k) } } return nil diff --git a/collector/cpu_vulnerabilities_linux.go b/collector/cpu_vulnerabilities_linux.go new file mode 100644 index 0000000000..a41d5b17a5 --- /dev/null +++ b/collector/cpu_vulnerabilities_linux.go @@ -0,0 +1,69 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +const ( + cpuVulnerabilitiesCollectorSubsystem = "cpu_vulnerabilities" +) + +var ( + vulnerabilityDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuVulnerabilitiesCollectorSubsystem, "info"), + "Details of each CPU vulnerability reported by sysfs. The value of the series is an int encoded state of the vulnerability. 
The same state is stored as a string in the label", + []string{"codename", "state", "mitigation"}, + nil, + ) +) + +type cpuVulnerabilitiesCollector struct{} + +func init() { + registerCollector(cpuVulnerabilitiesCollectorSubsystem, defaultDisabled, NewVulnerabilitySysfsCollector) +} + +func NewVulnerabilitySysfsCollector(logger *slog.Logger) (Collector, error) { + return &cpuVulnerabilitiesCollector{}, nil +} + +func (v *cpuVulnerabilitiesCollector) Update(ch chan<- prometheus.Metric) error { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return fmt.Errorf("failed to open sysfs: %w", err) + } + + vulnerabilities, err := fs.CPUVulnerabilities() + if err != nil { + return fmt.Errorf("failed to get vulnerabilities: %w", err) + } + + for _, vulnerability := range vulnerabilities { + ch <- prometheus.MustNewConstMetric( + vulnerabilityDesc, + prometheus.GaugeValue, + 1.0, + vulnerability.CodeName, + sysfs.VulnerabilityHumanEncoding[vulnerability.State], + vulnerability.Mitigation, + ) + } + return nil +} diff --git a/collector/cpufreq_common.go b/collector/cpufreq_common.go new file mode 100644 index 0000000000..4cd17808f8 --- /dev/null +++ b/collector/cpufreq_common.go @@ -0,0 +1,58 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
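For reference, the cpu_vulnerabilities collector added above emits a constant 1 per vulnerability, with the sysfs state and mitigation carried as labels. A hypothetical scrape line (label values are purely illustrative, not taken from a real system) would look like:

node_cpu_vulnerabilities_info{codename="meltdown",state="Mitigation",mitigation="PTI"} 1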
+ +//go:build !nocpu + +package collector + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +var ( + cpuFreqHertzDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"), + "Current CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ) + cpuFreqMinDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_min_hertz"), + "Minimum CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ) + cpuFreqMaxDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"), + "Maximum CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ) + cpuFreqScalingFreqDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_hertz"), + "Current scaled CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ) + cpuFreqScalingFreqMinDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_min_hertz"), + "Minimum scaled CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ) + cpuFreqScalingFreqMaxDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_max_hertz"), + "Maximum scaled CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ) + cpuFreqScalingGovernorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_governor"), + "Current enabled CPU frequency governor.", + []string{"cpu", "governor"}, nil, + ) +) diff --git a/collector/cpufreq_linux.go b/collector/cpufreq_linux.go index aa32ad2d67..725e64e015 100644 --- a/collector/cpufreq_linux.go +++ b/collector/cpufreq_linux.go @@ -11,27 +11,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nocpu +//go:build !nocpu package collector import ( "fmt" + "log/slog" + "strings" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) type cpuFreqCollector struct { - fs sysfs.FS - cpuFreq *prometheus.Desc - cpuFreqMin *prometheus.Desc - cpuFreqMax *prometheus.Desc - scalingFreq *prometheus.Desc - scalingFreqMin *prometheus.Desc - scalingFreqMax *prometheus.Desc - logger log.Logger + fs sysfs.FS + logger *slog.Logger } func init() { @@ -39,44 +34,14 @@ func init() { } // NewCPUFreqCollector returns a new Collector exposing kernel/system statistics. 
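The descriptors hoisted into cpufreq_common.go are shared by the platform-specific cpufreq collectors below; the fully qualified metric names follow the usual namespace_subsystem_name pattern. A minimal sketch of the names they resolve to, assuming the exporter's "node" namespace and "cpu" subsystem:

// prometheus.BuildFQName joins the parts with underscores:
// prometheus.BuildFQName("node", "cpu", "frequency_hertz")         // -> "node_cpu_frequency_hertz"
// prometheus.BuildFQName("node", "cpu", "scaling_frequency_hertz") // -> "node_cpu_scaling_frequency_hertz"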
-func NewCPUFreqCollector(logger log.Logger) (Collector, error) { +func NewCPUFreqCollector(logger *slog.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &cpuFreqCollector{ - fs: fs, - cpuFreq: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"), - "Current cpu thread frequency in hertz.", - []string{"cpu"}, nil, - ), - cpuFreqMin: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_min_hertz"), - "Minimum cpu thread frequency in hertz.", - []string{"cpu"}, nil, - ), - cpuFreqMax: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"), - "Maximum cpu thread frequency in hertz.", - []string{"cpu"}, nil, - ), - scalingFreq: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_hertz"), - "Current scaled cpu thread frequency in hertz.", - []string{"cpu"}, nil, - ), - scalingFreqMin: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_min_hertz"), - "Minimum scaled cpu thread frequency in hertz.", - []string{"cpu"}, nil, - ), - scalingFreqMax: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_max_hertz"), - "Maximum scaled cpu thread frequency in hertz.", - []string{"cpu"}, nil, - ), + fs: fs, logger: logger, }, nil } @@ -93,7 +58,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { for _, stats := range cpuFreqs { if stats.CpuinfoCurrentFrequency != nil { ch <- prometheus.MustNewConstMetric( - c.cpuFreq, + cpuFreqHertzDesc, prometheus.GaugeValue, float64(*stats.CpuinfoCurrentFrequency)*1000.0, stats.Name, @@ -101,7 +66,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { } if stats.CpuinfoMinimumFrequency != nil { ch <- prometheus.MustNewConstMetric( - c.cpuFreqMin, + cpuFreqMinDesc, prometheus.GaugeValue, float64(*stats.CpuinfoMinimumFrequency)*1000.0, stats.Name, @@ -109,7 +74,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { } if stats.CpuinfoMaximumFrequency != nil { ch <- prometheus.MustNewConstMetric( - c.cpuFreqMax, + cpuFreqMaxDesc, prometheus.GaugeValue, float64(*stats.CpuinfoMaximumFrequency)*1000.0, stats.Name, @@ -117,7 +82,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { } if stats.ScalingCurrentFrequency != nil { ch <- prometheus.MustNewConstMetric( - c.scalingFreq, + cpuFreqScalingFreqDesc, prometheus.GaugeValue, float64(*stats.ScalingCurrentFrequency)*1000.0, stats.Name, @@ -125,7 +90,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { } if stats.ScalingMinimumFrequency != nil { ch <- prometheus.MustNewConstMetric( - c.scalingFreqMin, + cpuFreqScalingFreqMinDesc, prometheus.GaugeValue, float64(*stats.ScalingMinimumFrequency)*1000.0, stats.Name, @@ -133,12 +98,28 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { } if stats.ScalingMaximumFrequency != nil { ch <- prometheus.MustNewConstMetric( - c.scalingFreqMax, + cpuFreqScalingFreqMaxDesc, prometheus.GaugeValue, float64(*stats.ScalingMaximumFrequency)*1000.0, stats.Name, ) } + if stats.Governor != "" { + availableGovernors := strings.SplitSeq(stats.AvailableGovernors, " ") + for g := range availableGovernors { + state := 0 + if g == stats.Governor { + state = 1 + } + ch <- prometheus.MustNewConstMetric( + 
cpuFreqScalingGovernorDesc, + prometheus.GaugeValue, + float64(state), + stats.Name, + g, + ) + } + } } return nil } diff --git a/collector/cpufreq_solaris.go b/collector/cpufreq_solaris.go index 6532ec01ac..41826a999c 100644 --- a/collector/cpufreq_solaris.go +++ b/collector/cpufreq_solaris.go @@ -11,45 +11,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build solaris -// +build !nocpu +//go:build !nocpu package collector import ( "fmt" + "log/slog" "strconv" - "github.com/go-kit/kit/log" + "github.com/illumos/go-kstat" "github.com/prometheus/client_golang/prometheus" - kstat "github.com/siebenmann/go-kstat" ) // #include import "C" type cpuFreqCollector struct { - cpuFreq *prometheus.Desc - cpuFreqMax *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { registerCollector("cpufreq", defaultEnabled, NewCpuFreqCollector) } -func NewCpuFreqCollector(logger log.Logger) (Collector, error) { +func NewCpuFreqCollector(logger *slog.Logger) (Collector, error) { return &cpuFreqCollector{ - cpuFreq: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"), - "Current cpu thread frequency in hertz.", - []string{"cpu"}, nil, - ), - cpuFreqMax: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"), - "Maximum cpu thread frequency in hertz.", - []string{"cpu"}, nil, - ), logger: logger, }, nil } @@ -81,14 +68,14 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { lcpu := strconv.Itoa(cpu) ch <- prometheus.MustNewConstMetric( - c.cpuFreq, + cpuFreqHertzDesc, prometheus.GaugeValue, float64(cpuFreqV.UintVal), lcpu, ) // Multiply by 1e+6 to convert MHz to Hz. ch <- prometheus.MustNewConstMetric( - c.cpuFreqMax, + cpuFreqMaxDesc, prometheus.GaugeValue, float64(cpuFreqMaxV.IntVal)*1e+6, lcpu, diff --git a/collector/device_filter.go b/collector/device_filter.go new file mode 100644 index 0000000000..d8a38a2970 --- /dev/null +++ b/collector/device_filter.go @@ -0,0 +1,41 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
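The governor loop above emits one node_cpu_scaling_governor series per available governor, set to 1 only for the governor currently in use. Hypothetical output for a CPU whose available governors are "performance powersave" with "powersave" active (label values are illustrative):

node_cpu_scaling_governor{cpu="0",governor="performance"} 0
node_cpu_scaling_governor{cpu="0",governor="powersave"} 1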
+ +package collector + +import ( + "regexp" +) + +type deviceFilter struct { + ignorePattern *regexp.Regexp + acceptPattern *regexp.Regexp +} + +func newDeviceFilter(ignoredPattern, acceptPattern string) (f deviceFilter) { + if ignoredPattern != "" { + f.ignorePattern = regexp.MustCompile(ignoredPattern) + } + + if acceptPattern != "" { + f.acceptPattern = regexp.MustCompile(acceptPattern) + } + + return +} + +// ignored returns whether the device should be ignored +func (f *deviceFilter) ignored(name string) bool { + return (f.ignorePattern != nil && f.ignorePattern.MatchString(name)) || + (f.acceptPattern != nil && !f.acceptPattern.MatchString(name)) +} diff --git a/collector/device_filter_test.go b/collector/device_filter_test.go new file mode 100644 index 0000000000..7332f1cdb4 --- /dev/null +++ b/collector/device_filter_test.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "testing" +) + +func TestDeviceFilter(t *testing.T) { + tests := []struct { + ignore string + accept string + name string + expectedResult bool + }{ + {"", "", "eth0", false}, + {"", "^💩0$", "💩0", false}, + {"", "^💩0$", "💩1", true}, + {"", "^💩0$", "veth0", true}, + {"^💩", "", "💩3", true}, + {"^💩", "", "veth0", false}, + } + + for _, test := range tests { + filter := newDeviceFilter(test.ignore, test.accept) + result := filter.ignored(test.name) + + if result != test.expectedResult { + t.Errorf("ignorePattern=%v acceptPattern=%v ifname=%v expected=%v result=%v", test.ignore, test.accept, test.name, test.expectedResult, result) + } + } +} diff --git a/collector/devstat_dragonfly.go b/collector/devstat_dragonfly.go index 69e24ba8e0..28cc522831 100644 --- a/collector/devstat_dragonfly.go +++ b/collector/devstat_dragonfly.go @@ -11,15 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nodevstat +//go:build !nodevstat package collector import ( "errors" "fmt" + "log/slog" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -97,7 +97,7 @@ type devstatCollector struct { bytesDesc *prometheus.Desc transfersDesc *prometheus.Desc blocksDesc *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -105,7 +105,7 @@ func init() { } // NewDevstatCollector returns a new Collector exposing Device stats. -func NewDevstatCollector(logger log.Logger) (Collector, error) { +func NewDevstatCollector(logger *slog.Logger) (Collector, error) { return &devstatCollector{ bytesDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "bytes_total"), diff --git a/collector/devstat_freebsd.go b/collector/devstat_freebsd.go index dfa18d1e88..0c8a11c9a0 100644 --- a/collector/devstat_freebsd.go +++ b/collector/devstat_freebsd.go @@ -11,17 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
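The deviceFilter helper and its test above define the include/exclude semantics reused by several collectors: a device is ignored if it matches the exclude pattern, or if an include pattern is set and the device does not match it. A small sketch, assuming it is exercised from within the collector package:

// exclude loop/ram devices, no include pattern
f := newDeviceFilter(`^(ram|loop)\d+$`, "")
f.ignored("loop0") // true
f.ignored("sda")   // false

// include-only filter: everything not matching the include pattern is ignored
g := newDeviceFilter("", `^sd[a-z]$`)
g.ignored("sda")  // false
g.ignored("dm-0") // true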
-// +build !nodevstat +//go:build !nodevstat package collector import ( "errors" "fmt" + "log/slog" "sync" "unsafe" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -42,7 +42,7 @@ type devstatCollector struct { duration typedDesc busyTime typedDesc blocks typedDesc - logger log.Logger + logger *slog.Logger } func init() { @@ -50,7 +50,7 @@ func init() { } // NewDevstatCollector returns a new Collector exposing Device stats. -func NewDevstatCollector(logger log.Logger) (Collector, error) { +func NewDevstatCollector(logger *slog.Logger) (Collector, error) { return &devstatCollector{ devinfo: &C.struct_devinfo{}, bytes: typedDesc{prometheus.NewDesc( diff --git a/collector/diskstats_aix.go b/collector/diskstats_aix.go new file mode 100644 index 0000000000..f77eef06bd --- /dev/null +++ b/collector/diskstats_aix.go @@ -0,0 +1,144 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nodiskstats + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/power-devops/perfstat" + "github.com/prometheus/client_golang/prometheus" +) + +const diskstatsDefaultIgnoredDevices = "" + +type diskstatsCollector struct { + rbytes typedDesc + wbytes typedDesc + time typedDesc + bsize typedDesc + qdepth typedDesc + + rserv typedDesc + wserv typedDesc + + xfers typedDesc + xrate typedDesc + + deviceFilter deviceFilter + logger *slog.Logger + + tickPerSecond float64 +} + +func init() { + registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector) +} + +// NewDiskstatsCollector returns a new Collector exposing disk device stats. 
+func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) { + ticks, err := tickPerSecond() + if err != nil { + return nil, err + } + deviceFilter, err := newDiskstatsDeviceFilter(logger) + if err != nil { + return nil, fmt.Errorf("failed to parse device filter flags: %w", err) + } + + return &diskstatsCollector{ + rbytes: typedDesc{readBytesDesc, prometheus.CounterValue}, + wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue}, + time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue}, + + bsize: typedDesc{ + prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "block_size_bytes"), + "Size of the block device in bytes.", + diskLabelNames, nil, + ), + prometheus.GaugeValue, + }, + qdepth: typedDesc{ + prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "queue_depth"), + "Number of requests in the queue.", + diskLabelNames, nil, + ), + prometheus.GaugeValue, + }, + rserv: typedDesc{ + prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "read_time_seconds_total"), + "The total time spent servicing read requests.", + diskLabelNames, nil, + ), + prometheus.CounterValue, + }, + wserv: typedDesc{ + prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "write_time_seconds_total"), + "The total time spent servicing write requests.", + diskLabelNames, nil, + ), + prometheus.CounterValue, + }, + xfers: typedDesc{ + prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "transfers_total"), + "The total number of transfers to/from disk.", + diskLabelNames, nil, + ), + prometheus.CounterValue, + }, + xrate: typedDesc{ + prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "transfers_to_disk_total"), + "The total number of transfers from disk.", + diskLabelNames, nil, + ), + prometheus.CounterValue, + }, + deviceFilter: deviceFilter, + logger: logger, + + tickPerSecond: ticks, + }, nil +} + +func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error { + stats, err := perfstat.DiskStat() + if err != nil { + return err + } + + for _, stat := range stats { + if c.deviceFilter.ignored(stat.Name) { + continue + } + ch <- c.rbytes.mustNewConstMetric(float64(stat.Rblks*512), stat.Name) + ch <- c.wbytes.mustNewConstMetric(float64(stat.Wblks*512), stat.Name) + ch <- c.time.mustNewConstMetric(float64(stat.Time)/float64(c.tickPerSecond), stat.Name) + + ch <- c.bsize.mustNewConstMetric(float64(stat.BSize), stat.Name) + ch <- c.qdepth.mustNewConstMetric(float64(stat.QDepth), stat.Name) + ch <- c.rserv.mustNewConstMetric(float64(stat.Rserv)/1e9, stat.Name) + ch <- c.wserv.mustNewConstMetric(float64(stat.Wserv)/1e9, stat.Name) + ch <- c.xfers.mustNewConstMetric(float64(stat.Xfers), stat.Name) + ch <- c.xrate.mustNewConstMetric(float64(stat.XRate), stat.Name) + } + return nil +} diff --git a/collector/diskstats_common.go b/collector/diskstats_common.go index 7efb399a38..c930ca48d0 100644 --- a/collector/diskstats_common.go +++ b/collector/diskstats_common.go @@ -11,12 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
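The AIX Update method above normalises perfstat counters to the common node_exporter units: block counts are multiplied by 512 to yield bytes, service times are divided by 1e9 (nanoseconds to seconds), and busy time is divided by the system tick rate. As a worked example under those assumptions, a disk reporting Rblks=2048 and Rserv=1.5e9 would be exported as 1048576 read bytes and 1.5 seconds of read time.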
-// +build !nodiskstats -// +build openbsd linux darwin +//go:build !nodiskstats && (openbsd || linux || darwin || aix) package collector import ( + "errors" + "log/slog" + + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" ) @@ -27,6 +30,21 @@ const ( var ( diskLabelNames = []string{"device"} + diskstatsDeviceExcludeSet bool + diskstatsDeviceExclude = kingpin.Flag( + "collector.diskstats.device-exclude", + "Regexp of diskstats devices to exclude (mutually exclusive to device-include).", + ).Default(diskstatsDefaultIgnoredDevices).PreAction(func(c *kingpin.ParseContext) error { + diskstatsDeviceExcludeSet = true + return nil + }).String() + oldDiskstatsDeviceExclude = kingpin.Flag( + "collector.diskstats.ignored-devices", + "DEPRECATED: Use collector.diskstats.device-exclude", + ).Hidden().String() + + diskstatsDeviceInclude = kingpin.Flag("collector.diskstats.device-include", "Regexp of diskstats devices to include (mutually exclusive to device-exclude).").String() + readsCompletedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "reads_completed_total"), "The total number of reads completed successfully.", @@ -71,3 +89,28 @@ var ( nil, ) ) + +func newDiskstatsDeviceFilter(logger *slog.Logger) (deviceFilter, error) { + if *oldDiskstatsDeviceExclude != "" { + if !diskstatsDeviceExcludeSet { + logger.Warn("--collector.diskstats.ignored-devices is DEPRECATED and will be removed in 2.0.0, use --collector.diskstats.device-exclude") + *diskstatsDeviceExclude = *oldDiskstatsDeviceExclude + } else { + return deviceFilter{}, errors.New("--collector.diskstats.ignored-devices and --collector.diskstats.device-exclude are mutually exclusive") + } + } + + if *diskstatsDeviceExclude != "" && *diskstatsDeviceInclude != "" { + return deviceFilter{}, errors.New("device-exclude & device-include are mutually exclusive") + } + + if *diskstatsDeviceExclude != "" { + logger.Info("Parsed flag --collector.diskstats.device-exclude", "flag", *diskstatsDeviceExclude) + } + + if *diskstatsDeviceInclude != "" { + logger.Info("Parsed Flag --collector.diskstats.device-include", "flag", *diskstatsDeviceInclude) + } + + return newDeviceFilter(*diskstatsDeviceExclude, *diskstatsDeviceInclude), nil +} diff --git a/collector/diskstats_darwin.go b/collector/diskstats_darwin.go index 864220bf5f..abcf854083 100644 --- a/collector/diskstats_darwin.go +++ b/collector/diskstats_darwin.go @@ -11,26 +11,30 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nodiskstats +//go:build !nodiskstats package collector import ( "fmt" + "log/slog" - "github.com/go-kit/kit/log" "github.com/lufia/iostat" "github.com/prometheus/client_golang/prometheus" ) +const diskstatsDefaultIgnoredDevices = "" + type typedDescFunc struct { typedDesc value func(stat *iostat.DriveStats) float64 } type diskstatsCollector struct { - descs []typedDescFunc - logger log.Logger + descs []typedDescFunc + + deviceFilter deviceFilter + logger *slog.Logger } func init() { @@ -38,9 +42,14 @@ func init() { } // NewDiskstatsCollector returns a new Collector exposing disk device stats. 
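newDiskstatsDeviceFilter above keeps the deprecated --collector.diskstats.ignored-devices flag working by copying its value into the new exclude flag when the new flag is not set, and returns an error for ambiguous combinations (old and new exclude flags together, or exclude and include together). For example, passing --collector.diskstats.device-exclude='^(dm-|sr)[0-9]+$' on the command line selects the new path, while also passing --collector.diskstats.device-include would be rejected (flag values here are illustrative only).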
-func NewDiskstatsCollector(logger log.Logger) (Collector, error) { +func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) { var diskLabelNames = []string{"device"} + deviceFilter, err := newDiskstatsDeviceFilter(logger) + if err != nil { + return nil, fmt.Errorf("failed to parse device filter flags: %w", err) + } + return &diskstatsCollector{ descs: []typedDescFunc{ { @@ -182,7 +191,9 @@ func NewDiskstatsCollector(logger log.Logger) (Collector, error) { }, }, }, - logger: logger, + + deviceFilter: deviceFilter, + logger: logger, }, nil } @@ -193,6 +204,9 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error { } for _, stats := range diskStats { + if c.deviceFilter.ignored(stats.Name) { + continue + } for _, desc := range c.descs { v := desc.value(stats) ch <- desc.mustNewConstMetric(v, stats.Name) diff --git a/collector/diskstats_linux.go b/collector/diskstats_linux.go index b5ae82d850..daca55d968 100644 --- a/collector/diskstats_linux.go +++ b/collector/diskstats_linux.go @@ -11,51 +11,72 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nodiskstats +//go:build !nodiskstats package collector import ( "bufio" "fmt" - "io" + "log/slog" "os" - "regexp" "strconv" "strings" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" + "github.com/prometheus/procfs/blockdevice" ) const ( - diskSectorSize = 512 - diskstatsFilename = "diskstats" -) + secondsPerTick = 1.0 / 1000.0 -var ( - ignoredDevices = kingpin.Flag("collector.diskstats.ignored-devices", "Regexp of devices to ignore for diskstats.").Default("^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$").String() -) + // Read sectors and write sectors are the "standard UNIX 512-byte sectors, not any device- or filesystem-specific block size." + // See also https://www.kernel.org/doc/Documentation/block/stat.txt + unixSectorSize = 512.0 -type typedFactorDesc struct { - desc *prometheus.Desc - valueType prometheus.ValueType - factor float64 -} + diskstatsDefaultIgnoredDevices = "^(z?ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$" -func (d *typedFactorDesc) mustNewConstMetric(value float64, labels ...string) prometheus.Metric { - if d.factor != 0 { - value *= d.factor - } - return prometheus.MustNewConstMetric(d.desc, d.valueType, value, labels...) -} + // See udevadm(8). + udevDevicePropertyPrefix = "E:" + + // Udev device properties. 
+ udevDMLVLayer = "DM_LV_LAYER" + udevDMLVName = "DM_LV_NAME" + udevDMName = "DM_NAME" + udevDMUUID = "DM_UUID" + udevDMVGName = "DM_VG_NAME" + udevIDATA = "ID_ATA" + udevIDATARotationRateRPM = "ID_ATA_ROTATION_RATE_RPM" + udevIDATASATA = "ID_ATA_SATA" + udevIDATASATASignalRateGen1 = "ID_ATA_SATA_SIGNAL_RATE_GEN1" + udevIDATASATASignalRateGen2 = "ID_ATA_SATA_SIGNAL_RATE_GEN2" + udevIDATAWriteCache = "ID_ATA_WRITE_CACHE" + udevIDATAWriteCacheEnabled = "ID_ATA_WRITE_CACHE_ENABLED" + udevIDFSType = "ID_FS_TYPE" + udevIDFSUsage = "ID_FS_USAGE" + udevIDFSUUID = "ID_FS_UUID" + udevIDFSVersion = "ID_FS_VERSION" + udevIDModel = "ID_MODEL" + udevIDPath = "ID_PATH" + udevIDRevision = "ID_REVISION" + udevIDSerial = "ID_SERIAL" + udevIDSerialShort = "ID_SERIAL_SHORT" + udevIDWWN = "ID_WWN" + udevSCSIIdentSerial = "SCSI_IDENT_SERIAL" +) + +type udevInfo map[string]string type diskstatsCollector struct { - ignoredDevicesPattern *regexp.Regexp - descs []typedFactorDesc - logger log.Logger + deviceFilter deviceFilter + fs blockdevice.FS + infoDesc typedDesc + descs []typedDesc + filesystemInfoDesc typedDesc + deviceMapperInfoDesc typedDesc + ataDescs map[string]typedDesc + logger *slog.Logger + getUdevDeviceProperties func(uint32, uint32) (udevInfo, error) } func init() { @@ -64,12 +85,29 @@ func init() { // NewDiskstatsCollector returns a new Collector exposing disk device stats. // Docs from https://www.kernel.org/doc/Documentation/iostats.txt -func NewDiskstatsCollector(logger log.Logger) (Collector, error) { +func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) { var diskLabelNames = []string{"device"} + fs, err := blockdevice.NewFS(*procPath, *sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } - return &diskstatsCollector{ - ignoredDevicesPattern: regexp.MustCompile(*ignoredDevices), - descs: []typedFactorDesc{ + deviceFilter, err := newDiskstatsDeviceFilter(logger) + if err != nil { + return nil, fmt.Errorf("failed to parse device filter flags: %w", err) + } + + collector := diskstatsCollector{ + deviceFilter: deviceFilter, + fs: fs, + infoDesc: typedDesc{ + desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "info"), + "Info of /sys/block/.", + []string{"device", "major", "minor", "path", "wwn", "model", "serial", "revision", "rotational"}, + nil, + ), valueType: prometheus.GaugeValue, + }, + descs: []typedDesc{ { desc: readsCompletedDesc, valueType: prometheus.CounterValue, }, @@ -83,11 +121,9 @@ func NewDiskstatsCollector(logger log.Logger) (Collector, error) { }, { desc: readBytesDesc, valueType: prometheus.CounterValue, - factor: diskSectorSize, }, { desc: readTimeSecondsDesc, valueType: prometheus.CounterValue, - factor: .001, }, { desc: writesCompletedDesc, valueType: prometheus.CounterValue, @@ -102,11 +138,9 @@ func NewDiskstatsCollector(logger log.Logger) (Collector, error) { }, { desc: writtenBytesDesc, valueType: prometheus.CounterValue, - factor: diskSectorSize, }, { desc: writeTimeSecondsDesc, valueType: prometheus.CounterValue, - factor: .001, }, { desc: prometheus.NewDesc( @@ -118,7 +152,6 @@ func NewDiskstatsCollector(logger log.Logger) (Collector, error) { }, { desc: ioTimeSecondsDesc, valueType: prometheus.CounterValue, - factor: .001, }, { desc: prometheus.NewDesc( @@ -127,7 +160,6 @@ func NewDiskstatsCollector(logger log.Logger) (Collector, error) { diskLabelNames, nil, ), valueType: prometheus.CounterValue, - factor: .001, }, { desc: prometheus.NewDesc( @@ -160,7 +192,6 @@ func 
NewDiskstatsCollector(logger log.Logger) (Collector, error) { diskLabelNames, nil, ), valueType: prometheus.CounterValue, - factor: .001, }, { desc: prometheus.NewDesc( @@ -177,64 +208,196 @@ func NewDiskstatsCollector(logger log.Logger) (Collector, error) { diskLabelNames, nil, ), valueType: prometheus.CounterValue, - factor: .001, + }, + }, + filesystemInfoDesc: typedDesc{ + desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "filesystem_info"), + "Info about disk filesystem.", + []string{"device", "type", "usage", "uuid", "version"}, + nil, + ), valueType: prometheus.GaugeValue, + }, + deviceMapperInfoDesc: typedDesc{ + desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "device_mapper_info"), + "Info about disk device mapper.", + []string{"device", "name", "uuid", "vg_name", "lv_name", "lv_layer"}, + nil, + ), valueType: prometheus.GaugeValue, + }, + ataDescs: map[string]typedDesc{ + udevIDATAWriteCache: { + desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "ata_write_cache"), + "ATA disk has a write cache.", + []string{"device"}, + nil, + ), valueType: prometheus.GaugeValue, + }, + udevIDATAWriteCacheEnabled: { + desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "ata_write_cache_enabled"), + "ATA disk has its write cache enabled.", + []string{"device"}, + nil, + ), valueType: prometheus.GaugeValue, + }, + udevIDATARotationRateRPM: { + desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "ata_rotation_rate_rpm"), + "ATA disk rotation rate in RPMs (0 for SSDs).", + []string{"device"}, + nil, + ), valueType: prometheus.GaugeValue, }, }, logger: logger, - }, nil + } + + // Only enable getting device properties from udev if the directory is readable. + if stat, err := os.Stat(*udevDataPath); err != nil || !stat.IsDir() { + logger.Error("Failed to open directory, disabling udev device properties", "path", *udevDataPath) + } else { + collector.getUdevDeviceProperties = getUdevDeviceProperties + } + + return &collector, nil } func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error { - diskStats, err := getDiskStats() + diskStats, err := c.fs.ProcDiskstats() if err != nil { return fmt.Errorf("couldn't get diskstats: %w", err) } - for dev, stats := range diskStats { - if c.ignoredDevicesPattern.MatchString(dev) { - level.Debug(c.logger).Log("msg", "Ignoring device", "device", dev) + for _, stats := range diskStats { + dev := stats.DeviceName + if c.deviceFilter.ignored(dev) { continue } - for i, value := range stats { - // ignore unrecognized additional stats - if i >= len(c.descs) { + info, err := getUdevDeviceProperties(stats.MajorNumber, stats.MinorNumber) + if err != nil { + c.logger.Debug("Failed to parse udev info", "err", err) + } + + // This is usually the serial printed on the disk label. + serial := info[udevSCSIIdentSerial] + + // If it's undefined, fallback to ID_SERIAL_SHORT instead. + if serial == "" { + serial = info[udevIDSerialShort] + } + + // If still undefined, fallback to ID_SERIAL (used by virtio devices). + if serial == "" { + serial = info[udevIDSerial] + } + + queueStats, err := c.fs.SysBlockDeviceQueueStats(dev) + // Block Device Queue stats may not exist for all devices. 
+ if err != nil && !os.IsNotExist(err) { + c.logger.Debug("Failed to get block device queue stats", "device", dev, "err", err) + } + + ch <- c.infoDesc.mustNewConstMetric(1.0, dev, + fmt.Sprint(stats.MajorNumber), + fmt.Sprint(stats.MinorNumber), + info[udevIDPath], + info[udevIDWWN], + info[udevIDModel], + serial, + info[udevIDRevision], + strconv.FormatUint(queueStats.Rotational, 2), + ) + + statCount := stats.IoStatsCount - 3 // Total diskstats record count, less MajorNumber, MinorNumber and DeviceName + + for i, val := range []float64{ + float64(stats.ReadIOs), + float64(stats.ReadMerges), + float64(stats.ReadSectors) * unixSectorSize, + float64(stats.ReadTicks) * secondsPerTick, + float64(stats.WriteIOs), + float64(stats.WriteMerges), + float64(stats.WriteSectors) * unixSectorSize, + float64(stats.WriteTicks) * secondsPerTick, + float64(stats.IOsInProgress), + float64(stats.IOsTotalTicks) * secondsPerTick, + float64(stats.WeightedIOTicks) * secondsPerTick, + float64(stats.DiscardIOs), + float64(stats.DiscardMerges), + float64(stats.DiscardSectors), + float64(stats.DiscardTicks) * secondsPerTick, + float64(stats.FlushRequestsCompleted), + float64(stats.TimeSpentFlushing) * secondsPerTick, + } { + if i >= statCount { break } - v, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("invalid value %s in diskstats: %w", value, err) + ch <- c.descs[i].mustNewConstMetric(val, dev) + } + + if fsType := info[udevIDFSType]; fsType != "" { + ch <- c.filesystemInfoDesc.mustNewConstMetric(1.0, dev, + fsType, + info[udevIDFSUsage], + info[udevIDFSUUID], + info[udevIDFSVersion], + ) + } + + if name := info[udevDMName]; name != "" { + ch <- c.deviceMapperInfoDesc.mustNewConstMetric(1.0, dev, + name, + info[udevDMUUID], + info[udevDMVGName], + info[udevDMLVName], + info[udevDMLVLayer], + ) + } + + if ata := info[udevIDATA]; ata != "" { + for attr, desc := range c.ataDescs { + str, ok := info[attr] + if !ok { + c.logger.Debug("Udev attribute does not exist", "attribute", attr) + continue + } + + if value, err := strconv.ParseFloat(str, 64); err == nil { + ch <- desc.mustNewConstMetric(value, dev) + } else { + c.logger.Error("Failed to parse ATA value", "err", err) + } } - ch <- c.descs[i].mustNewConstMetric(v, dev) } } return nil } -func getDiskStats() (map[string][]string, error) { - file, err := os.Open(procFilePath(diskstatsFilename)) +func getUdevDeviceProperties(major, minor uint32) (udevInfo, error) { + filename := udevDataFilePath(fmt.Sprintf("b%d:%d", major, minor)) + + data, err := os.Open(filename) if err != nil { return nil, err } - defer file.Close() + defer data.Close() - return parseDiskStats(file) -} - -func parseDiskStats(r io.Reader) (map[string][]string, error) { - var ( - diskStats = map[string][]string{} - scanner = bufio.NewScanner(r) - ) + info := make(udevInfo) + scanner := bufio.NewScanner(data) for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - if len(parts) < 4 { // we strip major, minor and dev - return nil, fmt.Errorf("invalid line in %s: %s", procFilePath(diskstatsFilename), scanner.Text()) + line := scanner.Text() + + // We're only interested in device properties. 
+ if !strings.HasPrefix(line, udevDevicePropertyPrefix) { + continue + } + + line = strings.TrimPrefix(line, udevDevicePropertyPrefix) + + if name, value, found := strings.Cut(line, "="); found { + info[name] = value } - dev := parts[2] - diskStats[dev] = parts[3:] } - return diskStats, scanner.Err() + return info, nil } diff --git a/collector/diskstats_linux_test.go b/collector/diskstats_linux_test.go index 9bfecc58ff..08a5024c8a 100644 --- a/collector/diskstats_linux_test.go +++ b/collector/diskstats_linux_test.go @@ -11,42 +11,334 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nodiskstats + package collector import ( - "os" + "fmt" + "io" + "log/slog" + "strings" "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" ) -func TestDiskStats(t *testing.T) { - file, err := os.Open("fixtures/proc/diskstats") - if err != nil { - t.Fatal(err) - } - defer file.Close() +type testDiskStatsCollector struct { + dsc Collector +} + +func (c testDiskStatsCollector) Collect(ch chan<- prometheus.Metric) { + c.dsc.Update(ch) +} - diskStats, err := parseDiskStats(file) +func (c testDiskStatsCollector) Describe(ch chan<- *prometheus.Desc) { + prometheus.DescribeByCollect(c, ch) +} + +func NewTestDiskStatsCollector(logger *slog.Logger) (prometheus.Collector, error) { + dsc, err := NewDiskstatsCollector(logger) if err != nil { - t.Fatal(err) + return testDiskStatsCollector{}, err } + return testDiskStatsCollector{ + dsc: dsc, + }, err +} - if want, got := "25353629", diskStats["sda4"][0]; want != got { - t.Errorf("want diskstats sda4 %s, got %s", want, got) - } +func TestDiskStats(t *testing.T) { + *sysPath = "fixtures/sys" + *procPath = "fixtures/proc" + *udevDataPath = "fixtures/udev/data" + *diskstatsDeviceExclude = "^(z?ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$" + testcase := `# HELP node_disk_ata_rotation_rate_rpm ATA disk rotation rate in RPMs (0 for SSDs). +# TYPE node_disk_ata_rotation_rate_rpm gauge +node_disk_ata_rotation_rate_rpm{device="sda"} 7200 +node_disk_ata_rotation_rate_rpm{device="sdb"} 0 +node_disk_ata_rotation_rate_rpm{device="sdc"} 0 +# HELP node_disk_ata_write_cache ATA disk has a write cache. +# TYPE node_disk_ata_write_cache gauge +node_disk_ata_write_cache{device="sda"} 1 +node_disk_ata_write_cache{device="sdb"} 1 +node_disk_ata_write_cache{device="sdc"} 1 +# HELP node_disk_ata_write_cache_enabled ATA disk has its write cache enabled. +# TYPE node_disk_ata_write_cache_enabled gauge +node_disk_ata_write_cache_enabled{device="sda"} 0 +node_disk_ata_write_cache_enabled{device="sdb"} 1 +node_disk_ata_write_cache_enabled{device="sdc"} 0 +# HELP node_disk_device_mapper_info Info about disk device mapper. 
+# TYPE node_disk_device_mapper_info gauge +node_disk_device_mapper_info{device="dm-0",lv_layer="",lv_name="",name="nvme0n1_crypt",uuid="CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt",vg_name=""} 1 +node_disk_device_mapper_info{device="dm-1",lv_layer="",lv_name="swap_1",name="system-swap_1",uuid="LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-2",lv_layer="",lv_name="root",name="system-root",uuid="LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-3",lv_layer="",lv_name="var",name="system-var",uuid="LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-4",lv_layer="",lv_name="tmp",name="system-tmp",uuid="LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-5",lv_layer="",lv_name="home",name="system-home",uuid="LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf",vg_name="system"} 1 +# HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. +# TYPE node_disk_discard_time_seconds_total counter +node_disk_discard_time_seconds_total{device="sdb"} 11.13 +node_disk_discard_time_seconds_total{device="sdc"} 11.13 +# HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. +# TYPE node_disk_discarded_sectors_total counter +node_disk_discarded_sectors_total{device="sdb"} 1.925173784e+09 +node_disk_discarded_sectors_total{device="sdc"} 1.25173784e+08 +# HELP node_disk_discards_completed_total The total number of discards completed successfully. +# TYPE node_disk_discards_completed_total counter +node_disk_discards_completed_total{device="sdb"} 68851 +node_disk_discards_completed_total{device="sdc"} 18851 +# HELP node_disk_discards_merged_total The total number of discards merged. +# TYPE node_disk_discards_merged_total counter +node_disk_discards_merged_total{device="sdb"} 0 +node_disk_discards_merged_total{device="sdc"} 0 +# HELP node_disk_filesystem_info Info about disk filesystem. 
+# TYPE node_disk_filesystem_info gauge +node_disk_filesystem_info{device="dm-0",type="LVM2_member",usage="raid",uuid="c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB",version="LVM2 001"} 1 +node_disk_filesystem_info{device="dm-1",type="swap",usage="other",uuid="5272bb60-04b5-49cd-b730-be57c7604450",version="1"} 1 +node_disk_filesystem_info{device="dm-2",type="ext4",usage="filesystem",uuid="3deafd0d-faff-4695-8d15-51061ae1f51b",version="1.0"} 1 +node_disk_filesystem_info{device="dm-3",type="ext4",usage="filesystem",uuid="5c772222-f7d4-4c8e-87e8-e97df6b7a45e",version="1.0"} 1 +node_disk_filesystem_info{device="dm-4",type="ext4",usage="filesystem",uuid="a9479d44-60e1-4015-a1e5-bb065e6dd11b",version="1.0"} 1 +node_disk_filesystem_info{device="dm-5",type="ext4",usage="filesystem",uuid="b05b726a-c718-4c4d-8641-7c73a7696d83",version="1.0"} 1 +node_disk_filesystem_info{device="mmcblk0p1",type="vfat",usage="filesystem",uuid="6284-658D",version="FAT32"} 1 +node_disk_filesystem_info{device="mmcblk0p2",type="ext4",usage="filesystem",uuid="83324ce8-a6f3-4e35-ad64-dbb3d6b87a32",version="1.0"} 1 +node_disk_filesystem_info{device="sda",type="LVM2_member",usage="raid",uuid="cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw",version="LVM2 001"} 1 +node_disk_filesystem_info{device="sdc",type="LVM2_member",usage="raid",uuid="QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb",version="LVM2 001"} 1 +# HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. +# TYPE node_disk_flush_requests_time_seconds_total counter +node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944 +# HELP node_disk_flush_requests_total The total number of flush requests completed successfully +# TYPE node_disk_flush_requests_total counter +node_disk_flush_requests_total{device="sdc"} 1555 +# HELP node_disk_info Info of /sys/block/. 
+# TYPE node_disk_info gauge +node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="0x83e36d93",wwn=""} 1 +node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="0x83e36d93",wwn=""} 1 +node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="0x83e36d93",wwn=""} 1 +node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",rotational="0",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1 +node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",rotational="1",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1 +node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",rotational="0",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1 +node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",rotational="0",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1 +node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",rotational="0",serial="AAAABBBBCCCC1",wwn=""} 1 +node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",rotational="0",serial="",wwn=""} 1 +# HELP node_disk_io_now The number of I/Os currently in progress. +# TYPE node_disk_io_now gauge +node_disk_io_now{device="dm-0"} 0 +node_disk_io_now{device="dm-1"} 0 +node_disk_io_now{device="dm-2"} 0 +node_disk_io_now{device="dm-3"} 0 +node_disk_io_now{device="dm-4"} 0 +node_disk_io_now{device="dm-5"} 0 +node_disk_io_now{device="mmcblk0"} 0 +node_disk_io_now{device="mmcblk0p1"} 0 +node_disk_io_now{device="mmcblk0p2"} 0 +node_disk_io_now{device="nvme0n1"} 0 +node_disk_io_now{device="sda"} 0 +node_disk_io_now{device="sdb"} 0 +node_disk_io_now{device="sdc"} 0 +node_disk_io_now{device="sr0"} 0 +node_disk_io_now{device="vda"} 0 +# HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. 
+# TYPE node_disk_io_time_seconds_total counter +node_disk_io_time_seconds_total{device="dm-0"} 11325.968 +node_disk_io_time_seconds_total{device="dm-1"} 0.076 +node_disk_io_time_seconds_total{device="dm-2"} 65.4 +node_disk_io_time_seconds_total{device="dm-3"} 0.016 +node_disk_io_time_seconds_total{device="dm-4"} 0.024 +node_disk_io_time_seconds_total{device="dm-5"} 58.848 +node_disk_io_time_seconds_total{device="mmcblk0"} 0.136 +node_disk_io_time_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_io_time_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_io_time_seconds_total{device="nvme0n1"} 222.766 +node_disk_io_time_seconds_total{device="sda"} 9653.880000000001 +node_disk_io_time_seconds_total{device="sdb"} 60.730000000000004 +node_disk_io_time_seconds_total{device="sdc"} 10.73 +node_disk_io_time_seconds_total{device="sr0"} 0 +node_disk_io_time_seconds_total{device="vda"} 41614.592000000004 +# HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. +# TYPE node_disk_io_time_weighted_seconds_total counter +node_disk_io_time_weighted_seconds_total{device="dm-0"} 1.206301256e+06 +node_disk_io_time_weighted_seconds_total{device="dm-1"} 0.084 +node_disk_io_time_weighted_seconds_total{device="dm-2"} 129.416 +node_disk_io_time_weighted_seconds_total{device="dm-3"} 0.10400000000000001 +node_disk_io_time_weighted_seconds_total{device="dm-4"} 0.044 +node_disk_io_time_weighted_seconds_total{device="dm-5"} 105.632 +node_disk_io_time_weighted_seconds_total{device="mmcblk0"} 0.156 +node_disk_io_time_weighted_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_io_time_weighted_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_io_time_weighted_seconds_total{device="nvme0n1"} 1032.546 +node_disk_io_time_weighted_seconds_total{device="sda"} 82621.804 +node_disk_io_time_weighted_seconds_total{device="sdb"} 67.07000000000001 +node_disk_io_time_weighted_seconds_total{device="sdc"} 17.07 +node_disk_io_time_weighted_seconds_total{device="sr0"} 0 +node_disk_io_time_weighted_seconds_total{device="vda"} 2.0778722280000001e+06 +# HELP node_disk_read_bytes_total The total number of bytes read successfully. +# TYPE node_disk_read_bytes_total counter +node_disk_read_bytes_total{device="dm-0"} 5.13708655616e+11 +node_disk_read_bytes_total{device="dm-1"} 1.589248e+06 +node_disk_read_bytes_total{device="dm-2"} 1.578752e+08 +node_disk_read_bytes_total{device="dm-3"} 1.98144e+06 +node_disk_read_bytes_total{device="dm-4"} 529408 +node_disk_read_bytes_total{device="dm-5"} 4.3150848e+07 +node_disk_read_bytes_total{device="mmcblk0"} 798720 +node_disk_read_bytes_total{device="mmcblk0p1"} 81920 +node_disk_read_bytes_total{device="mmcblk0p2"} 389120 +node_disk_read_bytes_total{device="nvme0n1"} 2.377714176e+09 +node_disk_read_bytes_total{device="sda"} 5.13713216512e+11 +node_disk_read_bytes_total{device="sdb"} 4.944782848e+09 +node_disk_read_bytes_total{device="sdc"} 8.48782848e+08 +node_disk_read_bytes_total{device="sr0"} 0 +node_disk_read_bytes_total{device="vda"} 1.6727491584e+10 +# HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. 
+# TYPE node_disk_read_time_seconds_total counter +node_disk_read_time_seconds_total{device="dm-0"} 46229.572 +node_disk_read_time_seconds_total{device="dm-1"} 0.084 +node_disk_read_time_seconds_total{device="dm-2"} 6.5360000000000005 +node_disk_read_time_seconds_total{device="dm-3"} 0.10400000000000001 +node_disk_read_time_seconds_total{device="dm-4"} 0.028 +node_disk_read_time_seconds_total{device="dm-5"} 0.924 +node_disk_read_time_seconds_total{device="mmcblk0"} 0.156 +node_disk_read_time_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_read_time_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_read_time_seconds_total{device="nvme0n1"} 21.650000000000002 +node_disk_read_time_seconds_total{device="sda"} 18492.372 +node_disk_read_time_seconds_total{device="sdb"} 0.084 +node_disk_read_time_seconds_total{device="sdc"} 0.014 +node_disk_read_time_seconds_total{device="sr0"} 0 +node_disk_read_time_seconds_total{device="vda"} 8655.768 +# HELP node_disk_reads_completed_total The total number of reads completed successfully. +# TYPE node_disk_reads_completed_total counter +node_disk_reads_completed_total{device="dm-0"} 5.9910002e+07 +node_disk_reads_completed_total{device="dm-1"} 388 +node_disk_reads_completed_total{device="dm-2"} 11571 +node_disk_reads_completed_total{device="dm-3"} 3870 +node_disk_reads_completed_total{device="dm-4"} 392 +node_disk_reads_completed_total{device="dm-5"} 3729 +node_disk_reads_completed_total{device="mmcblk0"} 192 +node_disk_reads_completed_total{device="mmcblk0p1"} 17 +node_disk_reads_completed_total{device="mmcblk0p2"} 95 +node_disk_reads_completed_total{device="nvme0n1"} 47114 +node_disk_reads_completed_total{device="sda"} 2.5354637e+07 +node_disk_reads_completed_total{device="sdb"} 326552 +node_disk_reads_completed_total{device="sdc"} 126552 +node_disk_reads_completed_total{device="sr0"} 0 +node_disk_reads_completed_total{device="vda"} 1.775784e+06 +# HELP node_disk_reads_merged_total The total number of reads merged. +# TYPE node_disk_reads_merged_total counter +node_disk_reads_merged_total{device="dm-0"} 0 +node_disk_reads_merged_total{device="dm-1"} 0 +node_disk_reads_merged_total{device="dm-2"} 0 +node_disk_reads_merged_total{device="dm-3"} 0 +node_disk_reads_merged_total{device="dm-4"} 0 +node_disk_reads_merged_total{device="dm-5"} 0 +node_disk_reads_merged_total{device="mmcblk0"} 3 +node_disk_reads_merged_total{device="mmcblk0p1"} 3 +node_disk_reads_merged_total{device="mmcblk0p2"} 0 +node_disk_reads_merged_total{device="nvme0n1"} 4 +node_disk_reads_merged_total{device="sda"} 3.4367663e+07 +node_disk_reads_merged_total{device="sdb"} 841 +node_disk_reads_merged_total{device="sdc"} 141 +node_disk_reads_merged_total{device="sr0"} 0 +node_disk_reads_merged_total{device="vda"} 15386 +# HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. 
+# TYPE node_disk_write_time_seconds_total counter +node_disk_write_time_seconds_total{device="dm-0"} 1.1585578e+06 +node_disk_write_time_seconds_total{device="dm-1"} 0 +node_disk_write_time_seconds_total{device="dm-2"} 122.884 +node_disk_write_time_seconds_total{device="dm-3"} 0 +node_disk_write_time_seconds_total{device="dm-4"} 0.016 +node_disk_write_time_seconds_total{device="dm-5"} 104.684 +node_disk_write_time_seconds_total{device="mmcblk0"} 0 +node_disk_write_time_seconds_total{device="mmcblk0p1"} 0 +node_disk_write_time_seconds_total{device="mmcblk0p2"} 0 +node_disk_write_time_seconds_total{device="nvme0n1"} 1011.053 +node_disk_write_time_seconds_total{device="sda"} 63877.96 +node_disk_write_time_seconds_total{device="sdb"} 5.007 +node_disk_write_time_seconds_total{device="sdc"} 1.0070000000000001 +node_disk_write_time_seconds_total{device="sr0"} 0 +node_disk_write_time_seconds_total{device="vda"} 2.069221364e+06 +# HELP node_disk_writes_completed_total The total number of writes completed successfully. +# TYPE node_disk_writes_completed_total counter +node_disk_writes_completed_total{device="dm-0"} 3.9231014e+07 +node_disk_writes_completed_total{device="dm-1"} 74 +node_disk_writes_completed_total{device="dm-2"} 153522 +node_disk_writes_completed_total{device="dm-3"} 0 +node_disk_writes_completed_total{device="dm-4"} 38 +node_disk_writes_completed_total{device="dm-5"} 98918 +node_disk_writes_completed_total{device="mmcblk0"} 0 +node_disk_writes_completed_total{device="mmcblk0p1"} 0 +node_disk_writes_completed_total{device="mmcblk0p2"} 0 +node_disk_writes_completed_total{device="nvme0n1"} 1.07832e+06 +node_disk_writes_completed_total{device="sda"} 2.8444756e+07 +node_disk_writes_completed_total{device="sdb"} 41822 +node_disk_writes_completed_total{device="sdc"} 11822 +node_disk_writes_completed_total{device="sr0"} 0 +node_disk_writes_completed_total{device="vda"} 6.038856e+06 +# HELP node_disk_writes_merged_total The number of writes merged. +# TYPE node_disk_writes_merged_total counter +node_disk_writes_merged_total{device="dm-0"} 0 +node_disk_writes_merged_total{device="dm-1"} 0 +node_disk_writes_merged_total{device="dm-2"} 0 +node_disk_writes_merged_total{device="dm-3"} 0 +node_disk_writes_merged_total{device="dm-4"} 0 +node_disk_writes_merged_total{device="dm-5"} 0 +node_disk_writes_merged_total{device="mmcblk0"} 0 +node_disk_writes_merged_total{device="mmcblk0p1"} 0 +node_disk_writes_merged_total{device="mmcblk0p2"} 0 +node_disk_writes_merged_total{device="nvme0n1"} 43950 +node_disk_writes_merged_total{device="sda"} 1.1134226e+07 +node_disk_writes_merged_total{device="sdb"} 2895 +node_disk_writes_merged_total{device="sdc"} 1895 +node_disk_writes_merged_total{device="sr0"} 0 +node_disk_writes_merged_total{device="vda"} 2.0711856e+07 +# HELP node_disk_written_bytes_total The total number of bytes written successfully. 
+# TYPE node_disk_written_bytes_total counter +node_disk_written_bytes_total{device="dm-0"} 2.5891680256e+11 +node_disk_written_bytes_total{device="dm-1"} 303104 +node_disk_written_bytes_total{device="dm-2"} 2.607828992e+09 +node_disk_written_bytes_total{device="dm-3"} 0 +node_disk_written_bytes_total{device="dm-4"} 70144 +node_disk_written_bytes_total{device="dm-5"} 5.89664256e+08 +node_disk_written_bytes_total{device="mmcblk0"} 0 +node_disk_written_bytes_total{device="mmcblk0p1"} 0 +node_disk_written_bytes_total{device="mmcblk0p2"} 0 +node_disk_written_bytes_total{device="nvme0n1"} 2.0199236096e+10 +node_disk_written_bytes_total{device="sda"} 2.58916880384e+11 +node_disk_written_bytes_total{device="sdb"} 1.01012736e+09 +node_disk_written_bytes_total{device="sdc"} 8.852736e+07 +node_disk_written_bytes_total{device="sr0"} 0 +node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 +` - if want, got := "68", diskStats["mmcblk0p2"][10]; want != got { - t.Errorf("want diskstats mmcblk0p2 %s, got %s", want, got) + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + collector, err := NewDiskstatsCollector(logger) + if err != nil { + t.Fatal(err) } - - if want, got := "11130", diskStats["sdb"][14]; want != got { - t.Errorf("want diskstats sdb %s, got %s", want, got) + c, err := NewTestDiskStatsCollector(logger) + if err != nil { + t.Fatal(err) } + reg := prometheus.NewRegistry() + reg.MustRegister(c) - if want, got := "1555", diskStats["sdc"][15]; want != got { - t.Errorf("want diskstats sdc %s, got %s", want, got) - } + sink := make(chan prometheus.Metric) + go func() { + err = collector.Update(sink) + if err != nil { + panic(fmt.Errorf("failed to update collector: %s", err)) + } + close(sink) + }() - if want, got := "1944", diskStats["sdc"][16]; want != got { - t.Errorf("want diskstats sdc %s, got %s", want, got) + err = testutil.GatherAndCompare(reg, strings.NewReader(testcase)) + if err != nil { + t.Fatal(err) } } diff --git a/collector/diskstats_openbsd.go b/collector/diskstats_openbsd.go index 3385f3777c..c642a7bf6f 100644 --- a/collector/diskstats_openbsd.go +++ b/collector/diskstats_openbsd.go @@ -11,14 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nodiskstats +//go:build !nodiskstats && !amd64 package collector import ( + "fmt" + "log/slog" "unsafe" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) @@ -29,13 +30,17 @@ import ( */ import "C" +const diskstatsDefaultIgnoredDevices = "" + type diskstatsCollector struct { rxfer typedDesc rbytes typedDesc wxfer typedDesc wbytes typedDesc time typedDesc - logger log.Logger + + deviceFilter deviceFilter + logger *slog.Logger } func init() { @@ -43,14 +48,21 @@ func init() { } // NewDiskstatsCollector returns a new Collector exposing disk device stats. 
-func NewDiskstatsCollector(logger log.Logger) (Collector, error) { +func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) { + deviceFilter, err := newDiskstatsDeviceFilter(logger) + if err != nil { + return nil, fmt.Errorf("failed to parse device filter flags: %w", err) + } + return &diskstatsCollector{ rxfer: typedDesc{readsCompletedDesc, prometheus.CounterValue}, rbytes: typedDesc{readBytesDesc, prometheus.CounterValue}, wxfer: typedDesc{writesCompletedDesc, prometheus.CounterValue}, wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue}, time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue}, - logger: logger, + + deviceFilter: deviceFilter, + logger: logger, }, nil } @@ -65,6 +77,9 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) (err error) { for i := 0; i < ndisks; i++ { diskname := C.GoString(&diskstats[i].ds_name[0]) + if c.deviceFilter.ignored(diskname) { + continue + } ch <- c.rxfer.mustNewConstMetric(float64(diskstats[i].ds_rxfer), diskname) ch <- c.rbytes.mustNewConstMetric(float64(diskstats[i].ds_rbytes), diskname) diff --git a/collector/diskstats_openbsd_amd64.go b/collector/diskstats_openbsd_amd64.go new file mode 100644 index 0000000000..bbb9e8b933 --- /dev/null +++ b/collector/diskstats_openbsd_amd64.go @@ -0,0 +1,104 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nodiskstats + +package collector + +import ( + "fmt" + "log/slog" + "unsafe" + + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +const ( + DS_DISKNAMELEN = 16 + + diskstatsDefaultIgnoredDevices = "" +) + +type DiskStats struct { + Name [DS_DISKNAMELEN]int8 + Busy int32 + Rxfer uint64 + Wxfer uint64 + Seek uint64 + Rbytes uint64 + Wbytes uint64 + Attachtime unix.Timeval + Timestamp unix.Timeval + Time unix.Timeval +} + +type diskstatsCollector struct { + rxfer typedDesc + rbytes typedDesc + wxfer typedDesc + wbytes typedDesc + time typedDesc + + deviceFilter deviceFilter + logger *slog.Logger +} + +func init() { + registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector) +} + +// NewDiskstatsCollector returns a new Collector exposing disk device stats. 
+func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) { + deviceFilter, err := newDiskstatsDeviceFilter(logger) + if err != nil { + return nil, fmt.Errorf("failed to parse device filter flags: %w", err) + } + + return &diskstatsCollector{ + rxfer: typedDesc{readsCompletedDesc, prometheus.CounterValue}, + rbytes: typedDesc{readBytesDesc, prometheus.CounterValue}, + wxfer: typedDesc{writesCompletedDesc, prometheus.CounterValue}, + wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue}, + time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue}, + + deviceFilter: deviceFilter, + logger: logger, + }, nil +} + +func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) (err error) { + diskstatsb, err := unix.SysctlRaw("hw.diskstats") + if err != nil { + return err + } + + ndisks := len(diskstatsb) / int(unsafe.Sizeof(DiskStats{})) + diskstats := *(*[]DiskStats)(unsafe.Pointer(&diskstatsb)) + + for i := 0; i < ndisks; i++ { + dn := *(*[DS_DISKNAMELEN]int8)(unsafe.Pointer(&diskstats[i].Name[0])) + diskname := int8ToString(dn[:]) + if c.deviceFilter.ignored(diskname) { + continue + } + + ch <- c.rxfer.mustNewConstMetric(float64(diskstats[i].Rxfer), diskname) + ch <- c.rbytes.mustNewConstMetric(float64(diskstats[i].Rbytes), diskname) + ch <- c.wxfer.mustNewConstMetric(float64(diskstats[i].Wxfer), diskname) + ch <- c.wbytes.mustNewConstMetric(float64(diskstats[i].Wbytes), diskname) + time := float64(diskstats[i].Time.Sec) + float64(diskstats[i].Time.Usec)/1000000 + ch <- c.time.mustNewConstMetric(time, diskname) + } + return nil +} diff --git a/collector/dmi.go b/collector/dmi.go new file mode 100644 index 0000000000..575c331950 --- /dev/null +++ b/collector/dmi.go @@ -0,0 +1,104 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && !nodmi + +package collector + +import ( + "errors" + "fmt" + "log/slog" + "os" + "strings" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +type dmiCollector struct { + infoDesc *prometheus.Desc + values []string +} + +func init() { + registerCollector("dmi", defaultEnabled, NewDMICollector) +} + +// NewDMICollector returns a new Collector exposing DMI information. 
+func NewDMICollector(logger *slog.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + dmi, err := fs.DMIClass() + if err != nil { + if errors.Is(err, os.ErrNotExist) { + logger.Debug("Platform does not support Desktop Management Interface (DMI) information", "err", err) + dmi = &sysfs.DMIClass{} + } else { + return nil, fmt.Errorf("failed to read Desktop Management Interface (DMI) information: %w", err) + } + } + + var labels, values []string + for label, value := range map[string]*string{ + "bios_date": dmi.BiosDate, + "bios_release": dmi.BiosRelease, + "bios_vendor": dmi.BiosVendor, + "bios_version": dmi.BiosVersion, + "board_asset_tag": dmi.BoardAssetTag, + "board_name": dmi.BoardName, + "board_serial": dmi.BoardSerial, + "board_vendor": dmi.BoardVendor, + "board_version": dmi.BoardVersion, + "chassis_asset_tag": dmi.ChassisAssetTag, + "chassis_serial": dmi.ChassisSerial, + "chassis_vendor": dmi.ChassisVendor, + "chassis_version": dmi.ChassisVersion, + "product_family": dmi.ProductFamily, + "product_name": dmi.ProductName, + "product_serial": dmi.ProductSerial, + "product_sku": dmi.ProductSKU, + "product_uuid": dmi.ProductUUID, + "product_version": dmi.ProductVersion, + "system_vendor": dmi.SystemVendor, + } { + if value != nil { + labels = append(labels, label) + values = append(values, strings.ToValidUTF8(*value, "�")) + } + } + + // Construct DMI metric only once since it will not change until the next reboot. + return &dmiCollector{ + infoDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "dmi", "info"), + "A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, "+ + "board_asset_tag, board_name, board_serial, board_vendor, board_version, chassis_asset_tag, "+ + "chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, "+ + "product_sku, product_uuid, product_version, system_vendor if provided by DMI.", + labels, nil, + ), + values: values, + }, nil +} + +func (c *dmiCollector) Update(ch chan<- prometheus.Metric) error { + if len(c.values) == 0 { + return ErrNoData + } + ch <- prometheus.MustNewConstMetric(c.infoDesc, prometheus.GaugeValue, 1.0, c.values...) + return nil +} diff --git a/collector/drbd_linux.go b/collector/drbd_linux.go index 281c079dd9..f3102700c9 100644 --- a/collector/drbd_linux.go +++ b/collector/drbd_linux.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
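As a rough illustration (editorial sketch, not part of this change), the dmi collector above follows client_golang's usual "info metric" shape: label names and values are assembled once in the constructor, and Update only ever emits a constant 1, falling back to ErrNoData when nothing was found. A minimal sketch of that shape, with hypothetical labels rather than the collector's real DMI label set:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// infoCollector mirrors the construct-once / emit-constant-1 pattern.
type infoCollector struct {
	desc   *prometheus.Desc
	values []string
}

func newInfoCollector() *infoCollector {
	// Hypothetical labels; the real collector derives these from sysfs DMI data.
	return &infoCollector{
		desc:   prometheus.NewDesc("example_dmi_info", "Constant '1' labeled with DMI data.", []string{"vendor", "product"}, nil),
		values: []string{"ExampleVendor", "ExampleProduct"},
	}
}

func (c *infoCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *infoCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 1, c.values...)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(newInfoCollector())
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), "->", len(mf.GetMetric()), "series")
	}
}

Building the Desc once keeps per-scrape work to a single MustNewConstMetric call, which is why the collector can cheaply return ErrNoData when no DMI values are present.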
-// +build !nodrbd +//go:build !nodrbd package collector @@ -19,12 +19,11 @@ import ( "bufio" "errors" "fmt" + "log/slog" "os" "strconv" "strings" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" ) @@ -78,14 +77,14 @@ type drbdCollector struct { numerical map[string]drbdNumericalMetric stringPair map[string]drbdStringPairMetric connected *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { registerCollector("drbd", defaultDisabled, newDRBDCollector) } -func newDRBDCollector(logger log.Logger) (Collector, error) { +func newDRBDCollector(logger *slog.Logger) (Collector, error) { return &drbdCollector{ numerical: map[string]drbdNumericalMetric{ "ns": newDRBDNumericalMetric( @@ -190,7 +189,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error { file, err := os.Open(statsFile) if err != nil { if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "stats file does not exist, skipping", "file", statsFile, "err", err) + c.logger.Debug("stats file does not exist, skipping", "file", statsFile, "err", err) return ErrNoData } @@ -207,7 +206,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error { kv := strings.Split(field, ":") if len(kv) != 2 { - level.Debug(c.logger).Log("msg", "skipping invalid key:value pair", "field", field) + c.logger.Debug("skipping invalid key:value pair", "field", field) continue } @@ -273,7 +272,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error { continue } - level.Debug(c.logger).Log("msg", "unhandled key-value pair", "key", kv[0], "value", kv[1]) + c.logger.Debug("unhandled key-value pair", "key", kv[0], "value", kv[1]) } return scanner.Err() diff --git a/collector/drm_linux.go b/collector/drm_linux.go new file mode 100644 index 0000000000..80356ee84c --- /dev/null +++ b/collector/drm_linux.go @@ -0,0 +1,139 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nogpu + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +const ( + drmCollectorSubsystem = "drm" +) + +type drmCollector struct { + fs sysfs.FS + logger *slog.Logger + CardInfo *prometheus.Desc + GPUBusyPercent *prometheus.Desc + MemoryGTTSize *prometheus.Desc + MemoryGTTUsed *prometheus.Desc + MemoryVisibleVRAMSize *prometheus.Desc + MemoryVisibleVRAMUsed *prometheus.Desc + MemoryVRAMSize *prometheus.Desc + MemoryVRAMUsed *prometheus.Desc +} + +func init() { + registerCollector("drm", defaultDisabled, NewDrmCollector) +} + +// NewDrmCollector returns a new Collector exposing /sys/class/drm/card?/device stats. 
+func NewDrmCollector(logger *slog.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + return &drmCollector{ + fs: fs, + logger: logger, + CardInfo: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "card_info"), + "Card information", + []string{"card", "memory_vendor", "power_performance_level", "unique_id", "vendor"}, nil, + ), + GPUBusyPercent: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "gpu_busy_percent"), + "How busy the GPU is as a percentage.", + []string{"card"}, nil, + ), + MemoryGTTSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_gtt_size_bytes"), + "The size of the graphics translation table (GTT) block in bytes.", + []string{"card"}, nil, + ), + MemoryGTTUsed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_gtt_used_bytes"), + "The used amount of the graphics translation table (GTT) block in bytes.", + []string{"card"}, nil, + ), + MemoryVisibleVRAMSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vis_vram_size_bytes"), + "The size of visible VRAM in bytes.", + []string{"card"}, nil, + ), + MemoryVisibleVRAMUsed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vis_vram_used_bytes"), + "The used amount of visible VRAM in bytes.", + []string{"card"}, nil, + ), + MemoryVRAMSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vram_size_bytes"), + "The size of VRAM in bytes.", + []string{"card"}, nil, + ), + MemoryVRAMUsed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vram_used_bytes"), + "The used amount of VRAM in bytes.", + []string{"card"}, nil, + ), + }, nil +} + +func (c *drmCollector) Update(ch chan<- prometheus.Metric) error { + return c.updateAMDCards(ch) +} + +func (c *drmCollector) updateAMDCards(ch chan<- prometheus.Metric) error { + vendor := "amd" + stats, err := c.fs.ClassDRMCardAMDGPUStats() + if err != nil { + return err + } + + for _, s := range stats { + ch <- prometheus.MustNewConstMetric( + c.CardInfo, prometheus.GaugeValue, 1, + s.Name, s.MemoryVRAMVendor, s.PowerDPMForcePerformanceLevel, s.UniqueID, vendor) + + ch <- prometheus.MustNewConstMetric( + c.GPUBusyPercent, prometheus.GaugeValue, float64(s.GPUBusyPercent), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryGTTSize, prometheus.GaugeValue, float64(s.MemoryGTTSize), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryGTTUsed, prometheus.GaugeValue, float64(s.MemoryGTTUsed), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryVRAMSize, prometheus.GaugeValue, float64(s.MemoryVRAMSize), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryVRAMUsed, prometheus.GaugeValue, float64(s.MemoryVRAMUsed), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryVisibleVRAMSize, prometheus.GaugeValue, float64(s.MemoryVisibleVRAMSize), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryVisibleVRAMUsed, prometheus.GaugeValue, float64(s.MemoryVisibleVRAMUsed), s.Name) + } + + return nil +} diff --git a/collector/edac_linux.go b/collector/edac_linux.go index 1248e2e799..d3a2a07a83 100644 --- a/collector/edac_linux.go +++ b/collector/edac_linux.go @@ -11,16 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
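For orientation (an editorial sketch, not taken from this change): the per-card numbers the drm collector obtains through sysfs.ClassDRMCardAMDGPUStats ultimately come from amdgpu sysfs attributes such as gpu_busy_percent. Reading one of them by hand looks roughly like this; the path is an assumption based on the amdgpu driver and only exists for AMD cards:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// Assumed amdgpu attribute; other DRM drivers do not expose it.
	raw, err := os.ReadFile("/sys/class/drm/card0/device/gpu_busy_percent")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	busy, err := strconv.ParseFloat(strings.TrimSpace(string(raw)), 64)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("GPU busy: %.0f%%\n", busy)
}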
-// +build !noedac +//go:build !noedac package collector import ( "fmt" + "log/slog" "path/filepath" "regexp" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -38,7 +38,7 @@ type edacCollector struct { ueCount *prometheus.Desc csRowCECount *prometheus.Desc csRowUECount *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -46,7 +46,7 @@ func init() { } // NewEdacCollector returns a new Collector exposing edac stats. -func NewEdacCollector(logger log.Logger) (Collector, error) { +func NewEdacCollector(logger *slog.Logger) (Collector, error) { return &edacCollector{ ceCount: prometheus.NewDesc( prometheus.BuildFQName(namespace, edacSubsystem, "correctable_errors_total"), diff --git a/collector/entropy_linux.go b/collector/entropy_linux.go index 3c42c3d0a6..1373bf3648 100644 --- a/collector/entropy_linux.go +++ b/collector/entropy_linux.go @@ -11,20 +11,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noentropy +//go:build !noentropy package collector import ( "fmt" + "log/slog" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" ) type entropyCollector struct { - entropyAvail *prometheus.Desc - logger log.Logger + fs procfs.FS + entropyAvail *prometheus.Desc + entropyPoolSize *prometheus.Desc + logger *slog.Logger } func init() { @@ -32,24 +35,45 @@ func init() { } // NewEntropyCollector returns a new Collector exposing entropy stats. -func NewEntropyCollector(logger log.Logger) (Collector, error) { +func NewEntropyCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + return &entropyCollector{ + fs: fs, entropyAvail: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "entropy_available_bits"), "Bits of available entropy.", nil, nil, ), + entropyPoolSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "entropy_pool_size_bits"), + "Bits of entropy pool.", + nil, nil, + ), logger: logger, }, nil } func (c *entropyCollector) Update(ch chan<- prometheus.Metric) error { - value, err := readUintFromFile(procFilePath("sys/kernel/random/entropy_avail")) + stats, err := c.fs.KernelRandom() if err != nil { - return fmt.Errorf("couldn't get entropy_avail: %w", err) + return fmt.Errorf("failed to get kernel random stats: %w", err) + } + + if stats.EntropyAvaliable == nil { + return fmt.Errorf("couldn't get entropy_avail") + } + ch <- prometheus.MustNewConstMetric( + c.entropyAvail, prometheus.GaugeValue, float64(*stats.EntropyAvaliable)) + + if stats.PoolSize == nil { + return fmt.Errorf("couldn't get entropy poolsize") } ch <- prometheus.MustNewConstMetric( - c.entropyAvail, prometheus.GaugeValue, float64(value)) + c.entropyPoolSize, prometheus.GaugeValue, float64(*stats.PoolSize)) return nil } diff --git a/collector/ethtool_linux.go b/collector/ethtool_linux.go new file mode 100644 index 0000000000..e4d86bcd8c --- /dev/null +++ b/collector/ethtool_linux.go @@ -0,0 +1,520 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
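The reworked entropy collector above goes through procfs.KernelRandom() instead of reading sys/kernel/random/entropy_avail by hand. Used on its own, the call looks roughly like the sketch below; note that the procfs field really is spelled EntropyAvaliable, exactly as in the diff, and both fields are pointers that may be nil:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	kr, err := fs.KernelRandom()
	if err != nil {
		panic(err)
	}
	if kr.EntropyAvaliable != nil {
		fmt.Println("entropy available:", *kr.EntropyAvaliable)
	}
	if kr.PoolSize != nil {
		fmt.Println("pool size:", *kr.PoolSize)
	}
}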
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noethtool + +// The hard work of collecting data from the kernel via the ethtool interfaces is done by +// https://github.com/safchain/ethtool/ +// by Sylvain Afchain. Used under the Apache license. + +package collector + +import ( + "errors" + "fmt" + "log/slog" + "os" + "regexp" + "sort" + "strings" + "sync" + "syscall" + + "github.com/alecthomas/kingpin/v2" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" + "github.com/safchain/ethtool" + "golang.org/x/sys/unix" +) + +var ( + ethtoolDeviceInclude = kingpin.Flag("collector.ethtool.device-include", "Regexp of ethtool devices to include (mutually exclusive to device-exclude).").String() + ethtoolDeviceExclude = kingpin.Flag("collector.ethtool.device-exclude", "Regexp of ethtool devices to exclude (mutually exclusive to device-include).").String() + ethtoolIncludedMetrics = kingpin.Flag("collector.ethtool.metrics-include", "Regexp of ethtool stats to include.").Default(".*").String() + ethtoolReceivedRegex = regexp.MustCompile(`(^|_)rx(_|$)`) + ethtoolTransmitRegex = regexp.MustCompile(`(^|_)tx(_|$)`) +) + +type Ethtool interface { + DriverInfo(string) (ethtool.DrvInfo, error) + Stats(string) (map[string]uint64, error) + LinkInfo(string) (ethtool.EthtoolCmd, error) +} + +type ethtoolLibrary struct { + ethtool *ethtool.Ethtool +} + +func (e *ethtoolLibrary) DriverInfo(intf string) (ethtool.DrvInfo, error) { + return e.ethtool.DriverInfo(intf) +} + +func (e *ethtoolLibrary) Stats(intf string) (map[string]uint64, error) { + return e.ethtool.Stats(intf) +} + +func (e *ethtoolLibrary) LinkInfo(intf string) (ethtool.EthtoolCmd, error) { + var ethtoolCmd ethtool.EthtoolCmd + _, err := ethtoolCmd.CmdGet(intf) + return ethtoolCmd, err +} + +type ethtoolCollector struct { + fs sysfs.FS + entries map[string]*prometheus.Desc + entriesMutex sync.Mutex + ethtool Ethtool + deviceFilter deviceFilter + infoDesc *prometheus.Desc + metricsPattern *regexp.Regexp + logger *slog.Logger +} + +// makeEthtoolCollector is the internal constructor for EthtoolCollector. +// This allows NewEthtoolTestCollector to override its .ethtool interface +// for testing. +func makeEthtoolCollector(logger *slog.Logger) (*ethtoolCollector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + e, err := ethtool.NewEthtool() + if err != nil { + return nil, fmt.Errorf("failed to initialize ethtool library: %w", err) + } + + if *ethtoolDeviceInclude != "" { + logger.Info("Parsed flag --collector.ethtool.device-include", "flag", *ethtoolDeviceInclude) + } + if *ethtoolDeviceExclude != "" { + logger.Info("Parsed flag --collector.ethtool.device-exclude", "flag", *ethtoolDeviceExclude) + } + if *ethtoolIncludedMetrics != "" { + logger.Info("Parsed flag --collector.ethtool.metrics-include", "flag", *ethtoolIncludedMetrics) + } + + // Pre-populate some common ethtool metrics. 
+ return &ethtoolCollector{
+   fs: fs,
+   ethtool: &ethtoolLibrary{e},
+   deviceFilter: newDeviceFilter(*ethtoolDeviceExclude, *ethtoolDeviceInclude),
+   metricsPattern: regexp.MustCompile(*ethtoolIncludedMetrics),
+   logger: logger,
+   entries: map[string]*prometheus.Desc{
+     "rx_bytes": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "ethtool", "received_bytes_total"),
+       "Network interface bytes received",
+       []string{"device"}, nil,
+     ),
+     "rx_dropped": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "ethtool", "received_dropped_total"),
+       "Number of received frames dropped",
+       []string{"device"}, nil,
+     ),
+     "rx_errors": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "ethtool", "received_errors_total"),
+       "Number of received frames with errors",
+       []string{"device"}, nil,
+     ),
+     "rx_packets": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "ethtool", "received_packets_total"),
+       "Network interface packets received",
+       []string{"device"}, nil,
+     ),
+     "tx_bytes": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "ethtool", "transmitted_bytes_total"),
+       "Network interface bytes sent",
+       []string{"device"}, nil,
+     ),
+     "tx_errors": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "ethtool", "transmitted_errors_total"),
+       "Number of sent frames with errors",
+       []string{"device"}, nil,
+     ),
+     "tx_packets": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "ethtool", "transmitted_packets_total"),
+       "Network interface packets sent",
+       []string{"device"}, nil,
+     ),
+
+     // link info
+     "supported_port": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "supported_port_info"),
+       "Type of ports or PHYs supported by network device",
+       []string{"device", "type"}, nil,
+     ),
+     "supported_speed": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "supported_speed_bytes"),
+       "Combination of speeds and features supported by network device",
+       []string{"device", "duplex", "mode"}, nil,
+     ),
+     "supported_autonegotiate": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "autonegotiate_supported"),
+       "If this port device supports autonegotiate",
+       []string{"device"}, nil,
+     ),
+     "supported_pause": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "pause_supported"),
+       "If this port device supports pause frames",
+       []string{"device"}, nil,
+     ),
+     "supported_asymmetricpause": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "asymmetricpause_supported"),
+       "If this port device supports asymmetric pause frames",
+       []string{"device"}, nil,
+     ),
+     "advertised_speed": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "advertised_speed_bytes"),
+       "Combination of speeds and features offered by network device",
+       []string{"device", "duplex", "mode"}, nil,
+     ),
+     "advertised_autonegotiate": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "autonegotiate_advertised"),
+       "If this port device offers autonegotiate",
+       []string{"device"}, nil,
+     ),
+     "advertised_pause": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "pause_advertised"),
+       "If this port device offers pause capability",
+       []string{"device"}, nil,
+     ),
+     "advertised_asymmetricpause": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "asymmetricpause_advertised"),
+       "If this port device offers asymmetric pause capability",
+       []string{"device"}, nil,
+     ),
+     "autonegotiate": prometheus.NewDesc(
+       prometheus.BuildFQName(namespace, "network", "autonegotiate"),
+       "If this 
port is using autonegotiate", + []string{"device"}, nil, + ), + }, + infoDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "ethtool", "info"), + "A metric with a constant '1' value labeled by bus_info, device, driver, expansion_rom_version, firmware_version, version.", + []string{"bus_info", "device", "driver", "expansion_rom_version", "firmware_version", "version"}, nil, + ), + }, nil +} + +func init() { + registerCollector("ethtool", defaultDisabled, NewEthtoolCollector) +} + +// Generate the fully-qualified metric name for the ethool metric. +func buildEthtoolFQName(metric string) string { + metricName := strings.TrimLeft(strings.ToLower(SanitizeMetricName(metric)), "_") + metricName = ethtoolReceivedRegex.ReplaceAllString(metricName, "${1}received${2}") + metricName = ethtoolTransmitRegex.ReplaceAllString(metricName, "${1}transmitted${2}") + return prometheus.BuildFQName(namespace, "ethtool", metricName) +} + +// NewEthtoolCollector returns a new Collector exposing ethtool stats. +func NewEthtoolCollector(logger *slog.Logger) (Collector, error) { + return makeEthtoolCollector(logger) +} + +// updatePortCapabilities generates metrics for autonegotiate, pause and asymmetricpause. +// The bit offsets here correspond to ethtool_link_mode_bit_indices in linux/include/uapi/linux/ethtool.h +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/ethtool.h +func (c *ethtoolCollector) updatePortCapabilities(ch chan<- prometheus.Metric, prefix string, device string, linkModes uint32) { + var ( + autonegotiate = 0.0 + pause = 0.0 + asymmetricPause = 0.0 + ) + if linkModes&(1<, value is always 1.", + []string{"fc_host", "speed", "port_state", "port_type", "port_id", "port_name", "fabric_name", "symbolic_name", "supported_classes", "supported_speeds", "dev_loss_tmo"}, + nil, + ) + infoValue := 1.0 + + // First push the Host values + ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, utils.SafeDereference( + host.Name, + host.Speed, + host.PortState, + host.PortType, + host.PortID, + host.PortName, + host.FabricName, + host.SymbolicName, + host.SupportedClasses, + host.SupportedSpeeds, + host.DevLossTMO, + )...) + + // Then the counters + // Note: `procfs` guarantees these a safe dereference for these counters. 
+ c.pushCounter(ch, "dumped_frames_total", *host.Counters.DumpedFrames, *host.Name) + c.pushCounter(ch, "error_frames_total", *host.Counters.ErrorFrames, *host.Name) + c.pushCounter(ch, "invalid_crc_total", *host.Counters.InvalidCRCCount, *host.Name) + c.pushCounter(ch, "rx_frames_total", *host.Counters.RXFrames, *host.Name) + c.pushCounter(ch, "rx_words_total", *host.Counters.RXWords, *host.Name) + c.pushCounter(ch, "tx_frames_total", *host.Counters.TXFrames, *host.Name) + c.pushCounter(ch, "tx_words_total", *host.Counters.TXWords, *host.Name) + c.pushCounter(ch, "seconds_since_last_reset_total", *host.Counters.SecondsSinceLastReset, *host.Name) + c.pushCounter(ch, "invalid_tx_words_total", *host.Counters.InvalidTXWordCount, *host.Name) + c.pushCounter(ch, "link_failure_total", *host.Counters.LinkFailureCount, *host.Name) + c.pushCounter(ch, "loss_of_sync_total", *host.Counters.LossOfSyncCount, *host.Name) + c.pushCounter(ch, "loss_of_signal_total", *host.Counters.LossOfSignalCount, *host.Name) + c.pushCounter(ch, "nos_total", *host.Counters.NosCount, *host.Name) + c.pushCounter(ch, "fcp_packet_aborts_total", *host.Counters.FCPPacketAborts, *host.Name) + } + + return nil +} diff --git a/collector/filefd_linux.go b/collector/filefd_linux.go index 450c4e34cb..c24215c43c 100644 --- a/collector/filefd_linux.go +++ b/collector/filefd_linux.go @@ -11,18 +11,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nofilefd +//go:build !nofilefd package collector import ( "bytes" "fmt" - "io/ioutil" + "io" + "log/slog" "os" "strconv" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -31,7 +31,7 @@ const ( ) type fileFDStatCollector struct { - logger log.Logger + logger *slog.Logger } func init() { @@ -39,7 +39,7 @@ func init() { } // NewFileFDStatCollector returns a new Collector exposing file-nr stats. -func NewFileFDStatCollector(logger log.Logger) (Collector, error) { +func NewFileFDStatCollector(logger *slog.Logger) (Collector, error) { return &fileFDStatCollector{logger}, nil } @@ -72,7 +72,7 @@ func parseFileFDStats(filename string) (map[string]string, error) { } defer file.Close() - content, err := ioutil.ReadAll(file) + content, err := io.ReadAll(file) if err != nil { return nil, err } diff --git a/collector/filefd_linux_test.go b/collector/filefd_linux_test.go index 37e16a4e09..b43c1ed29f 100644 --- a/collector/filefd_linux_test.go +++ b/collector/filefd_linux_test.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nofilefd + package collector import "testing" diff --git a/collector/filesystem_aix.go b/collector/filesystem_aix.go new file mode 100644 index 0000000000..e4db6bce99 --- /dev/null +++ b/collector/filesystem_aix.go @@ -0,0 +1,64 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
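The filefd collector's parseFileFDStats above boils down to splitting the three whitespace-separated fields of /proc/sys/fs/file-nr (allocated, unused, maximum). A stand-alone sketch of that parse, assuming the usual three-field layout:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	content, err := os.ReadFile("/proc/sys/fs/file-nr")
	if err != nil {
		panic(err)
	}
	// Expected layout: "<allocated>\t<unused>\t<maximum>\n"
	parts := strings.Fields(string(content))
	if len(parts) != 3 {
		panic(fmt.Sprintf("unexpected file-nr content: %q", content))
	}
	fmt.Println("allocated:", parts[0], "maximum:", parts[2])
}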
+ +//go:build !nofilesystem + +package collector + +import ( + "github.com/power-devops/perfstat" +) + +const ( + defMountPointsExcluded = "^/(dev|aha)($|/)" + defFSTypesExcluded = "^procfs$" +) + +// Expose filesystem fullness. +func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { + fsStat, err := perfstat.FileSystemStat() + if err != nil { + return nil, err + } + for _, stat := range fsStat { + if c.mountPointFilter.ignored(stat.MountPoint) { + c.logger.Debug("Ignoring mount point", "mountpoint", stat.MountPoint) + continue + } + fstype := stat.TypeString() + if c.fsTypeFilter.ignored(fstype) { + c.logger.Debug("Ignoring fs type", "type", fstype) + continue + } + + ro := 0.0 + if stat.Flags&perfstat.VFS_READONLY != 0 { + ro = 1.0 + } + + stats = append(stats, filesystemStats{ + labels: filesystemLabels{ + device: stat.Device, + mountPoint: stat.MountPoint, + fsType: fstype, + }, + size: float64(stat.TotalBlocks * 512.0), + free: float64(stat.FreeBlocks * 512.0), + avail: float64(stat.FreeBlocks * 512.0), // AIX doesn't distinguish between free and available blocks. + files: float64(stat.TotalInodes), + filesFree: float64(stat.FreeInodes), + ro: ro, + }) + } + return stats, nil +} diff --git a/collector/filesystem_bsd.go b/collector/filesystem_bsd.go index 7b30904021..79dde087c3 100644 --- a/collector/filesystem_bsd.go +++ b/collector/filesystem_bsd.go @@ -11,16 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build openbsd darwin,amd64 dragonfly -// +build !nofilesystem +//go:build dragonfly && !nofilesystem package collector import ( "errors" "unsafe" - - "github.com/go-kit/kit/log/level" ) /* @@ -32,9 +29,9 @@ import ( import "C" const ( - defIgnoredMountPoints = "^/(dev)($|/)" - defIgnoredFSTypes = "^devfs$" - readOnly = 0x1 // MNT_RDONLY + defMountPointsExcluded = "^/(dev)($|/)" + defFSTypesExcluded = "^devfs$" + readOnly = 0x1 // MNT_RDONLY ) // Expose filesystem fullness. @@ -49,15 +46,15 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { stats = []filesystemStats{} for i := 0; i < int(count); i++ { mountpoint := C.GoString(&mnt[i].f_mntonname[0]) - if c.ignoredMountPointsPattern.MatchString(mountpoint) { - level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint) + if c.mountPointFilter.ignored(mountpoint) { + c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint) continue } device := C.GoString(&mnt[i].f_mntfromname[0]) fstype := C.GoString(&mnt[i].f_fstypename[0]) - if c.ignoredFSTypesPattern.MatchString(fstype) { - level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype) + if c.fsTypeFilter.ignored(fstype) { + c.logger.Debug("Ignoring fs type", "type", fstype) continue } diff --git a/collector/filesystem_common.go b/collector/filesystem_common.go index 7def2b9933..efcd26a0f9 100644 --- a/collector/filesystem_common.go +++ b/collector/filesystem_common.go @@ -11,56 +11,83 @@ // See the License for the specific language governing permissions and // limitations under the License. 
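The new AIX implementation above relies on perfstat.FileSystemStat(), which appears to report sizes in 512-byte blocks, hence the fixed multiplier in that code. A minimal stand-alone use of the same call, touching only the fields the patch itself uses (AIX-only, like the collector):

package main

import (
	"fmt"

	"github.com/power-devops/perfstat"
)

func main() {
	fsStat, err := perfstat.FileSystemStat()
	if err != nil {
		panic(err)
	}
	for _, stat := range fsStat {
		// 512-byte block unit assumed, matching the collector's conversion.
		sizeBytes := float64(stat.TotalBlocks) * 512.0
		freeBytes := float64(stat.FreeBlocks) * 512.0
		fmt.Printf("%s on %s (%s): %.0f bytes total, %.0f free\n",
			stat.Device, stat.MountPoint, stat.TypeString(), sizeBytes, freeBytes)
	}
}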
-// +build !nofilesystem -// +build linux freebsd openbsd darwin,amd64 dragonfly +//go:build !nofilesystem && (linux || freebsd || netbsd || openbsd || darwin || dragonfly || aix) package collector import ( - "regexp" + "errors" + "fmt" + "log/slog" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" ) // Arch-dependent implementation must define: -// * defIgnoredMountPoints -// * defIgnoredFSTypes +// * defMountPointsExcluded +// * defFSTypesExcluded // * filesystemLabelNames // * filesystemCollector.GetStats var ( - ignoredMountPoints = kingpin.Flag( + mountPointsExcludeSet bool + mountPointsExclude = kingpin.Flag( + "collector.filesystem.mount-points-exclude", + "Regexp of mount points to exclude for filesystem collector. (mutually exclusive to mount-points-include)", + ).Default(defMountPointsExcluded).PreAction(func(c *kingpin.ParseContext) error { + mountPointsExcludeSet = true + return nil + }).String() + oldMountPointsExcluded = kingpin.Flag( "collector.filesystem.ignored-mount-points", "Regexp of mount points to ignore for filesystem collector.", - ).Default(defIgnoredMountPoints).String() - ignoredFSTypes = kingpin.Flag( + ).Hidden().String() + mountPointsInclude = kingpin.Flag( + "collector.filesystem.mount-points-include", + "Regexp of mount points to include for filesystem collector. (mutually exclusive to mount-points-exclude)", + ).String() + + fsTypesExcludeSet bool + fsTypesExclude = kingpin.Flag( + "collector.filesystem.fs-types-exclude", + "Regexp of filesystem types to exclude for filesystem collector. (mutually exclusive to fs-types-include)", + ).Default(defFSTypesExcluded).PreAction(func(c *kingpin.ParseContext) error { + fsTypesExcludeSet = true + return nil + }).String() + oldFSTypesExcluded = kingpin.Flag( "collector.filesystem.ignored-fs-types", "Regexp of filesystem types to ignore for filesystem collector.", - ).Default(defIgnoredFSTypes).String() + ).Hidden().String() + fsTypesInclude = kingpin.Flag( + "collector.filesystem.fs-types-include", + "Regexp of filesystem types to exclude for filesystem collector. (mutually exclusive to fs-types-exclude)", + ).String() - filesystemLabelNames = []string{"device", "mountpoint", "fstype"} + filesystemLabelNames = []string{"device", "mountpoint", "fstype", "device_error"} ) type filesystemCollector struct { - ignoredMountPointsPattern *regexp.Regexp - ignoredFSTypesPattern *regexp.Regexp + mountPointFilter deviceFilter + fsTypeFilter deviceFilter sizeDesc, freeDesc, availDesc *prometheus.Desc filesDesc, filesFreeDesc *prometheus.Desc + purgeableDesc *prometheus.Desc roDesc, deviceErrorDesc *prometheus.Desc - logger log.Logger + mountInfoDesc *prometheus.Desc + logger *slog.Logger } type filesystemLabels struct { - device, mountPoint, fsType, options string + device, mountPoint, fsType, mountOptions, superOptions, deviceError, major, minor string } type filesystemStats struct { labels filesystemLabels size, free, avail float64 files, filesFree float64 + purgeable float64 ro, deviceError float64 } @@ -69,12 +96,8 @@ func init() { } // NewFilesystemCollector returns a new Collector exposing filesystems stats. 
-func NewFilesystemCollector(logger log.Logger) (Collector, error) { - subsystem := "filesystem" - level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.ignored-mount-points", "flag", *ignoredMountPoints) - mountPointPattern := regexp.MustCompile(*ignoredMountPoints) - level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.ignored-fs-types", "flag", *ignoredMountPoints) - filesystemsTypesPattern := regexp.MustCompile(*ignoredFSTypes) +func NewFilesystemCollector(logger *slog.Logger) (Collector, error) { + const subsystem = "filesystem" sizeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "size_bytes"), @@ -106,6 +129,12 @@ func NewFilesystemCollector(logger log.Logger) (Collector, error) { filesystemLabelNames, nil, ) + purgeableDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "purgeable_bytes"), + "Filesystem space available including purgeable space (MacOS specific).", + filesystemLabelNames, nil, + ) + roDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "readonly"), "Filesystem read-only status.", @@ -118,17 +147,36 @@ func NewFilesystemCollector(logger log.Logger) (Collector, error) { filesystemLabelNames, nil, ) + mountInfoDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "mount_info"), + "Filesystem mount information.", + []string{"device", "major", "minor", "mountpoint"}, + nil, + ) + + mountPointFilter, err := newMountPointsFilter(logger) + if err != nil { + return nil, fmt.Errorf("unable to parse mount points filter flags: %w", err) + } + + fsTypeFilter, err := newFSTypeFilter(logger) + if err != nil { + return nil, fmt.Errorf("unable to parse fs types filter flags: %w", err) + } + return &filesystemCollector{ - ignoredMountPointsPattern: mountPointPattern, - ignoredFSTypesPattern: filesystemsTypesPattern, - sizeDesc: sizeDesc, - freeDesc: freeDesc, - availDesc: availDesc, - filesDesc: filesDesc, - filesFreeDesc: filesFreeDesc, - roDesc: roDesc, - deviceErrorDesc: deviceErrorDesc, - logger: logger, + mountPointFilter: mountPointFilter, + fsTypeFilter: fsTypeFilter, + sizeDesc: sizeDesc, + freeDesc: freeDesc, + availDesc: availDesc, + filesDesc: filesDesc, + filesFreeDesc: filesFreeDesc, + purgeableDesc: purgeableDesc, + roDesc: roDesc, + deviceErrorDesc: deviceErrorDesc, + mountInfoDesc: mountInfoDesc, + logger: logger, }, nil } @@ -147,36 +195,105 @@ func (c *filesystemCollector) Update(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.deviceErrorDesc, prometheus.GaugeValue, - s.deviceError, s.labels.device, s.labels.mountPoint, s.labels.fsType, + s.deviceError, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, ) + ch <- prometheus.MustNewConstMetric( + c.roDesc, prometheus.GaugeValue, + s.ro, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, + ) + if s.deviceError > 0 { continue } ch <- prometheus.MustNewConstMetric( c.sizeDesc, prometheus.GaugeValue, - s.size, s.labels.device, s.labels.mountPoint, s.labels.fsType, + s.size, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, ) ch <- prometheus.MustNewConstMetric( c.freeDesc, prometheus.GaugeValue, - s.free, s.labels.device, s.labels.mountPoint, s.labels.fsType, + s.free, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, ) ch <- prometheus.MustNewConstMetric( c.availDesc, prometheus.GaugeValue, - s.avail, s.labels.device, s.labels.mountPoint, s.labels.fsType, + s.avail, 
s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, ) ch <- prometheus.MustNewConstMetric( c.filesDesc, prometheus.GaugeValue, - s.files, s.labels.device, s.labels.mountPoint, s.labels.fsType, + s.files, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, ) ch <- prometheus.MustNewConstMetric( c.filesFreeDesc, prometheus.GaugeValue, - s.filesFree, s.labels.device, s.labels.mountPoint, s.labels.fsType, + s.filesFree, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, ) ch <- prometheus.MustNewConstMetric( - c.roDesc, prometheus.GaugeValue, - s.ro, s.labels.device, s.labels.mountPoint, s.labels.fsType, + c.mountInfoDesc, prometheus.GaugeValue, + 1.0, s.labels.device, s.labels.major, s.labels.minor, s.labels.mountPoint, ) + if s.purgeable >= 0 { + ch <- prometheus.MustNewConstMetric( + c.purgeableDesc, prometheus.GaugeValue, + s.purgeable, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, + ) + } } return nil } + +func newMountPointsFilter(logger *slog.Logger) (deviceFilter, error) { + if *oldMountPointsExcluded != "" { + if !mountPointsExcludeSet { + logger.Warn("--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude") + *mountPointsExclude = *oldMountPointsExcluded + } else { + return deviceFilter{}, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive") + } + } + + if *mountPointsInclude != "" && !mountPointsExcludeSet { + logger.Debug("mount-points-exclude flag not set when mount-points-include flag is set, assuming include is desired") + *mountPointsExclude = "" + } + + if *mountPointsExclude != "" && *mountPointsInclude != "" { + return deviceFilter{}, errors.New("--collector.filesystem.mount-points-exclude and --collector.filesystem.mount-points-include are mutually exclusive") + } + + if *mountPointsExclude != "" { + logger.Info("Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude) + } + if *mountPointsInclude != "" { + logger.Info("Parsed flag --collector.filesystem.mount-points-include", "flag", *mountPointsInclude) + } + + return newDeviceFilter(*mountPointsExclude, *mountPointsInclude), nil +} + +func newFSTypeFilter(logger *slog.Logger) (deviceFilter, error) { + if *oldFSTypesExcluded != "" { + if !fsTypesExcludeSet { + logger.Warn("--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude") + *fsTypesExclude = *oldFSTypesExcluded + } else { + return deviceFilter{}, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive") + } + } + + if *fsTypesInclude != "" && !fsTypesExcludeSet { + logger.Debug("fs-types-exclude flag not set when fs-types-include flag is set, assuming include is desired") + *fsTypesExclude = "" + } + + if *fsTypesExclude != "" && *fsTypesInclude != "" { + return deviceFilter{}, errors.New("--collector.filesystem.fs-types-exclude and --collector.filesystem.fs-types-include are mutually exclusive") + } + + if *fsTypesExclude != "" { + logger.Info("Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude) + } + if *fsTypesInclude != "" { + logger.Info("Parsed flag --collector.filesystem.fs-types-include", "flag", *fsTypesInclude) + } + + return newDeviceFilter(*fsTypesExclude, *fsTypesInclude), nil +} diff --git 
a/collector/filesystem_freebsd.go b/collector/filesystem_freebsd.go index 1d377b1ff5..cbc99d9f02 100644 --- a/collector/filesystem_freebsd.go +++ b/collector/filesystem_freebsd.go @@ -11,50 +11,52 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nofilesystem +//go:build !nofilesystem package collector import ( - "github.com/go-kit/kit/log/level" "golang.org/x/sys/unix" ) const ( - defIgnoredMountPoints = "^/(dev)($|/)" - defIgnoredFSTypes = "^devfs$" - readOnly = 0x1 // MNT_RDONLY - noWait = 0x2 // MNT_NOWAIT + defMountPointsExcluded = "^/(dev)($|/)" + defFSTypesExcluded = "^devfs$" ) // Expose filesystem fullness. func (c *filesystemCollector) GetStats() ([]filesystemStats, error) { - n, err := unix.Getfsstat(nil, noWait) + n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT) if err != nil { return nil, err } buf := make([]unix.Statfs_t, n) - _, err = unix.Getfsstat(buf, noWait) + _, err = unix.Getfsstat(buf, unix.MNT_NOWAIT) if err != nil { return nil, err } stats := []filesystemStats{} for _, fs := range buf { - mountpoint := bytesToString(fs.Mntonname[:]) - if c.ignoredMountPointsPattern.MatchString(mountpoint) { - level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint) + mountpoint := unix.ByteSliceToString(fs.Mntonname[:]) + if c.mountPointFilter.ignored(mountpoint) { + c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint) continue } - device := bytesToString(fs.Mntfromname[:]) - fstype := bytesToString(fs.Fstypename[:]) - if c.ignoredFSTypesPattern.MatchString(fstype) { - level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype) + device := unix.ByteSliceToString(fs.Mntfromname[:]) + fstype := unix.ByteSliceToString(fs.Fstypename[:]) + if c.fsTypeFilter.ignored(fstype) { + c.logger.Debug("Ignoring fs type", "type", fstype) + continue + } + + if (fs.Flags & unix.MNT_IGNORE) != 0 { + c.logger.Debug("Ignoring mount flagged as ignore", "mountpoint", mountpoint) continue } var ro float64 - if (fs.Flags & readOnly) != 0 { + if (fs.Flags & unix.MNT_RDONLY) != 0 { ro = 1 } diff --git a/collector/filesystem_linux.go b/collector/filesystem_linux.go index 00a7323154..3739f0fee8 100644 --- a/collector/filesystem_linux.go +++ b/collector/filesystem_linux.go @@ -11,34 +11,39 @@ // See the License for the specific language governing permissions and // limitations under the License. 
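The FreeBSD change above keeps the usual two-call Getfsstat idiom: call once with a nil slice to learn how many mounts there are, then again with a sized slice to fill. In isolation, using only the golang.org/x/sys/unix calls already present in this change (FreeBSD/Darwin only):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// First call sizes the buffer, second call fills it.
	n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
	if err != nil {
		panic(err)
	}
	buf := make([]unix.Statfs_t, n)
	if _, err := unix.Getfsstat(buf, unix.MNT_NOWAIT); err != nil {
		panic(err)
	}
	for _, fs := range buf {
		fmt.Printf("%-10s %-30s ro=%v\n",
			unix.ByteSliceToString(fs.Fstypename[:]),
			unix.ByteSliceToString(fs.Mntonname[:]),
			fs.Flags&unix.MNT_RDONLY != 0)
	}
}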
-// +build !nofilesystem +//go:build !nofilesystem package collector import ( - "bufio" + "bytes" "errors" "fmt" - "io" + "log/slog" "os" + "slices" + "strconv" "strings" "sync" "time" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "golang.org/x/sys/unix" - "gopkg.in/alecthomas/kingpin.v2" + + "github.com/prometheus/procfs" ) const ( - defIgnoredMountPoints = "^/(dev|proc|sys|var/lib/docker/.+)($|/)" - defIgnoredFSTypes = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$" + defMountPointsExcluded = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)" + defFSTypesExcluded = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|erofs|sysfs|tracefs)$" ) var mountTimeout = kingpin.Flag("collector.filesystem.mount-timeout", "how long to wait for a mount to respond before marking it as stale"). Hidden().Default("5s").Duration() +var statWorkerCount = kingpin.Flag("collector.filesystem.stat-workers", + "how many stat calls to process simultaneously"). + Hidden().Default("4").Int() var stuckMounts = make(map[string]struct{}) var stuckMountsMtx = &sync.Mutex{} @@ -49,133 +54,196 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) { return nil, err } stats := []filesystemStats{} - for _, labels := range mps { - if c.ignoredMountPointsPattern.MatchString(labels.mountPoint) { - level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", labels.mountPoint) - continue - } - if c.ignoredFSTypesPattern.MatchString(labels.fsType) { - level.Debug(c.logger).Log("msg", "Ignoring fs", "type", labels.fsType) - continue - } - stuckMountsMtx.Lock() - if _, ok := stuckMounts[labels.mountPoint]; ok { - stats = append(stats, filesystemStats{ - labels: labels, - deviceError: 1, - }) - level.Debug(c.logger).Log("msg", "Mount point is in an unresponsive state", "mountpoint", labels.mountPoint) + labelChan := make(chan filesystemLabels) + statChan := make(chan filesystemStats) + wg := sync.WaitGroup{} + + workerCount := max(*statWorkerCount, 1) + + for i := 0; i < workerCount; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for labels := range labelChan { + statChan <- c.processStat(labels) + } + }() + } + + go func() { + for _, labels := range mps { + if c.mountPointFilter.ignored(labels.mountPoint) { + c.logger.Debug("Ignoring mount point", "mountpoint", labels.mountPoint) + continue + } + if c.fsTypeFilter.ignored(labels.fsType) { + c.logger.Debug("Ignoring fs type", "type", labels.fsType) + continue + } + + stuckMountsMtx.Lock() + if _, ok := stuckMounts[labels.mountPoint]; ok { + labels.deviceError = "mountpoint timeout" + stats = append(stats, filesystemStats{ + labels: labels, + deviceError: 1, + }) + c.logger.Debug("Mount point is in an unresponsive state", "mountpoint", labels.mountPoint) + stuckMountsMtx.Unlock() + continue + } + stuckMountsMtx.Unlock() - continue + labelChan <- labels } - stuckMountsMtx.Unlock() + close(labelChan) + wg.Wait() + close(statChan) + }() - // The success channel is used do tell the "watcher" that the stat - // finished successfully. The channel is closed on success. 
- success := make(chan struct{}) - go stuckMountWatcher(labels.mountPoint, success, c.logger) + for stat := range statChan { + stats = append(stats, stat) + } + return stats, nil +} - buf := new(unix.Statfs_t) - err = unix.Statfs(rootfsFilePath(labels.mountPoint), buf) - stuckMountsMtx.Lock() - close(success) - // If the mount has been marked as stuck, unmark it and log it's recovery. - if _, ok := stuckMounts[labels.mountPoint]; ok { - level.Debug(c.logger).Log("msg", "Mount point has recovered, monitoring will resume", "mountpoint", labels.mountPoint) - delete(stuckMounts, labels.mountPoint) - } - stuckMountsMtx.Unlock() +func (c *filesystemCollector) processStat(labels filesystemLabels) filesystemStats { + var ro float64 + if isFilesystemReadOnly(labels) { + ro = 1 + } - if err != nil { - stats = append(stats, filesystemStats{ - labels: labels, - deviceError: 1, - }) + success := make(chan struct{}) + go stuckMountWatcher(labels.mountPoint, success, c.logger) - level.Debug(c.logger).Log("msg", "Error on statfs() system call", "rootfs", rootfsFilePath(labels.mountPoint), "err", err) - continue - } + buf := new(unix.Statfs_t) + err := unix.Statfs(rootfsFilePath(labels.mountPoint), buf) + stuckMountsMtx.Lock() + close(success) - var ro float64 - for _, option := range strings.Split(labels.options, ",") { - if option == "ro" { - ro = 1 - break - } + // If the mount has been marked as stuck, unmark it and log it's recovery. + if _, ok := stuckMounts[labels.mountPoint]; ok { + c.logger.Debug("Mount point has recovered, monitoring will resume", "mountpoint", labels.mountPoint) + delete(stuckMounts, labels.mountPoint) + } + stuckMountsMtx.Unlock() + + // Remove options from labels because options will not be used from this point forward + // and keeping them can lead to errors when the same device is mounted to the same mountpoint + // twice, with different options (metrics would be recorded multiple times). + labels.mountOptions = "" + labels.superOptions = "" + + if err != nil { + labels.deviceError = err.Error() + c.logger.Debug("Error on statfs() system call", "rootfs", rootfsFilePath(labels.mountPoint), "err", err) + return filesystemStats{ + labels: labels, + deviceError: 1, + ro: ro, } + } - stats = append(stats, filesystemStats{ - labels: labels, - size: float64(buf.Blocks) * float64(buf.Bsize), - free: float64(buf.Bfree) * float64(buf.Bsize), - avail: float64(buf.Bavail) * float64(buf.Bsize), - files: float64(buf.Files), - filesFree: float64(buf.Ffree), - ro: ro, - }) + return filesystemStats{ + labels: labels, + size: float64(buf.Blocks) * float64(buf.Bsize), + free: float64(buf.Bfree) * float64(buf.Bsize), + avail: float64(buf.Bavail) * float64(buf.Bsize), + files: float64(buf.Files), + filesFree: float64(buf.Ffree), + ro: ro, } - return stats, nil } // stuckMountWatcher listens on the given success channel and if the channel closes // then the watcher does nothing. If instead the timeout is reached, the // mount point that is being watched is marked as stuck. 
-func stuckMountWatcher(mountPoint string, success chan struct{}, logger log.Logger) { +func stuckMountWatcher(mountPoint string, success chan struct{}, logger *slog.Logger) { + mountCheckTimer := time.NewTimer(*mountTimeout) + defer mountCheckTimer.Stop() select { case <-success: // Success - case <-time.After(*mountTimeout): + case <-mountCheckTimer.C: // Timed out, mark mount as stuck stuckMountsMtx.Lock() select { case <-success: // Success came in just after the timeout was reached, don't label the mount as stuck default: - level.Debug(logger).Log("msg", "Mount point timed out, it is being labeled as stuck and will not be monitored", "mountpoint", mountPoint) + logger.Debug("Mount point timed out, it is being labeled as stuck and will not be monitored", "mountpoint", mountPoint) stuckMounts[mountPoint] = struct{}{} } stuckMountsMtx.Unlock() } } -func mountPointDetails(logger log.Logger) ([]filesystemLabels, error) { - file, err := os.Open(procFilePath("1/mounts")) +func mountPointDetails(logger *slog.Logger) ([]filesystemLabels, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + mountInfo, err := fs.GetProcMounts(1) if errors.Is(err, os.ErrNotExist) { - // Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due hidepid. - level.Debug(logger).Log("msg", "Reading root mounts failed, falling back to system mounts", "err", err) - file, err = os.Open(procFilePath("mounts")) + // Fallback to `/proc/self/mountinfo` if `/proc/1/mountinfo` is missing due hidepid. + logger.Debug("Reading root mounts failed, falling back to self mounts", "err", err) + mountInfo, err = fs.GetMounts() } if err != nil { return nil, err } - defer file.Close() - return parseFilesystemLabels(file) + return parseFilesystemLabels(mountInfo) } -func parseFilesystemLabels(r io.Reader) ([]filesystemLabels, error) { +func parseFilesystemLabels(mountInfo []*procfs.MountInfo) ([]filesystemLabels, error) { var filesystems []filesystemLabels - scanner := bufio.NewScanner(r) - for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - - if len(parts) < 4 { - return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text()) + for _, mount := range mountInfo { + major, minor := 0, 0 + _, err := fmt.Sscanf(mount.MajorMinorVer, "%d:%d", &major, &minor) + if err != nil { + return nil, fmt.Errorf("malformed mount point MajorMinorVer: %q", mount.MajorMinorVer) } // Ensure we handle the translation of \040 and \011 // as per fstab(5). 
- parts[1] = strings.Replace(parts[1], "\\040", " ", -1) - parts[1] = strings.Replace(parts[1], "\\011", "\t", -1) + mount.MountPoint = strings.ReplaceAll(mount.MountPoint, "\\040", " ") + mount.MountPoint = strings.ReplaceAll(mount.MountPoint, "\\011", "\t") filesystems = append(filesystems, filesystemLabels{ - device: parts[0], - mountPoint: rootfsStripPrefix(parts[1]), - fsType: parts[2], - options: parts[3], + device: mount.Source, + mountPoint: rootfsStripPrefix(mount.MountPoint), + fsType: mount.FSType, + mountOptions: mountOptionsString(mount.Options), + superOptions: mountOptionsString(mount.SuperOptions), + major: strconv.Itoa(major), + minor: strconv.Itoa(minor), + deviceError: "", }) } - return filesystems, scanner.Err() + return filesystems, nil +} + +// see https://github.com/prometheus/node_exporter/issues/3157#issuecomment-2422761187 +// if either mount or super options contain "ro" the filesystem is read-only +func isFilesystemReadOnly(labels filesystemLabels) bool { + if slices.Contains(strings.Split(labels.mountOptions, ","), "ro") || slices.Contains(strings.Split(labels.superOptions, ","), "ro") { + return true + } + + return false +} + +func mountOptionsString(m map[string]string) string { + b := new(bytes.Buffer) + for key, value := range m { + if value == "" { + fmt.Fprintf(b, "%s", key) + } else { + fmt.Fprintf(b, "%s=%s", key, value) + } + } + return b.String() } diff --git a/collector/filesystem_linux_test.go b/collector/filesystem_linux_test.go index e40177906f..a838e93271 100644 --- a/collector/filesystem_linux_test.go +++ b/collector/filesystem_linux_test.go @@ -11,36 +11,83 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nofilesystem + package collector import ( - "github.com/go-kit/kit/log" - "strings" + "io" + "log/slog" "testing" - kingpin "gopkg.in/alecthomas/kingpin.v2" + "github.com/alecthomas/kingpin/v2" + + "github.com/prometheus/procfs" ) func Test_parseFilesystemLabelsError(t *testing.T) { tests := []struct { name string - in string + in []*procfs.MountInfo }{ { - name: "too few fields", - in: "hello world", + name: "malformed Major:Minor", + in: []*procfs.MountInfo{ + { + MajorMinorVer: "nope", + }, + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if _, err := parseFilesystemLabels(strings.NewReader(tt.in)); err == nil { + if _, err := parseFilesystemLabels(tt.in); err == nil { t.Fatal("expected an error, but none occurred") } }) } } +func Test_isFilesystemReadOnly(t *testing.T) { + tests := map[string]struct { + labels filesystemLabels + expected bool + }{ + "/media/volume1": { + labels: filesystemLabels{ + mountOptions: "rw,nosuid,nodev,noexec,relatime", + superOptions: "rw,devices", + }, + expected: false, + }, + "/media/volume2": { + labels: filesystemLabels{ + mountOptions: "ro,relatime", + superOptions: "rw,fd=22,pgrp=1,timeout=300,minproto=5,maxproto=5,direct", + }, expected: true, + }, + "/media/volume3": { + labels: filesystemLabels{ + mountOptions: "rw,user_id=1000,group_id=1000", + superOptions: "ro", + }, expected: true, + }, + "/media/volume4": { + labels: filesystemLabels{ + mountOptions: "ro,nosuid,noexec", + superOptions: "ro,nodev", + }, expected: true, + }, + } + + for _, tt := range tests { + if got := isFilesystemReadOnly(tt.labels); got != tt.expected { + t.Errorf("Expected %t, got %t", tt.expected, got) + } + } +} + func TestMountPointDetails(t *testing.T) { if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", 
"./fixtures/proc"}); err != nil { t.Fatal(err) @@ -77,17 +124,26 @@ func TestMountPointDetails(t *testing.T) { "/run/user/1000/gvfs": "", "/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore] bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk": "", "/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore] bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk": "", + "/var/lib/containers/storage/overlay": "", } - filesystems, err := mountPointDetails(log.NewNopLogger()) + filesystems, err := mountPointDetails(slog.New(slog.NewTextHandler(io.Discard, nil))) if err != nil { t.Log(err) } + foundSet := map[string]bool{} for _, fs := range filesystems { if _, ok := expected[fs.mountPoint]; !ok { t.Errorf("Got unexpected %s", fs.mountPoint) } + foundSet[fs.mountPoint] = true + } + + for mountPoint := range expected { + if _, ok := foundSet[mountPoint]; !ok { + t.Errorf("Expected %s, got nothing", mountPoint) + } } } @@ -100,7 +156,7 @@ func TestMountsFallback(t *testing.T) { "/": "", } - filesystems, err := mountPointDetails(log.NewNopLogger()) + filesystems, err := mountPointDetails(slog.New(slog.NewTextHandler(io.Discard, nil))) if err != nil { t.Log(err) } @@ -128,7 +184,7 @@ func TestPathRootfs(t *testing.T) { "/sys/fs/cgroup": "", } - filesystems, err := mountPointDetails(log.NewNopLogger()) + filesystems, err := mountPointDetails(slog.New(slog.NewTextHandler(io.Discard, nil))) if err != nil { t.Log(err) } diff --git a/collector/filesystem_macos.go b/collector/filesystem_macos.go new file mode 100644 index 0000000000..be0a49062f --- /dev/null +++ b/collector/filesystem_macos.go @@ -0,0 +1,110 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !nofilesystem + +package collector + +/* +#cgo CFLAGS: -x objective-c +#cgo LDFLAGS: -framework Foundation +#import +#include +#include +#include +#include + +double purgeable(char *path) { + double value = -1.0f; + + @autoreleasepool { + NSError *error = nil; + NSString *str = [NSString stringWithUTF8String:path]; + NSURL *fileURL = [[NSURL alloc] initFileURLWithPath:str]; + + NSDictionary *results = [fileURL resourceValuesForKeys:@[NSURLVolumeAvailableCapacityForImportantUsageKey] error:&error]; + if (results) { + CFNumberRef tmp = CFDictionaryGetValue((CFDictionaryRef)results, NSURLVolumeAvailableCapacityForImportantUsageKey); + if (tmp != NULL) { + CFNumberGetValue(tmp, kCFNumberFloat64Type, &value); + } + } + + [fileURL release]; + } + + return value; +} +*/ +import "C" + +import ( + "errors" + "unsafe" +) + +const ( + defMountPointsExcluded = "^/(dev)($|/)" + defFSTypesExcluded = "^devfs$" + readOnly = 0x1 // MNT_RDONLY +) + +// Expose filesystem fullness. 
+func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { + var mntbuf *C.struct_statfs + count := C.getmntinfo(&mntbuf, C.MNT_NOWAIT) + if count == 0 { + return nil, errors.New("getmntinfo() failed") + } + + mnt := (*[1 << 20]C.struct_statfs)(unsafe.Pointer(mntbuf)) + stats = []filesystemStats{} + for i := 0; i < int(count); i++ { + mountpoint := C.GoString(&mnt[i].f_mntonname[0]) + if c.mountPointFilter.ignored(mountpoint) { + c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint) + continue + } + + device := C.GoString(&mnt[i].f_mntfromname[0]) + fstype := C.GoString(&mnt[i].f_fstypename[0]) + if c.fsTypeFilter.ignored(fstype) { + c.logger.Debug("Ignoring fs type", "type", fstype) + continue + } + + var ro float64 + if (mnt[i].f_flags & readOnly) != 0 { + ro = 1 + } + + mountpointCString := C.CString(mountpoint) + defer C.free(unsafe.Pointer(mountpointCString)) + + stats = append(stats, filesystemStats{ + labels: filesystemLabels{ + device: device, + mountPoint: rootfsStripPrefix(mountpoint), + fsType: fstype, + }, + size: float64(mnt[i].f_blocks) * float64(mnt[i].f_bsize), + free: float64(mnt[i].f_bfree) * float64(mnt[i].f_bsize), + avail: float64(mnt[i].f_bavail) * float64(mnt[i].f_bsize), + files: float64(mnt[i].f_files), + filesFree: float64(mnt[i].f_ffree), + purgeable: float64(C.purgeable(mountpointCString)), + ro: ro, + }) + } + return stats, nil +} diff --git a/collector/filesystem_netbsd.go b/collector/filesystem_netbsd.go new file mode 100644 index 0000000000..21f73abba1 --- /dev/null +++ b/collector/filesystem_netbsd.go @@ -0,0 +1,131 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nofilesystem + +package collector + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + defMountPointsExcluded = "^/(dev)($|/)" + defFSTypesExcluded = "^(kernfs|procfs|ptyfs|fdesc)$" + _VFS_NAMELEN = 32 + _VFS_MNAMELEN = 1024 +) + +/* + * Go uses the NetBSD 9 ABI and thus syscall.SYS_GETVFSSTAT is compat_90_getvfsstat. + * We have to declare struct statvfs90 because it is not included in the unix package. + * See NetBSD/src/sys/compat/sys/statvfs.h. + */ +type statvfs90 struct { + F_flag uint + F_bsize uint + F_frsize uint + F_iosize uint + + F_blocks uint64 + F_bfree uint64 + F_bavail uint64 + F_bresvd uint64 + + F_files uint64 + F_ffree uint64 + F_favail uint64 + F_fresvd uint64 + + F_syncreads uint64 + F_syncwrites uint64 + + F_asyncreads uint64 + F_asyncwrites uint64 + + F_fsidx [2]uint32 + F_fsid uint32 + F_namemax uint + F_owner uint32 + F_spare [4]uint32 + + F_fstypename [_VFS_NAMELEN]byte + F_mntonname [_VFS_MNAMELEN]byte + F_mntfromname [_VFS_MNAMELEN]byte + + cgo_pad [4]byte +} + +func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { + var mnt []statvfs90 + if syscall.SYS_GETVFSSTAT != 356 /* compat_90_getvfsstat */ { + /* + * Catch if golang ever updates to newer ABI and bail. 
+		 */
+		return nil, fmt.Errorf("getvfsstat: ABI mismatch")
+	}
+	for {
+		r1, _, errno := syscall.Syscall(syscall.SYS_GETVFSSTAT, uintptr(0), 0, unix.ST_NOWAIT)
+		if errno != 0 {
+			return nil, fmt.Errorf("getvfsstat: %w", errno)
+		}
+		mnt = make([]statvfs90, r1, r1)
+		r2, _, errno := syscall.Syscall(syscall.SYS_GETVFSSTAT, uintptr(unsafe.Pointer(&mnt[0])), unsafe.Sizeof(mnt[0])*r1, unix.ST_NOWAIT /* ST_NOWAIT */)
+		if errno != 0 {
+			return nil, fmt.Errorf("getvfsstat: %w", errno)
+		}
+		if r1 == r2 {
+			break
+		}
+	}
+
+	stats = []filesystemStats{}
+	for _, v := range mnt {
+		mountpoint := unix.ByteSliceToString(v.F_mntonname[:])
+		if c.mountPointFilter.ignored(mountpoint) {
+			c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
+			continue
+		}
+
+		device := unix.ByteSliceToString(v.F_mntfromname[:])
+		fstype := unix.ByteSliceToString(v.F_fstypename[:])
+		if c.fsTypeFilter.ignored(fstype) {
+			c.logger.Debug("Ignoring fs type", "type", fstype)
+			continue
+		}
+
+		var ro float64
+		if (v.F_flag & unix.MNT_RDONLY) != 0 {
+			ro = 1
+		}
+
+		stats = append(stats, filesystemStats{
+			labels: filesystemLabels{
+				device:     device,
+				mountPoint: mountpoint,
+				fsType:     fstype,
+			},
+			size:      float64(v.F_blocks) * float64(v.F_bsize),
+			free:      float64(v.F_bfree) * float64(v.F_bsize),
+			avail:     float64(v.F_bavail) * float64(v.F_bsize),
+			files:     float64(v.F_files),
+			filesFree: float64(v.F_ffree),
+			ro:        ro,
+		})
+	}
+	return stats, nil
+}
diff --git a/collector/filesystem_openbsd.go b/collector/filesystem_openbsd.go
new file mode 100644
index 0000000000..3ca929d845
--- /dev/null
+++ b/collector/filesystem_openbsd.go
@@ -0,0 +1,75 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !nofilesystem
+
+package collector
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+const (
+	defMountPointsExcluded = "^/(dev)($|/)"
+	defFSTypesExcluded     = "^devfs$"
+)
+
+// Expose filesystem fullness.
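Both BSD implementations in this patch use the same size-then-fill convention: one call to learn how many filesystems are mounted, then a second call into a buffer of exactly that size. The NetBSD loop above retries until the two counts agree because the mount table can change between the calls; the OpenBSD code below performs the two calls once, and the nil first argument in its first Getfsstat call is deliberate, since a nil buffer makes the syscall return only the count. A minimal sketch of that convention, assuming golang.org/x/sys/unix on an OpenBSD target (field names differ slightly on the other BSDs; illustrative only, not the collector's code):

package collector

import "golang.org/x/sys/unix"

// listMountPoints is a hypothetical example, not part of the patch.
func listMountPoints() ([]string, error) {
	// First call: a nil buffer asks Getfsstat only for the number of mounts.
	n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
	if err != nil {
		return nil, err
	}
	// Second call: fill a buffer sized from the first answer.
	buf := make([]unix.Statfs_t, n)
	if _, err := unix.Getfsstat(buf, unix.MNT_NOWAIT); err != nil {
		return nil, err
	}
	mountpoints := make([]string, 0, len(buf))
	for _, fs := range buf {
		// Names are NUL-terminated fixed-size arrays; ByteSliceToString trims them.
		mountpoints = append(mountpoints, unix.ByteSliceToString(fs.F_mntonname[:]))
	}
	return mountpoints, nil
}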
+func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { + var mnt []unix.Statfs_t + size, err := unix.Getfsstat(mnt, unix.MNT_NOWAIT) + if err != nil { + return nil, err + } + mnt = make([]unix.Statfs_t, size) + _, err = unix.Getfsstat(mnt, unix.MNT_NOWAIT) + if err != nil { + return nil, err + } + + stats = []filesystemStats{} + for _, v := range mnt { + mountpoint := unix.ByteSliceToString(v.F_mntonname[:]) + if c.mountPointFilter.ignored(mountpoint) { + c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint) + continue + } + + device := unix.ByteSliceToString(v.F_mntfromname[:]) + fstype := unix.ByteSliceToString(v.F_fstypename[:]) + if c.fsTypeFilter.ignored(fstype) { + c.logger.Debug("Ignoring fs type", "type", fstype) + continue + } + + var ro float64 + if (v.F_flags & unix.MNT_RDONLY) != 0 { + ro = 1 + } + + stats = append(stats, filesystemStats{ + labels: filesystemLabels{ + device: device, + mountPoint: mountpoint, + fsType: fstype, + }, + size: float64(v.F_blocks) * float64(v.F_bsize), + free: float64(v.F_bfree) * float64(v.F_bsize), + avail: float64(v.F_bavail) * float64(v.F_bsize), + files: float64(v.F_files), + filesFree: float64(v.F_ffree), + ro: ro, + }) + } + return stats, nil +} diff --git a/collector/fixtures/e2e-64k-page-output.txt b/collector/fixtures/e2e-64k-page-output.txt index fe32915bfc..522350cd7d 100644 --- a/collector/fixtures/e2e-64k-page-output.txt +++ b/collector/fixtures/e2e-64k-page-output.txt @@ -1,57 +1,59 @@ -# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. # TYPE go_gc_duration_seconds summary +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent. +# TYPE go_gc_gogc_percent gauge +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes. +# TYPE go_gc_gomemlimit_bytes gauge # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge # HELP go_info Information about the Go environment. # TYPE go_info gauge -# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. # TYPE go_memstats_alloc_bytes gauge -# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. # TYPE go_memstats_alloc_bytes_total counter -# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. # TYPE go_memstats_buck_hash_sys_bytes gauge -# HELP go_memstats_frees_total Total number of frees. +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. 
# TYPE go_memstats_frees_total counter -# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. -# TYPE go_memstats_gc_cpu_fraction gauge -# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. # TYPE go_memstats_gc_sys_bytes gauge -# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes. # TYPE go_memstats_heap_alloc_bytes gauge -# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. # TYPE go_memstats_heap_idle_bytes gauge -# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes # TYPE go_memstats_heap_inuse_bytes gauge -# HELP go_memstats_heap_objects Number of allocated objects. +# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects. # TYPE go_memstats_heap_objects gauge -# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes. # TYPE go_memstats_heap_released_bytes gauge -# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. # TYPE go_memstats_heap_sys_bytes gauge # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge -# HELP go_memstats_lookups_total Total number of pointer lookups. -# TYPE go_memstats_lookups_total counter -# HELP go_memstats_mallocs_total Total number of mallocs. +# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects. # TYPE go_memstats_mallocs_total counter -# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes. # TYPE go_memstats_mcache_inuse_bytes gauge -# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes. # TYPE go_memstats_mcache_sys_bytes gauge -# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes. 
# TYPE go_memstats_mspan_inuse_bytes gauge -# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes. # TYPE go_memstats_mspan_sys_bytes gauge -# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes. # TYPE go_memstats_next_gc_bytes gauge -# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes. # TYPE go_memstats_other_sys_bytes gauge -# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes. # TYPE go_memstats_stack_inuse_bytes gauge -# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes. # TYPE go_memstats_stack_sys_bytes gauge -# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte. # TYPE go_memstats_sys_bytes gauge +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads. +# TYPE go_sched_gomaxprocs_threads gauge # HELP go_threads Number of OS threads created. # TYPE go_threads gauge # HELP node_arp_entries ARP entries by device @@ -99,13 +101,16 @@ node_bcache_cache_misses_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-8 node_bcache_cache_read_races_total{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_readaheads_total Count of times readahead occurred. # TYPE node_bcache_cache_readaheads_total counter -node_bcache_cache_readaheads_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +node_bcache_cache_readaheads_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 13 # HELP node_bcache_congested Congestion. # TYPE node_bcache_congested gauge node_bcache_congested{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_dirty_data_bytes Amount of dirty data for this backing device in the cache. # TYPE node_bcache_dirty_data_bytes gauge node_bcache_dirty_data_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_dirty_target_bytes Current dirty data target threshold for this backing device in bytes. +# TYPE node_bcache_dirty_target_bytes gauge +node_bcache_dirty_target_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 2.189426688e+10 # HELP node_bcache_io_errors Number of errors that have occurred, decayed by io_error_halflife. 
# TYPE node_bcache_io_errors gauge node_bcache_io_errors{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 @@ -124,6 +129,18 @@ node_bcache_root_usage_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_tree_depth Depth of the btree. # TYPE node_bcache_tree_depth gauge node_bcache_tree_depth{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_writeback_change Last writeback rate change step for this backing device. +# TYPE node_bcache_writeback_change gauge +node_bcache_writeback_change{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 329204 +# HELP node_bcache_writeback_rate Current writeback rate for this backing device in bytes. +# TYPE node_bcache_writeback_rate gauge +node_bcache_writeback_rate{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1.150976e+06 +# HELP node_bcache_writeback_rate_integral_term Current result of integral controller, part of writeback rate +# TYPE node_bcache_writeback_rate_integral_term gauge +node_bcache_writeback_rate_integral_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 808960 +# HELP node_bcache_writeback_rate_proportional_term Current result of proportional controller, part of writeback rate +# TYPE node_bcache_writeback_rate_proportional_term gauge +node_bcache_writeback_rate_proportional_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 437748 # HELP node_bcache_written_bytes_total Sum of all data that has been written to the cache. # TYPE node_bcache_written_bytes_total counter node_bcache_written_bytes_total{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 @@ -140,6 +157,70 @@ node_bonding_slaves{master="int"} 2 # HELP node_boot_time_seconds Node boot time, in unixtime. # TYPE node_boot_time_seconds gauge node_boot_time_seconds 1.418183276e+09 +# HELP node_btrfs_allocation_ratio Data allocation ratio for a layout/data type +# TYPE node_btrfs_allocation_ratio gauge +node_btrfs_allocation_ratio{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 +node_btrfs_allocation_ratio{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.3333333333333333 +node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2 +node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 +node_btrfs_allocation_ratio{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2 +node_btrfs_allocation_ratio{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 +# HELP node_btrfs_commit_seconds_total Sum of the duration of all commits, in seconds. +# TYPE node_btrfs_commit_seconds_total counter +node_btrfs_commit_seconds_total{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 47836.09 +node_btrfs_commit_seconds_total{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +# HELP node_btrfs_commits_total The total number of commits that have occurred. +# TYPE node_btrfs_commits_total counter +node_btrfs_commits_total{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 258051 +node_btrfs_commits_total{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +# HELP node_btrfs_device_size_bytes Size of a device that is part of the filesystem. 
+# TYPE node_btrfs_device_size_bytes gauge +node_btrfs_device_size_bytes{device="loop22",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop23",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop24",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop25",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop25",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop26",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+10 +# HELP node_btrfs_global_rsv_size_bytes Size of global reserve. +# TYPE node_btrfs_global_rsv_size_bytes gauge +node_btrfs_global_rsv_size_bytes{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.6777216e+07 +node_btrfs_global_rsv_size_bytes{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.6777216e+07 +# HELP node_btrfs_info Filesystem information +# TYPE node_btrfs_info gauge +node_btrfs_info{label="",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1 +node_btrfs_info{label="fixture",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 +# HELP node_btrfs_last_commit_seconds Duration of the most recent commit, in seconds. +# TYPE node_btrfs_last_commit_seconds gauge +node_btrfs_last_commit_seconds{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 +node_btrfs_last_commit_seconds{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +# HELP node_btrfs_max_commit_seconds Duration of the slowest commit, in seconds. +# TYPE node_btrfs_max_commit_seconds gauge +node_btrfs_max_commit_seconds{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 51.462 +node_btrfs_max_commit_seconds{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +# HELP node_btrfs_reserved_bytes Amount of space reserved for a data type +# TYPE node_btrfs_reserved_bytes gauge +node_btrfs_reserved_bytes{block_group_type="data",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 +node_btrfs_reserved_bytes{block_group_type="data",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +node_btrfs_reserved_bytes{block_group_type="metadata",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 +node_btrfs_reserved_bytes{block_group_type="metadata",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +node_btrfs_reserved_bytes{block_group_type="system",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 +node_btrfs_reserved_bytes{block_group_type="system",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +# HELP node_btrfs_size_bytes Amount of space allocated for a layout/data type +# TYPE node_btrfs_size_bytes gauge +node_btrfs_size_bytes{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2.147483648e+09 +node_btrfs_size_bytes{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 6.44087808e+08 +node_btrfs_size_bytes{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+09 +node_btrfs_size_bytes{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 4.29391872e+08 +node_btrfs_size_bytes{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 8.388608e+06 +node_btrfs_size_bytes{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.6777216e+07 +# HELP node_btrfs_used_bytes Amount of used space by a layout/data type +# TYPE node_btrfs_used_bytes gauge 
+node_btrfs_used_bytes{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 8.08189952e+08 +node_btrfs_used_bytes{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +node_btrfs_used_bytes{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 933888 +node_btrfs_used_bytes{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 114688 +node_btrfs_used_bytes{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 16384 +node_btrfs_used_bytes{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 16384 # HELP node_buddyinfo_blocks Count of free blocks according to size. # TYPE node_buddyinfo_blocks gauge node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 @@ -175,6 +256,34 @@ node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 +# HELP node_cgroups_cgroups Current cgroup number of the subsystem. +# TYPE node_cgroups_cgroups gauge +node_cgroups_cgroups{subsys_name="blkio"} 170 +node_cgroups_cgroups{subsys_name="cpu"} 172 +node_cgroups_cgroups{subsys_name="cpuacct"} 172 +node_cgroups_cgroups{subsys_name="cpuset"} 47 +node_cgroups_cgroups{subsys_name="devices"} 170 +node_cgroups_cgroups{subsys_name="freezer"} 47 +node_cgroups_cgroups{subsys_name="hugetlb"} 47 +node_cgroups_cgroups{subsys_name="memory"} 234 +node_cgroups_cgroups{subsys_name="net_cls"} 47 +node_cgroups_cgroups{subsys_name="perf_event"} 47 +node_cgroups_cgroups{subsys_name="pids"} 170 +node_cgroups_cgroups{subsys_name="rdma"} 1 +# HELP node_cgroups_enabled Current cgroup number of the subsystem. +# TYPE node_cgroups_enabled gauge +node_cgroups_enabled{subsys_name="blkio"} 1 +node_cgroups_enabled{subsys_name="cpu"} 1 +node_cgroups_enabled{subsys_name="cpuacct"} 1 +node_cgroups_enabled{subsys_name="cpuset"} 1 +node_cgroups_enabled{subsys_name="devices"} 1 +node_cgroups_enabled{subsys_name="freezer"} 1 +node_cgroups_enabled{subsys_name="hugetlb"} 1 +node_cgroups_enabled{subsys_name="memory"} 1 +node_cgroups_enabled{subsys_name="net_cls"} 1 +node_cgroups_enabled{subsys_name="perf_event"} 1 +node_cgroups_enabled{subsys_name="pids"} 1 +node_cgroups_enabled{subsys_name="rdma"} 1 # HELP node_context_switches_total Total number of context switches. # TYPE node_context_switches_total counter node_context_switches_total 3.8014093e+07 @@ -184,13 +293,13 @@ node_cooling_device_cur_state{name="0",type="Processor"} 0 # HELP node_cooling_device_max_state Maximum throttle state of the cooling device # TYPE node_cooling_device_max_state gauge node_cooling_device_max_state{name="0",type="Processor"} 3 -# HELP node_cpu_core_throttles_total Number of times this cpu core has been throttled. +# HELP node_cpu_core_throttles_total Number of times this CPU core has been throttled. # TYPE node_cpu_core_throttles_total counter node_cpu_core_throttles_total{core="0",package="0"} 5 node_cpu_core_throttles_total{core="0",package="1"} 0 node_cpu_core_throttles_total{core="1",package="0"} 0 node_cpu_core_throttles_total{core="1",package="1"} 9 -# HELP node_cpu_guest_seconds_total Seconds the cpus spent in guests (VMs) for each mode. +# HELP node_cpu_guest_seconds_total Seconds the CPUs spent in guests (VMs) for each mode. 
# TYPE node_cpu_guest_seconds_total counter node_cpu_guest_seconds_total{cpu="0",mode="nice"} 0.01 node_cpu_guest_seconds_total{cpu="0",mode="user"} 0.02 @@ -208,39 +317,46 @@ node_cpu_guest_seconds_total{cpu="6",mode="nice"} 0.07 node_cpu_guest_seconds_total{cpu="6",mode="user"} 0.08 node_cpu_guest_seconds_total{cpu="7",mode="nice"} 0.08 node_cpu_guest_seconds_total{cpu="7",mode="user"} 0.09 -# HELP node_cpu_info CPU information from /proc/cpuinfo. -# TYPE node_cpu_info gauge -node_cpu_info{cachesize="8192 KB",core="0",cpu="0",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 -node_cpu_info{cachesize="8192 KB",core="0",cpu="4",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 -node_cpu_info{cachesize="8192 KB",core="1",cpu="1",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 -node_cpu_info{cachesize="8192 KB",core="1",cpu="5",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 -node_cpu_info{cachesize="8192 KB",core="2",cpu="2",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 -node_cpu_info{cachesize="8192 KB",core="2",cpu="6",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 -node_cpu_info{cachesize="8192 KB",core="3",cpu="3",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 -node_cpu_info{cachesize="8192 KB",core="3",cpu="7",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 -# HELP node_cpu_package_throttles_total Number of times this cpu package has been throttled. +# HELP node_cpu_isolated Whether each core is isolated, information from /sys/devices/system/cpu/isolated. +# TYPE node_cpu_isolated gauge +node_cpu_isolated{cpu="1"} 1 +node_cpu_isolated{cpu="3"} 1 +node_cpu_isolated{cpu="4"} 1 +node_cpu_isolated{cpu="5"} 1 +node_cpu_isolated{cpu="9"} 1 +# HELP node_cpu_package_throttles_total Number of times this CPU package has been throttled. # TYPE node_cpu_package_throttles_total counter node_cpu_package_throttles_total{package="0"} 30 node_cpu_package_throttles_total{package="1"} 6 -# HELP node_cpu_scaling_frequency_hertz Current scaled cpu thread frequency in hertz. +# HELP node_cpu_scaling_frequency_hertz Current scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_hertz gauge node_cpu_scaling_frequency_hertz{cpu="0"} 1.699981e+09 node_cpu_scaling_frequency_hertz{cpu="1"} 1.699981e+09 node_cpu_scaling_frequency_hertz{cpu="2"} 8e+06 node_cpu_scaling_frequency_hertz{cpu="3"} 8e+06 -# HELP node_cpu_scaling_frequency_max_hertz Maximum scaled cpu thread frequency in hertz. +# HELP node_cpu_scaling_frequency_max_hertz Maximum scaled CPU thread frequency in hertz. 
# TYPE node_cpu_scaling_frequency_max_hertz gauge node_cpu_scaling_frequency_max_hertz{cpu="0"} 3.7e+09 node_cpu_scaling_frequency_max_hertz{cpu="1"} 3.7e+09 node_cpu_scaling_frequency_max_hertz{cpu="2"} 4.2e+09 node_cpu_scaling_frequency_max_hertz{cpu="3"} 4.2e+09 -# HELP node_cpu_scaling_frequency_min_hertz Minimum scaled cpu thread frequency in hertz. +# HELP node_cpu_scaling_frequency_min_hertz Minimum scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_min_hertz gauge node_cpu_scaling_frequency_min_hertz{cpu="0"} 8e+08 node_cpu_scaling_frequency_min_hertz{cpu="1"} 8e+08 node_cpu_scaling_frequency_min_hertz{cpu="2"} 1e+06 node_cpu_scaling_frequency_min_hertz{cpu="3"} 1e+06 -# HELP node_cpu_seconds_total Seconds the cpus spent in each mode. +# HELP node_cpu_scaling_governor Current enabled CPU frequency governor. +# TYPE node_cpu_scaling_governor gauge +node_cpu_scaling_governor{cpu="0",governor="performance"} 0 +node_cpu_scaling_governor{cpu="0",governor="powersave"} 1 +node_cpu_scaling_governor{cpu="1",governor="performance"} 0 +node_cpu_scaling_governor{cpu="1",governor="powersave"} 1 +node_cpu_scaling_governor{cpu="2",governor="performance"} 0 +node_cpu_scaling_governor{cpu="2",governor="powersave"} 1 +node_cpu_scaling_governor{cpu="3",governor="performance"} 0 +node_cpu_scaling_governor{cpu="3",governor="powersave"} 1 +# HELP node_cpu_seconds_total Seconds the CPUs spent in each mode. # TYPE node_cpu_seconds_total counter node_cpu_seconds_total{cpu="0",mode="idle"} 10870.69 node_cpu_seconds_total{cpu="0",mode="iowait"} 2.2 @@ -306,18 +422,87 @@ node_cpu_seconds_total{cpu="7",mode="softirq"} 0.31 node_cpu_seconds_total{cpu="7",mode="steal"} 0 node_cpu_seconds_total{cpu="7",mode="system"} 101.64 node_cpu_seconds_total{cpu="7",mode="user"} 290.98 +# HELP node_cpu_vulnerabilities_info Details of each CPU vulnerability reported by sysfs. The value of the series is an int encoded state of the vulnerability. The same state is stored as a string in the label +# TYPE node_cpu_vulnerabilities_info gauge +node_cpu_vulnerabilities_info{codename="itlb_multihit",mitigation="",state="not affected"} 1 +node_cpu_vulnerabilities_info{codename="mds",mitigation="",state="vulnerable"} 1 +node_cpu_vulnerabilities_info{codename="retbleed",mitigation="untrained return thunk; SMT enabled with STIBP protection",state="mitigation"} 1 +node_cpu_vulnerabilities_info{codename="spectre_v1",mitigation="usercopy/swapgs barriers and __user pointer sanitization",state="mitigation"} 1 +node_cpu_vulnerabilities_info{codename="spectre_v2",mitigation="Retpolines, IBPB: conditional, STIBP: always-on, RSB filling, PBRSB-eIBRS: Not affected",state="mitigation"} 1 +# HELP node_disk_ata_rotation_rate_rpm ATA disk rotation rate in RPMs (0 for SSDs). +# TYPE node_disk_ata_rotation_rate_rpm gauge +node_disk_ata_rotation_rate_rpm{device="sda"} 7200 +node_disk_ata_rotation_rate_rpm{device="sdb"} 0 +node_disk_ata_rotation_rate_rpm{device="sdc"} 0 +# HELP node_disk_ata_write_cache ATA disk has a write cache. +# TYPE node_disk_ata_write_cache gauge +node_disk_ata_write_cache{device="sda"} 1 +node_disk_ata_write_cache{device="sdb"} 1 +node_disk_ata_write_cache{device="sdc"} 1 +# HELP node_disk_ata_write_cache_enabled ATA disk has its write cache enabled. 
+# TYPE node_disk_ata_write_cache_enabled gauge +node_disk_ata_write_cache_enabled{device="sda"} 0 +node_disk_ata_write_cache_enabled{device="sdb"} 1 +node_disk_ata_write_cache_enabled{device="sdc"} 0 +# HELP node_disk_device_mapper_info Info about disk device mapper. +# TYPE node_disk_device_mapper_info gauge +node_disk_device_mapper_info{device="dm-0",lv_layer="",lv_name="",name="nvme0n1_crypt",uuid="CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt",vg_name=""} 1 +node_disk_device_mapper_info{device="dm-1",lv_layer="",lv_name="swap_1",name="system-swap_1",uuid="LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-2",lv_layer="",lv_name="root",name="system-root",uuid="LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-3",lv_layer="",lv_name="var",name="system-var",uuid="LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-4",lv_layer="",lv_name="tmp",name="system-tmp",uuid="LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-5",lv_layer="",lv_name="home",name="system-home",uuid="LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf",vg_name="system"} 1 # HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. # TYPE node_disk_discard_time_seconds_total counter node_disk_discard_time_seconds_total{device="sdb"} 11.13 +node_disk_discard_time_seconds_total{device="sdc"} 11.13 # HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. # TYPE node_disk_discarded_sectors_total counter node_disk_discarded_sectors_total{device="sdb"} 1.925173784e+09 +node_disk_discarded_sectors_total{device="sdc"} 1.25173784e+08 # HELP node_disk_discards_completed_total The total number of discards completed successfully. # TYPE node_disk_discards_completed_total counter node_disk_discards_completed_total{device="sdb"} 68851 +node_disk_discards_completed_total{device="sdc"} 18851 # HELP node_disk_discards_merged_total The total number of discards merged. # TYPE node_disk_discards_merged_total counter node_disk_discards_merged_total{device="sdb"} 0 +node_disk_discards_merged_total{device="sdc"} 0 +# HELP node_disk_filesystem_info Info about disk filesystem. 
+# TYPE node_disk_filesystem_info gauge +node_disk_filesystem_info{device="dm-0",type="LVM2_member",usage="raid",uuid="c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB",version="LVM2 001"} 1 +node_disk_filesystem_info{device="dm-1",type="swap",usage="other",uuid="5272bb60-04b5-49cd-b730-be57c7604450",version="1"} 1 +node_disk_filesystem_info{device="dm-2",type="ext4",usage="filesystem",uuid="3deafd0d-faff-4695-8d15-51061ae1f51b",version="1.0"} 1 +node_disk_filesystem_info{device="dm-3",type="ext4",usage="filesystem",uuid="5c772222-f7d4-4c8e-87e8-e97df6b7a45e",version="1.0"} 1 +node_disk_filesystem_info{device="dm-4",type="ext4",usage="filesystem",uuid="a9479d44-60e1-4015-a1e5-bb065e6dd11b",version="1.0"} 1 +node_disk_filesystem_info{device="dm-5",type="ext4",usage="filesystem",uuid="b05b726a-c718-4c4d-8641-7c73a7696d83",version="1.0"} 1 +node_disk_filesystem_info{device="mmcblk0p1",type="vfat",usage="filesystem",uuid="6284-658D",version="FAT32"} 1 +node_disk_filesystem_info{device="mmcblk0p2",type="ext4",usage="filesystem",uuid="83324ce8-a6f3-4e35-ad64-dbb3d6b87a32",version="1.0"} 1 +node_disk_filesystem_info{device="sda",type="LVM2_member",usage="raid",uuid="cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw",version="LVM2 001"} 1 +node_disk_filesystem_info{device="sdc",type="LVM2_member",usage="raid",uuid="QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb",version="LVM2 001"} 1 +# HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. +# TYPE node_disk_flush_requests_time_seconds_total counter +node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944 +# HELP node_disk_flush_requests_total The total number of flush requests completed successfully +# TYPE node_disk_flush_requests_total counter +node_disk_flush_requests_total{device="sdc"} 1555 +# HELP node_disk_info Info of /sys/block/. 
+# TYPE node_disk_info gauge +node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="0x83e36d93",wwn=""} 1 +node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="0x83e36d93",wwn=""} 1 +node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="0x83e36d93",wwn=""} 1 +node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",rotational="0",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1 +node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",rotational="1",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1 +node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",rotational="0",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1 +node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",rotational="0",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1 +node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",rotational="0",serial="AAAABBBBCCCC1",wwn=""} 1 +node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",rotational="0",serial="",wwn=""} 1 # HELP node_disk_io_now The number of I/Os currently in progress. # TYPE node_disk_io_now gauge node_disk_io_now{device="dm-0"} 0 @@ -332,6 +517,7 @@ node_disk_io_now{device="mmcblk0p2"} 0 node_disk_io_now{device="nvme0n1"} 0 node_disk_io_now{device="sda"} 0 node_disk_io_now{device="sdb"} 0 +node_disk_io_now{device="sdc"} 0 node_disk_io_now{device="sr0"} 0 node_disk_io_now{device="vda"} 0 # HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. @@ -348,6 +534,7 @@ node_disk_io_time_seconds_total{device="mmcblk0p2"} 0.068 node_disk_io_time_seconds_total{device="nvme0n1"} 222.766 node_disk_io_time_seconds_total{device="sda"} 9653.880000000001 node_disk_io_time_seconds_total{device="sdb"} 60.730000000000004 +node_disk_io_time_seconds_total{device="sdc"} 10.73 node_disk_io_time_seconds_total{device="sr0"} 0 node_disk_io_time_seconds_total{device="vda"} 41614.592000000004 # HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. 
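The hunks around this point update collector/fixtures/e2e-64k-page-output.txt, the golden file that the end-to-end test compares against the exporter's /metrics output; each family in it is a # HELP/# TYPE pair followed by its samples in the Prometheus text exposition format. A small sketch of loading such a fixture with prometheus/common/expfmt, in case the format is unfamiliar (illustrative only; the e2e harness itself may compare the text directly):

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/common/expfmt"
)

func main() {
	f, err := os.Open("collector/fixtures/e2e-64k-page-output.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var parser expfmt.TextParser
	// TextToMetricFamilies groups samples under their # HELP/# TYPE headers.
	families, err := parser.TextToMetricFamilies(f)
	if err != nil {
		panic(err)
	}
	fmt.Println("metric families in fixture:", len(families))
}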
@@ -364,6 +551,7 @@ node_disk_io_time_weighted_seconds_total{device="mmcblk0p2"} 0.068 node_disk_io_time_weighted_seconds_total{device="nvme0n1"} 1032.546 node_disk_io_time_weighted_seconds_total{device="sda"} 82621.804 node_disk_io_time_weighted_seconds_total{device="sdb"} 67.07000000000001 +node_disk_io_time_weighted_seconds_total{device="sdc"} 17.07 node_disk_io_time_weighted_seconds_total{device="sr0"} 0 node_disk_io_time_weighted_seconds_total{device="vda"} 2.0778722280000001e+06 # HELP node_disk_read_bytes_total The total number of bytes read successfully. @@ -380,6 +568,7 @@ node_disk_read_bytes_total{device="mmcblk0p2"} 389120 node_disk_read_bytes_total{device="nvme0n1"} 2.377714176e+09 node_disk_read_bytes_total{device="sda"} 5.13713216512e+11 node_disk_read_bytes_total{device="sdb"} 4.944782848e+09 +node_disk_read_bytes_total{device="sdc"} 8.48782848e+08 node_disk_read_bytes_total{device="sr0"} 0 node_disk_read_bytes_total{device="vda"} 1.6727491584e+10 # HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. @@ -396,6 +585,7 @@ node_disk_read_time_seconds_total{device="mmcblk0p2"} 0.068 node_disk_read_time_seconds_total{device="nvme0n1"} 21.650000000000002 node_disk_read_time_seconds_total{device="sda"} 18492.372 node_disk_read_time_seconds_total{device="sdb"} 0.084 +node_disk_read_time_seconds_total{device="sdc"} 0.014 node_disk_read_time_seconds_total{device="sr0"} 0 node_disk_read_time_seconds_total{device="vda"} 8655.768 # HELP node_disk_reads_completed_total The total number of reads completed successfully. @@ -412,6 +602,7 @@ node_disk_reads_completed_total{device="mmcblk0p2"} 95 node_disk_reads_completed_total{device="nvme0n1"} 47114 node_disk_reads_completed_total{device="sda"} 2.5354637e+07 node_disk_reads_completed_total{device="sdb"} 326552 +node_disk_reads_completed_total{device="sdc"} 126552 node_disk_reads_completed_total{device="sr0"} 0 node_disk_reads_completed_total{device="vda"} 1.775784e+06 # HELP node_disk_reads_merged_total The total number of reads merged. @@ -428,6 +619,7 @@ node_disk_reads_merged_total{device="mmcblk0p2"} 0 node_disk_reads_merged_total{device="nvme0n1"} 4 node_disk_reads_merged_total{device="sda"} 3.4367663e+07 node_disk_reads_merged_total{device="sdb"} 841 +node_disk_reads_merged_total{device="sdc"} 141 node_disk_reads_merged_total{device="sr0"} 0 node_disk_reads_merged_total{device="vda"} 15386 # HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. @@ -444,6 +636,7 @@ node_disk_write_time_seconds_total{device="mmcblk0p2"} 0 node_disk_write_time_seconds_total{device="nvme0n1"} 1011.053 node_disk_write_time_seconds_total{device="sda"} 63877.96 node_disk_write_time_seconds_total{device="sdb"} 5.007 +node_disk_write_time_seconds_total{device="sdc"} 1.0070000000000001 node_disk_write_time_seconds_total{device="sr0"} 0 node_disk_write_time_seconds_total{device="vda"} 2.069221364e+06 # HELP node_disk_writes_completed_total The total number of writes completed successfully. @@ -460,6 +653,7 @@ node_disk_writes_completed_total{device="mmcblk0p2"} 0 node_disk_writes_completed_total{device="nvme0n1"} 1.07832e+06 node_disk_writes_completed_total{device="sda"} 2.8444756e+07 node_disk_writes_completed_total{device="sdb"} 41822 +node_disk_writes_completed_total{device="sdc"} 11822 node_disk_writes_completed_total{device="sr0"} 0 node_disk_writes_completed_total{device="vda"} 6.038856e+06 # HELP node_disk_writes_merged_total The number of writes merged. 
@@ -476,6 +670,7 @@ node_disk_writes_merged_total{device="mmcblk0p2"} 0 node_disk_writes_merged_total{device="nvme0n1"} 43950 node_disk_writes_merged_total{device="sda"} 1.1134226e+07 node_disk_writes_merged_total{device="sdb"} 2895 +node_disk_writes_merged_total{device="sdc"} 1895 node_disk_writes_merged_total{device="sr0"} 0 node_disk_writes_merged_total{device="vda"} 2.0711856e+07 # HELP node_disk_written_bytes_total The total number of bytes written successfully. @@ -492,8 +687,12 @@ node_disk_written_bytes_total{device="mmcblk0p2"} 0 node_disk_written_bytes_total{device="nvme0n1"} 2.0199236096e+10 node_disk_written_bytes_total{device="sda"} 2.58916880384e+11 node_disk_written_bytes_total{device="sdb"} 1.01012736e+09 +node_disk_written_bytes_total{device="sdc"} 8.852736e+07 node_disk_written_bytes_total{device="sr0"} 0 node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 +# HELP node_dmi_info A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, board_asset_tag, board_name, board_serial, board_vendor, board_version, chassis_asset_tag, chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, product_sku, product_uuid, product_version, system_vendor if provided by DMI. +# TYPE node_dmi_info gauge +node_dmi_info{bios_date="04/12/2021",bios_release="2.2",bios_vendor="Dell Inc.",bios_version="2.2.4",board_name="07PXPY",board_serial=".7N62AI2.GRTCL6944100GP.",board_vendor="Dell Inc.",board_version="A01",chassis_asset_tag="",chassis_serial="7N62AI2",chassis_vendor="Dell Inc.",chassis_version="",product_family="PowerEdge",product_name="PowerEdge R6515",product_serial="7N62AI2",product_sku="SKU=NotProvided;ModelName=PowerEdge R6515",product_uuid="83340ca8-cb49-4474-8c29-d2088ca84dd9",product_version="�[�",system_vendor="Dell Inc."} 1 # HELP node_drbd_activitylog_writes_total Number of updates of the activity log area of the meta data. # TYPE node_drbd_activitylog_writes_total counter node_drbd_activitylog_writes_total{device="drbd1"} 1100 @@ -558,8 +757,69 @@ node_edac_uncorrectable_errors_total{controller="0"} 5 # HELP node_entropy_available_bits Bits of available entropy. # TYPE node_entropy_available_bits gauge node_entropy_available_bits 1337 -# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which node_exporter was built. +# HELP node_entropy_pool_size_bits Bits of entropy pool. +# TYPE node_entropy_pool_size_bits gauge +node_entropy_pool_size_bits 4096 +# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. # TYPE node_exporter_build_info gauge +# HELP node_fibrechannel_dumped_frames_total Number of dumped frames +# TYPE node_fibrechannel_dumped_frames_total counter +node_fibrechannel_dumped_frames_total{fc_host="host1"} 0 +# HELP node_fibrechannel_error_frames_total Number of errors in frames +# TYPE node_fibrechannel_error_frames_total counter +node_fibrechannel_error_frames_total{fc_host="host0"} 0 +node_fibrechannel_error_frames_total{fc_host="host1"} 19 +# HELP node_fibrechannel_fcp_packet_aborts_total Number of aborted packets +# TYPE node_fibrechannel_fcp_packet_aborts_total counter +node_fibrechannel_fcp_packet_aborts_total{fc_host="host0"} 19 +# HELP node_fibrechannel_info Non-numeric data from /sys/class/fc_host/, value is always 1. 
+# TYPE node_fibrechannel_info gauge +node_fibrechannel_info{dev_loss_tmo="",fabric_name="",fc_host="host1",port_id="",port_name="",port_state="",port_type="",speed="8 Gbit",supported_classes="",supported_speeds="",symbolic_name=""} 1 +node_fibrechannel_info{dev_loss_tmo="30",fabric_name="0",fc_host="host0",port_id="000002",port_name="1000e0071bce95f2",port_state="Online",port_type="Point-To-Point (direct nport connection)",speed="16 Gbit",supported_classes="Class 3",supported_speeds="4 Gbit, 8 Gbit, 16 Gbit",symbolic_name="Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux"} 1 +# HELP node_fibrechannel_invalid_crc_total Invalid Cyclic Redundancy Check count +# TYPE node_fibrechannel_invalid_crc_total counter +node_fibrechannel_invalid_crc_total{fc_host="host0"} 2 +node_fibrechannel_invalid_crc_total{fc_host="host1"} 32 +# HELP node_fibrechannel_invalid_tx_words_total Number of invalid words transmitted by host port +# TYPE node_fibrechannel_invalid_tx_words_total counter +node_fibrechannel_invalid_tx_words_total{fc_host="host0"} 8 +node_fibrechannel_invalid_tx_words_total{fc_host="host1"} 128 +# HELP node_fibrechannel_link_failure_total Number of times the host port link has failed +# TYPE node_fibrechannel_link_failure_total counter +node_fibrechannel_link_failure_total{fc_host="host0"} 9 +node_fibrechannel_link_failure_total{fc_host="host1"} 144 +# HELP node_fibrechannel_loss_of_signal_total Number of times signal has been lost +# TYPE node_fibrechannel_loss_of_signal_total counter +node_fibrechannel_loss_of_signal_total{fc_host="host0"} 17 +node_fibrechannel_loss_of_signal_total{fc_host="host1"} 272 +# HELP node_fibrechannel_loss_of_sync_total Number of failures on either bit or transmission word boundaries +# TYPE node_fibrechannel_loss_of_sync_total counter +node_fibrechannel_loss_of_sync_total{fc_host="host0"} 16 +node_fibrechannel_loss_of_sync_total{fc_host="host1"} 256 +# HELP node_fibrechannel_nos_total Number Not_Operational Primitive Sequence received by host port +# TYPE node_fibrechannel_nos_total counter +node_fibrechannel_nos_total{fc_host="host0"} 18 +node_fibrechannel_nos_total{fc_host="host1"} 288 +# HELP node_fibrechannel_rx_frames_total Number of frames received +# TYPE node_fibrechannel_rx_frames_total counter +node_fibrechannel_rx_frames_total{fc_host="host0"} 3 +node_fibrechannel_rx_frames_total{fc_host="host1"} 48 +# HELP node_fibrechannel_rx_words_total Number of words received by host port +# TYPE node_fibrechannel_rx_words_total counter +node_fibrechannel_rx_words_total{fc_host="host0"} 4 +node_fibrechannel_rx_words_total{fc_host="host1"} 64 +# HELP node_fibrechannel_seconds_since_last_reset_total Number of seconds since last host port reset +# TYPE node_fibrechannel_seconds_since_last_reset_total counter +node_fibrechannel_seconds_since_last_reset_total{fc_host="host0"} 7 +node_fibrechannel_seconds_since_last_reset_total{fc_host="host1"} 112 +# HELP node_fibrechannel_tx_frames_total Number of frames transmitted by host port +# TYPE node_fibrechannel_tx_frames_total counter +node_fibrechannel_tx_frames_total{fc_host="host0"} 5 +node_fibrechannel_tx_frames_total{fc_host="host1"} 80 +# HELP node_fibrechannel_tx_words_total Number of words transmitted by host port +# TYPE node_fibrechannel_tx_words_total counter +node_fibrechannel_tx_words_total{fc_host="host0"} 6 +node_fibrechannel_tx_words_total{fc_host="host1"} 96 # HELP node_filefd_allocated File descriptor statistics: allocated. 
# TYPE node_filefd_allocated gauge node_filefd_allocated 1024 @@ -611,6 +871,10 @@ node_hwmon_fan_target_rpm{chip="nct6779",sensor="fan2"} 27000 # HELP node_hwmon_fan_tolerance Hardware monitor fan element tolerance # TYPE node_hwmon_fan_tolerance gauge node_hwmon_fan_tolerance{chip="nct6779",sensor="fan2"} 0 +# HELP node_hwmon_freq_freq_mhz Hardware monitor for GPU frequency in MHz +# TYPE node_hwmon_freq_freq_mhz gauge +node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="mclk"} 300 +node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="sclk"} 214 # HELP node_hwmon_in_alarm Hardware sensor alarm status (in) # TYPE node_hwmon_in_alarm gauge node_hwmon_in_alarm{chip="nct6779",sensor="in0"} 0 @@ -724,18 +988,20 @@ node_hwmon_pwm_weight_temp_step_tol{chip="nct6779",sensor="pwm1"} 0 # TYPE node_hwmon_sensor_label gauge node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp1"} 1 node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp2"} 1 -node_hwmon_sensor_label{chip="platform_applesmc_768",label="left_side",sensor="fan1"} 1 -node_hwmon_sensor_label{chip="platform_applesmc_768",label="right_side",sensor="fan2"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_0",sensor="temp2"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_1",sensor="temp3"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_2",sensor="temp4"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_3",sensor="temp5"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="physical_id_0",sensor="temp1"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_0",sensor="temp2"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_1",sensor="temp3"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_2",sensor="temp4"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_3",sensor="temp5"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="physical_id_0",sensor="temp1"} 1 +node_hwmon_sensor_label{chip="hwmon4",label="mclk",sensor="freq2"} 1 +node_hwmon_sensor_label{chip="hwmon4",label="sclk",sensor="freq1"} 1 +node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side",sensor="fan1"} 1 +node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side",sensor="fan2"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 0",sensor="temp2"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 1",sensor="temp3"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 2",sensor="temp4"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 3",sensor="temp5"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Physical id 0",sensor="temp1"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 0",sensor="temp2"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 1",sensor="temp3"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 2",sensor="temp4"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 3",sensor="temp5"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Physical id 0",sensor="temp1"} 1 # HELP node_hwmon_temp_celsius Hardware monitor for temperature (input) # TYPE node_hwmon_temp_celsius gauge node_hwmon_temp_celsius{chip="hwmon4",sensor="temp1"} 55 @@ -1094,6 +1360,184 @@ node_ksmd_run 1 # HELP node_ksmd_sleep_seconds ksmd 'sleep_millisecs' file. 
# TYPE node_ksmd_sleep_seconds gauge node_ksmd_sleep_seconds 0.02 +# HELP node_lnstat_allocs_total linux network cache stats +# TYPE node_lnstat_allocs_total counter +node_lnstat_allocs_total{cpu="0",subsystem="arp_cache"} 1 +node_lnstat_allocs_total{cpu="0",subsystem="ndisc_cache"} 240 +node_lnstat_allocs_total{cpu="1",subsystem="arp_cache"} 13 +node_lnstat_allocs_total{cpu="1",subsystem="ndisc_cache"} 252 +# HELP node_lnstat_delete_list_total linux network cache stats +# TYPE node_lnstat_delete_list_total counter +node_lnstat_delete_list_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_delete_list_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_delete_list_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_delete_list_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_delete_total linux network cache stats +# TYPE node_lnstat_delete_total counter +node_lnstat_delete_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_delete_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_delete_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_delete_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_destroys_total linux network cache stats +# TYPE node_lnstat_destroys_total counter +node_lnstat_destroys_total{cpu="0",subsystem="arp_cache"} 2 +node_lnstat_destroys_total{cpu="0",subsystem="ndisc_cache"} 241 +node_lnstat_destroys_total{cpu="1",subsystem="arp_cache"} 14 +node_lnstat_destroys_total{cpu="1",subsystem="ndisc_cache"} 253 +# HELP node_lnstat_drop_total linux network cache stats +# TYPE node_lnstat_drop_total counter +node_lnstat_drop_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_drop_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_drop_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_drop_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_early_drop_total linux network cache stats +# TYPE node_lnstat_early_drop_total counter +node_lnstat_early_drop_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_early_drop_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_early_drop_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_early_drop_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_entries_total linux network cache stats +# TYPE node_lnstat_entries_total counter +node_lnstat_entries_total{cpu="0",subsystem="arp_cache"} 20 +node_lnstat_entries_total{cpu="0",subsystem="ndisc_cache"} 36 +node_lnstat_entries_total{cpu="0",subsystem="nf_conntrack"} 33 +node_lnstat_entries_total{cpu="1",subsystem="arp_cache"} 20 +node_lnstat_entries_total{cpu="1",subsystem="ndisc_cache"} 36 +node_lnstat_entries_total{cpu="1",subsystem="nf_conntrack"} 33 +node_lnstat_entries_total{cpu="2",subsystem="nf_conntrack"} 33 +node_lnstat_entries_total{cpu="3",subsystem="nf_conntrack"} 33 +# HELP node_lnstat_expect_create_total linux network cache stats +# TYPE node_lnstat_expect_create_total counter +node_lnstat_expect_create_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_expect_create_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_expect_create_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_expect_create_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_expect_delete_total linux network cache stats +# TYPE node_lnstat_expect_delete_total counter +node_lnstat_expect_delete_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_expect_delete_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_expect_delete_total{cpu="2",subsystem="nf_conntrack"} 0 
+node_lnstat_expect_delete_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_expect_new_total linux network cache stats +# TYPE node_lnstat_expect_new_total counter +node_lnstat_expect_new_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_expect_new_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_expect_new_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_expect_new_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_forced_gc_runs_total linux network cache stats +# TYPE node_lnstat_forced_gc_runs_total counter +node_lnstat_forced_gc_runs_total{cpu="0",subsystem="arp_cache"} 10 +node_lnstat_forced_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 249 +node_lnstat_forced_gc_runs_total{cpu="1",subsystem="arp_cache"} 22 +node_lnstat_forced_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 261 +# HELP node_lnstat_found_total linux network cache stats +# TYPE node_lnstat_found_total counter +node_lnstat_found_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_found_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_found_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_found_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_hash_grows_total linux network cache stats +# TYPE node_lnstat_hash_grows_total counter +node_lnstat_hash_grows_total{cpu="0",subsystem="arp_cache"} 3 +node_lnstat_hash_grows_total{cpu="0",subsystem="ndisc_cache"} 242 +node_lnstat_hash_grows_total{cpu="1",subsystem="arp_cache"} 15 +node_lnstat_hash_grows_total{cpu="1",subsystem="ndisc_cache"} 254 +# HELP node_lnstat_hits_total linux network cache stats +# TYPE node_lnstat_hits_total counter +node_lnstat_hits_total{cpu="0",subsystem="arp_cache"} 5 +node_lnstat_hits_total{cpu="0",subsystem="ndisc_cache"} 244 +node_lnstat_hits_total{cpu="1",subsystem="arp_cache"} 17 +node_lnstat_hits_total{cpu="1",subsystem="ndisc_cache"} 256 +# HELP node_lnstat_icmp_error_total linux network cache stats +# TYPE node_lnstat_icmp_error_total counter +node_lnstat_icmp_error_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_icmp_error_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_icmp_error_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_icmp_error_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_ignore_total linux network cache stats +# TYPE node_lnstat_ignore_total counter +node_lnstat_ignore_total{cpu="0",subsystem="nf_conntrack"} 22666 +node_lnstat_ignore_total{cpu="1",subsystem="nf_conntrack"} 22180 +node_lnstat_ignore_total{cpu="2",subsystem="nf_conntrack"} 22740 +node_lnstat_ignore_total{cpu="3",subsystem="nf_conntrack"} 22152 +# HELP node_lnstat_insert_failed_total linux network cache stats +# TYPE node_lnstat_insert_failed_total counter +node_lnstat_insert_failed_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_insert_failed_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_insert_failed_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_insert_failed_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_insert_total linux network cache stats +# TYPE node_lnstat_insert_total counter +node_lnstat_insert_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_insert_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_insert_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_insert_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_invalid_total linux network cache stats +# TYPE node_lnstat_invalid_total counter +node_lnstat_invalid_total{cpu="0",subsystem="nf_conntrack"} 3 
+node_lnstat_invalid_total{cpu="1",subsystem="nf_conntrack"} 2 +node_lnstat_invalid_total{cpu="2",subsystem="nf_conntrack"} 1 +node_lnstat_invalid_total{cpu="3",subsystem="nf_conntrack"} 47 +# HELP node_lnstat_lookups_total linux network cache stats +# TYPE node_lnstat_lookups_total counter +node_lnstat_lookups_total{cpu="0",subsystem="arp_cache"} 4 +node_lnstat_lookups_total{cpu="0",subsystem="ndisc_cache"} 243 +node_lnstat_lookups_total{cpu="1",subsystem="arp_cache"} 16 +node_lnstat_lookups_total{cpu="1",subsystem="ndisc_cache"} 255 +# HELP node_lnstat_new_total linux network cache stats +# TYPE node_lnstat_new_total counter +node_lnstat_new_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_new_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_new_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_new_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_periodic_gc_runs_total linux network cache stats +# TYPE node_lnstat_periodic_gc_runs_total counter +node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="arp_cache"} 9 +node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 248 +node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="arp_cache"} 21 +node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 260 +# HELP node_lnstat_rcv_probes_mcast_total linux network cache stats +# TYPE node_lnstat_rcv_probes_mcast_total counter +node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="arp_cache"} 7 +node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="ndisc_cache"} 246 +node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="arp_cache"} 19 +node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="ndisc_cache"} 258 +# HELP node_lnstat_rcv_probes_ucast_total linux network cache stats +# TYPE node_lnstat_rcv_probes_ucast_total counter +node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="arp_cache"} 8 +node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="ndisc_cache"} 247 +node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="arp_cache"} 20 +node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="ndisc_cache"} 259 +# HELP node_lnstat_res_failed_total linux network cache stats +# TYPE node_lnstat_res_failed_total counter +node_lnstat_res_failed_total{cpu="0",subsystem="arp_cache"} 6 +node_lnstat_res_failed_total{cpu="0",subsystem="ndisc_cache"} 245 +node_lnstat_res_failed_total{cpu="1",subsystem="arp_cache"} 18 +node_lnstat_res_failed_total{cpu="1",subsystem="ndisc_cache"} 257 +# HELP node_lnstat_search_restart_total linux network cache stats +# TYPE node_lnstat_search_restart_total counter +node_lnstat_search_restart_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_search_restart_total{cpu="1",subsystem="nf_conntrack"} 2 +node_lnstat_search_restart_total{cpu="2",subsystem="nf_conntrack"} 1 +node_lnstat_search_restart_total{cpu="3",subsystem="nf_conntrack"} 4 +# HELP node_lnstat_searched_total linux network cache stats +# TYPE node_lnstat_searched_total counter +node_lnstat_searched_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_searched_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_searched_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_searched_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_table_fulls_total linux network cache stats +# TYPE node_lnstat_table_fulls_total counter +node_lnstat_table_fulls_total{cpu="0",subsystem="arp_cache"} 12 +node_lnstat_table_fulls_total{cpu="0",subsystem="ndisc_cache"} 251 +node_lnstat_table_fulls_total{cpu="1",subsystem="arp_cache"} 24 
+node_lnstat_table_fulls_total{cpu="1",subsystem="ndisc_cache"} 263 +# HELP node_lnstat_unresolved_discards_total linux network cache stats +# TYPE node_lnstat_unresolved_discards_total counter +node_lnstat_unresolved_discards_total{cpu="0",subsystem="arp_cache"} 11 +node_lnstat_unresolved_discards_total{cpu="0",subsystem="ndisc_cache"} 250 +node_lnstat_unresolved_discards_total{cpu="1",subsystem="arp_cache"} 23 +node_lnstat_unresolved_discards_total{cpu="1",subsystem="ndisc_cache"} 262 # HELP node_load1 1m load average. # TYPE node_load1 gauge node_load1 0.21 @@ -1114,6 +1558,7 @@ node_md_blocks{device="md12"} 3.886394368e+09 node_md_blocks{device="md120"} 2.095104e+06 node_md_blocks{device="md126"} 1.855870976e+09 node_md_blocks{device="md127"} 3.12319552e+08 +node_md_blocks{device="md201"} 1.993728e+06 node_md_blocks{device="md219"} 7932 node_md_blocks{device="md3"} 5.853468288e+09 node_md_blocks{device="md4"} 4.883648e+06 @@ -1132,6 +1577,7 @@ node_md_blocks_synced{device="md12"} 3.886394368e+09 node_md_blocks_synced{device="md120"} 2.095104e+06 node_md_blocks_synced{device="md126"} 1.855870976e+09 node_md_blocks_synced{device="md127"} 3.12319552e+08 +node_md_blocks_synced{device="md201"} 114176 node_md_blocks_synced{device="md219"} 7932 node_md_blocks_synced{device="md3"} 5.853468288e+09 node_md_blocks_synced{device="md4"} 4.883648e+06 @@ -1139,6 +1585,14 @@ node_md_blocks_synced{device="md6"} 1.6775552e+07 node_md_blocks_synced{device="md7"} 7.813735424e+09 node_md_blocks_synced{device="md8"} 1.6775552e+07 node_md_blocks_synced{device="md9"} 0 +# HELP node_md_degraded Number of degraded disks on device. +# TYPE node_md_degraded gauge +node_md_degraded{device="md0"} 0 +node_md_degraded{device="md1"} 0 +node_md_degraded{device="md10"} 0 +node_md_degraded{device="md4"} 0 +node_md_degraded{device="md5"} 1 +node_md_degraded{device="md6"} 1 # HELP node_md_disks Number of active/failed/spare disks of device. # TYPE node_md_disks gauge node_md_disks{device="md0",state="active"} 2 @@ -1168,6 +1622,9 @@ node_md_disks{device="md126",state="spare"} 0 node_md_disks{device="md127",state="active"} 2 node_md_disks{device="md127",state="failed"} 0 node_md_disks{device="md127",state="spare"} 0 +node_md_disks{device="md201",state="active"} 2 +node_md_disks{device="md201",state="failed"} 0 +node_md_disks{device="md201",state="spare"} 0 node_md_disks{device="md219",state="active"} 0 node_md_disks{device="md219",state="failed"} 0 node_md_disks{device="md219",state="spare"} 3 @@ -1200,6 +1657,7 @@ node_md_disks_required{device="md12"} 2 node_md_disks_required{device="md120"} 2 node_md_disks_required{device="md126"} 2 node_md_disks_required{device="md127"} 2 +node_md_disks_required{device="md201"} 2 node_md_disks_required{device="md219"} 0 node_md_disks_required{device="md3"} 8 node_md_disks_required{device="md4"} 0 @@ -1207,69 +1665,98 @@ node_md_disks_required{device="md6"} 2 node_md_disks_required{device="md7"} 4 node_md_disks_required{device="md8"} 2 node_md_disks_required{device="md9"} 4 +# HELP node_md_raid_disks Number of raid disks on device. +# TYPE node_md_raid_disks gauge +node_md_raid_disks{device="md0"} 2 +node_md_raid_disks{device="md1"} 2 +node_md_raid_disks{device="md10"} 4 +node_md_raid_disks{device="md4"} 3 +node_md_raid_disks{device="md5"} 3 +node_md_raid_disks{device="md6"} 4 # HELP node_md_state Indicates the state of md-device. 
# TYPE node_md_state gauge node_md_state{device="md0",state="active"} 1 +node_md_state{device="md0",state="check"} 0 node_md_state{device="md0",state="inactive"} 0 node_md_state{device="md0",state="recovering"} 0 node_md_state{device="md0",state="resync"} 0 node_md_state{device="md00",state="active"} 1 +node_md_state{device="md00",state="check"} 0 node_md_state{device="md00",state="inactive"} 0 node_md_state{device="md00",state="recovering"} 0 node_md_state{device="md00",state="resync"} 0 node_md_state{device="md10",state="active"} 1 +node_md_state{device="md10",state="check"} 0 node_md_state{device="md10",state="inactive"} 0 node_md_state{device="md10",state="recovering"} 0 node_md_state{device="md10",state="resync"} 0 node_md_state{device="md101",state="active"} 1 +node_md_state{device="md101",state="check"} 0 node_md_state{device="md101",state="inactive"} 0 node_md_state{device="md101",state="recovering"} 0 node_md_state{device="md101",state="resync"} 0 node_md_state{device="md11",state="active"} 0 +node_md_state{device="md11",state="check"} 0 node_md_state{device="md11",state="inactive"} 0 node_md_state{device="md11",state="recovering"} 0 node_md_state{device="md11",state="resync"} 1 node_md_state{device="md12",state="active"} 1 +node_md_state{device="md12",state="check"} 0 node_md_state{device="md12",state="inactive"} 0 node_md_state{device="md12",state="recovering"} 0 node_md_state{device="md12",state="resync"} 0 node_md_state{device="md120",state="active"} 1 +node_md_state{device="md120",state="check"} 0 node_md_state{device="md120",state="inactive"} 0 node_md_state{device="md120",state="recovering"} 0 node_md_state{device="md120",state="resync"} 0 node_md_state{device="md126",state="active"} 1 +node_md_state{device="md126",state="check"} 0 node_md_state{device="md126",state="inactive"} 0 node_md_state{device="md126",state="recovering"} 0 node_md_state{device="md126",state="resync"} 0 node_md_state{device="md127",state="active"} 1 +node_md_state{device="md127",state="check"} 0 node_md_state{device="md127",state="inactive"} 0 node_md_state{device="md127",state="recovering"} 0 node_md_state{device="md127",state="resync"} 0 +node_md_state{device="md201",state="active"} 0 +node_md_state{device="md201",state="check"} 1 +node_md_state{device="md201",state="inactive"} 0 +node_md_state{device="md201",state="recovering"} 0 +node_md_state{device="md201",state="resync"} 0 node_md_state{device="md219",state="active"} 0 +node_md_state{device="md219",state="check"} 0 node_md_state{device="md219",state="inactive"} 1 node_md_state{device="md219",state="recovering"} 0 node_md_state{device="md219",state="resync"} 0 node_md_state{device="md3",state="active"} 1 +node_md_state{device="md3",state="check"} 0 node_md_state{device="md3",state="inactive"} 0 node_md_state{device="md3",state="recovering"} 0 node_md_state{device="md3",state="resync"} 0 node_md_state{device="md4",state="active"} 0 +node_md_state{device="md4",state="check"} 0 node_md_state{device="md4",state="inactive"} 1 node_md_state{device="md4",state="recovering"} 0 node_md_state{device="md4",state="resync"} 0 node_md_state{device="md6",state="active"} 0 +node_md_state{device="md6",state="check"} 0 node_md_state{device="md6",state="inactive"} 0 node_md_state{device="md6",state="recovering"} 1 node_md_state{device="md6",state="resync"} 0 node_md_state{device="md7",state="active"} 1 +node_md_state{device="md7",state="check"} 0 node_md_state{device="md7",state="inactive"} 0 node_md_state{device="md7",state="recovering"} 0 
node_md_state{device="md7",state="resync"} 0 node_md_state{device="md8",state="active"} 0 +node_md_state{device="md8",state="check"} 0 node_md_state{device="md8",state="inactive"} 0 node_md_state{device="md8",state="recovering"} 0 node_md_state{device="md8",state="resync"} 1 node_md_state{device="md9",state="active"} 0 +node_md_state{device="md9",state="check"} 0 node_md_state{device="md9",state="inactive"} 0 node_md_state{device="md9",state="recovering"} 0 node_md_state{device="md9",state="resync"} 1 @@ -1874,6 +2361,15 @@ node_netstat_TcpExt_SyncookiesRecv 0 # HELP node_netstat_TcpExt_SyncookiesSent Statistic TcpExtSyncookiesSent. # TYPE node_netstat_TcpExt_SyncookiesSent untyped node_netstat_TcpExt_SyncookiesSent 0 +# HELP node_netstat_TcpExt_TCPOFOQueue Statistic TcpExtTCPOFOQueue. +# TYPE node_netstat_TcpExt_TCPOFOQueue untyped +node_netstat_TcpExt_TCPOFOQueue 42 +# HELP node_netstat_TcpExt_TCPRcvQDrop Statistic TcpExtTCPRcvQDrop. +# TYPE node_netstat_TcpExt_TCPRcvQDrop untyped +node_netstat_TcpExt_TCPRcvQDrop 131 +# HELP node_netstat_TcpExt_TCPTimeouts Statistic TcpExtTCPTimeouts. +# TYPE node_netstat_TcpExt_TCPTimeouts untyped +node_netstat_TcpExt_TCPTimeouts 115 # HELP node_netstat_Tcp_ActiveOpens Statistic TcpActiveOpens. # TYPE node_netstat_Tcp_ActiveOpens untyped node_netstat_Tcp_ActiveOpens 3556 @@ -1940,254 +2436,127 @@ node_netstat_Udp_RcvbufErrors 9 # HELP node_netstat_Udp_SndbufErrors Statistic UdpSndbufErrors. # TYPE node_netstat_Udp_SndbufErrors untyped node_netstat_Udp_SndbufErrors 8 -# HELP node_network_address_assign_type address_assign_type value of /sys/class/net/. +# HELP node_network_address_assign_type Network device property: address_assign_type # TYPE node_network_address_assign_type gauge +node_network_address_assign_type{device="bond0"} 3 node_network_address_assign_type{device="eth0"} 3 -# HELP node_network_carrier carrier value of /sys/class/net/. +# HELP node_network_carrier Network device property: carrier # TYPE node_network_carrier gauge +node_network_carrier{device="bond0"} 1 node_network_carrier{device="eth0"} 1 -# HELP node_network_carrier_changes_total carrier_changes_total value of /sys/class/net/. +# HELP node_network_carrier_changes_total Network device property: carrier_changes_total # TYPE node_network_carrier_changes_total counter +node_network_carrier_changes_total{device="bond0"} 2 node_network_carrier_changes_total{device="eth0"} 2 -# HELP node_network_carrier_down_changes_total carrier_down_changes_total value of /sys/class/net/. +# HELP node_network_carrier_down_changes_total Network device property: carrier_down_changes_total # TYPE node_network_carrier_down_changes_total counter +node_network_carrier_down_changes_total{device="bond0"} 1 node_network_carrier_down_changes_total{device="eth0"} 1 -# HELP node_network_carrier_up_changes_total carrier_up_changes_total value of /sys/class/net/. +# HELP node_network_carrier_up_changes_total Network device property: carrier_up_changes_total # TYPE node_network_carrier_up_changes_total counter +node_network_carrier_up_changes_total{device="bond0"} 1 node_network_carrier_up_changes_total{device="eth0"} 1 -# HELP node_network_device_id device_id value of /sys/class/net/. +# HELP node_network_device_id Network device property: device_id # TYPE node_network_device_id gauge +node_network_device_id{device="bond0"} 32 node_network_device_id{device="eth0"} 32 -# HELP node_network_dormant dormant value of /sys/class/net/. 
+# HELP node_network_dormant Network device property: dormant # TYPE node_network_dormant gauge +node_network_dormant{device="bond0"} 1 node_network_dormant{device="eth0"} 1 -# HELP node_network_flags flags value of /sys/class/net/. +# HELP node_network_flags Network device property: flags # TYPE node_network_flags gauge +node_network_flags{device="bond0"} 4867 node_network_flags{device="eth0"} 4867 -# HELP node_network_iface_id iface_id value of /sys/class/net/. +# HELP node_network_iface_id Network device property: iface_id # TYPE node_network_iface_id gauge +node_network_iface_id{device="bond0"} 2 node_network_iface_id{device="eth0"} 2 -# HELP node_network_iface_link iface_link value of /sys/class/net/. +# HELP node_network_iface_link Network device property: iface_link # TYPE node_network_iface_link gauge +node_network_iface_link{device="bond0"} 2 node_network_iface_link{device="eth0"} 2 -# HELP node_network_iface_link_mode iface_link_mode value of /sys/class/net/. +# HELP node_network_iface_link_mode Network device property: iface_link_mode # TYPE node_network_iface_link_mode gauge +node_network_iface_link_mode{device="bond0"} 1 node_network_iface_link_mode{device="eth0"} 1 # HELP node_network_info Non-numeric data from /sys/class/net/, value is always 1. # TYPE node_network_info gauge -node_network_info{address="01:01:01:01:01:01",broadcast="ff:ff:ff:ff:ff:ff",device="eth0",duplex="full",ifalias="",operstate="up"} 1 -# HELP node_network_mtu_bytes mtu_bytes value of /sys/class/net/. +node_network_info{address="01:01:01:01:01:01",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="bond0",duplex="full",ifalias="",operstate="up"} 1 +node_network_info{address="01:01:01:01:01:01",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="eth0",duplex="full",ifalias="",operstate="up"} 1 +# HELP node_network_mtu_bytes Network device property: mtu_bytes # TYPE node_network_mtu_bytes gauge +node_network_mtu_bytes{device="bond0"} 1500 node_network_mtu_bytes{device="eth0"} 1500 -# HELP node_network_name_assign_type name_assign_type value of /sys/class/net/. +# HELP node_network_name_assign_type Network device property: name_assign_type # TYPE node_network_name_assign_type gauge +node_network_name_assign_type{device="bond0"} 2 node_network_name_assign_type{device="eth0"} 2 -# HELP node_network_net_dev_group net_dev_group value of /sys/class/net/. +# HELP node_network_net_dev_group Network device property: net_dev_group # TYPE node_network_net_dev_group gauge +node_network_net_dev_group{device="bond0"} 0 node_network_net_dev_group{device="eth0"} 0 -# HELP node_network_protocol_type protocol_type value of /sys/class/net/. +# HELP node_network_protocol_type Network device property: protocol_type # TYPE node_network_protocol_type gauge +node_network_protocol_type{device="bond0"} 1 node_network_protocol_type{device="eth0"} 1 # HELP node_network_receive_bytes_total Network device statistic receive_bytes. 
# TYPE node_network_receive_bytes_total counter -node_network_receive_bytes_total{device="docker0"} 6.4910168e+07 -node_network_receive_bytes_total{device="eth0"} 6.8210035552e+10 -node_network_receive_bytes_total{device="flannel.1"} 1.8144009813e+10 -node_network_receive_bytes_total{device="ibr10:30"} 0 -node_network_receive_bytes_total{device="lo"} 4.35303245e+08 -node_network_receive_bytes_total{device="lxcbr0"} 0 -node_network_receive_bytes_total{device="tun0"} 1888 -node_network_receive_bytes_total{device="veth4B09XN"} 648 -node_network_receive_bytes_total{device="wlan0"} 1.0437182923e+10 -node_network_receive_bytes_total{device="💩0"} 5.7750104e+07 # HELP node_network_receive_compressed_total Network device statistic receive_compressed. # TYPE node_network_receive_compressed_total counter -node_network_receive_compressed_total{device="docker0"} 0 -node_network_receive_compressed_total{device="eth0"} 0 -node_network_receive_compressed_total{device="flannel.1"} 0 -node_network_receive_compressed_total{device="ibr10:30"} 0 node_network_receive_compressed_total{device="lo"} 0 -node_network_receive_compressed_total{device="lxcbr0"} 0 -node_network_receive_compressed_total{device="tun0"} 0 -node_network_receive_compressed_total{device="veth4B09XN"} 0 -node_network_receive_compressed_total{device="wlan0"} 0 -node_network_receive_compressed_total{device="💩0"} 0 # HELP node_network_receive_drop_total Network device statistic receive_drop. # TYPE node_network_receive_drop_total counter -node_network_receive_drop_total{device="docker0"} 0 -node_network_receive_drop_total{device="eth0"} 0 -node_network_receive_drop_total{device="flannel.1"} 0 -node_network_receive_drop_total{device="ibr10:30"} 0 node_network_receive_drop_total{device="lo"} 0 -node_network_receive_drop_total{device="lxcbr0"} 0 -node_network_receive_drop_total{device="tun0"} 0 -node_network_receive_drop_total{device="veth4B09XN"} 0 -node_network_receive_drop_total{device="wlan0"} 0 -node_network_receive_drop_total{device="💩0"} 0 # HELP node_network_receive_errs_total Network device statistic receive_errs. # TYPE node_network_receive_errs_total counter -node_network_receive_errs_total{device="docker0"} 0 -node_network_receive_errs_total{device="eth0"} 0 -node_network_receive_errs_total{device="flannel.1"} 0 -node_network_receive_errs_total{device="ibr10:30"} 0 node_network_receive_errs_total{device="lo"} 0 -node_network_receive_errs_total{device="lxcbr0"} 0 -node_network_receive_errs_total{device="tun0"} 0 -node_network_receive_errs_total{device="veth4B09XN"} 0 -node_network_receive_errs_total{device="wlan0"} 0 -node_network_receive_errs_total{device="💩0"} 0 # HELP node_network_receive_fifo_total Network device statistic receive_fifo. # TYPE node_network_receive_fifo_total counter -node_network_receive_fifo_total{device="docker0"} 0 -node_network_receive_fifo_total{device="eth0"} 0 -node_network_receive_fifo_total{device="flannel.1"} 0 -node_network_receive_fifo_total{device="ibr10:30"} 0 node_network_receive_fifo_total{device="lo"} 0 -node_network_receive_fifo_total{device="lxcbr0"} 0 -node_network_receive_fifo_total{device="tun0"} 0 -node_network_receive_fifo_total{device="veth4B09XN"} 0 -node_network_receive_fifo_total{device="wlan0"} 0 -node_network_receive_fifo_total{device="💩0"} 0 # HELP node_network_receive_frame_total Network device statistic receive_frame. 
# TYPE node_network_receive_frame_total counter -node_network_receive_frame_total{device="docker0"} 0 -node_network_receive_frame_total{device="eth0"} 0 -node_network_receive_frame_total{device="flannel.1"} 0 -node_network_receive_frame_total{device="ibr10:30"} 0 node_network_receive_frame_total{device="lo"} 0 -node_network_receive_frame_total{device="lxcbr0"} 0 -node_network_receive_frame_total{device="tun0"} 0 -node_network_receive_frame_total{device="veth4B09XN"} 0 -node_network_receive_frame_total{device="wlan0"} 0 -node_network_receive_frame_total{device="💩0"} 0 # HELP node_network_receive_multicast_total Network device statistic receive_multicast. # TYPE node_network_receive_multicast_total counter -node_network_receive_multicast_total{device="docker0"} 0 -node_network_receive_multicast_total{device="eth0"} 0 -node_network_receive_multicast_total{device="flannel.1"} 0 -node_network_receive_multicast_total{device="ibr10:30"} 0 node_network_receive_multicast_total{device="lo"} 0 -node_network_receive_multicast_total{device="lxcbr0"} 0 -node_network_receive_multicast_total{device="tun0"} 0 -node_network_receive_multicast_total{device="veth4B09XN"} 0 -node_network_receive_multicast_total{device="wlan0"} 0 -node_network_receive_multicast_total{device="💩0"} 72 +# HELP node_network_receive_nohandler_total Network device statistic receive_nohandler. +# TYPE node_network_receive_nohandler_total counter +node_network_receive_nohandler_total{device="lo"} 0 # HELP node_network_receive_packets_total Network device statistic receive_packets. # TYPE node_network_receive_packets_total counter -node_network_receive_packets_total{device="docker0"} 1.065585e+06 -node_network_receive_packets_total{device="eth0"} 5.20993275e+08 -node_network_receive_packets_total{device="flannel.1"} 2.28499337e+08 -node_network_receive_packets_total{device="ibr10:30"} 0 -node_network_receive_packets_total{device="lo"} 1.832522e+06 -node_network_receive_packets_total{device="lxcbr0"} 0 -node_network_receive_packets_total{device="tun0"} 24 -node_network_receive_packets_total{device="veth4B09XN"} 8 -node_network_receive_packets_total{device="wlan0"} 1.3899359e+07 -node_network_receive_packets_total{device="💩0"} 105557 -# HELP node_network_speed_bytes speed_bytes value of /sys/class/net/. +# HELP node_network_speed_bytes Network device property: speed_bytes # TYPE node_network_speed_bytes gauge node_network_speed_bytes{device="eth0"} 1.25e+08 # HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. # TYPE node_network_transmit_bytes_total counter -node_network_transmit_bytes_total{device="docker0"} 2.681662018e+09 -node_network_transmit_bytes_total{device="eth0"} 9.315587528e+09 -node_network_transmit_bytes_total{device="flannel.1"} 2.0758990068e+10 -node_network_transmit_bytes_total{device="ibr10:30"} 0 -node_network_transmit_bytes_total{device="lo"} 4.35303245e+08 -node_network_transmit_bytes_total{device="lxcbr0"} 2.630299e+06 -node_network_transmit_bytes_total{device="tun0"} 67120 -node_network_transmit_bytes_total{device="veth4B09XN"} 1.943284e+06 -node_network_transmit_bytes_total{device="wlan0"} 2.85164936e+09 -node_network_transmit_bytes_total{device="💩0"} 4.04570255e+08 # HELP node_network_transmit_carrier_total Network device statistic transmit_carrier. 
# TYPE node_network_transmit_carrier_total counter -node_network_transmit_carrier_total{device="docker0"} 0 -node_network_transmit_carrier_total{device="eth0"} 0 -node_network_transmit_carrier_total{device="flannel.1"} 0 -node_network_transmit_carrier_total{device="ibr10:30"} 0 node_network_transmit_carrier_total{device="lo"} 0 -node_network_transmit_carrier_total{device="lxcbr0"} 0 -node_network_transmit_carrier_total{device="tun0"} 0 -node_network_transmit_carrier_total{device="veth4B09XN"} 0 -node_network_transmit_carrier_total{device="wlan0"} 0 -node_network_transmit_carrier_total{device="💩0"} 0 # HELP node_network_transmit_colls_total Network device statistic transmit_colls. # TYPE node_network_transmit_colls_total counter -node_network_transmit_colls_total{device="docker0"} 0 -node_network_transmit_colls_total{device="eth0"} 0 -node_network_transmit_colls_total{device="flannel.1"} 0 -node_network_transmit_colls_total{device="ibr10:30"} 0 node_network_transmit_colls_total{device="lo"} 0 -node_network_transmit_colls_total{device="lxcbr0"} 0 -node_network_transmit_colls_total{device="tun0"} 0 -node_network_transmit_colls_total{device="veth4B09XN"} 0 -node_network_transmit_colls_total{device="wlan0"} 0 -node_network_transmit_colls_total{device="💩0"} 0 # HELP node_network_transmit_compressed_total Network device statistic transmit_compressed. # TYPE node_network_transmit_compressed_total counter -node_network_transmit_compressed_total{device="docker0"} 0 -node_network_transmit_compressed_total{device="eth0"} 0 -node_network_transmit_compressed_total{device="flannel.1"} 0 -node_network_transmit_compressed_total{device="ibr10:30"} 0 node_network_transmit_compressed_total{device="lo"} 0 -node_network_transmit_compressed_total{device="lxcbr0"} 0 -node_network_transmit_compressed_total{device="tun0"} 0 -node_network_transmit_compressed_total{device="veth4B09XN"} 0 -node_network_transmit_compressed_total{device="wlan0"} 0 -node_network_transmit_compressed_total{device="💩0"} 0 # HELP node_network_transmit_drop_total Network device statistic transmit_drop. # TYPE node_network_transmit_drop_total counter -node_network_transmit_drop_total{device="docker0"} 0 -node_network_transmit_drop_total{device="eth0"} 0 -node_network_transmit_drop_total{device="flannel.1"} 64 -node_network_transmit_drop_total{device="ibr10:30"} 0 node_network_transmit_drop_total{device="lo"} 0 -node_network_transmit_drop_total{device="lxcbr0"} 0 -node_network_transmit_drop_total{device="tun0"} 0 -node_network_transmit_drop_total{device="veth4B09XN"} 0 -node_network_transmit_drop_total{device="wlan0"} 0 -node_network_transmit_drop_total{device="💩0"} 0 # HELP node_network_transmit_errs_total Network device statistic transmit_errs. # TYPE node_network_transmit_errs_total counter -node_network_transmit_errs_total{device="docker0"} 0 -node_network_transmit_errs_total{device="eth0"} 0 -node_network_transmit_errs_total{device="flannel.1"} 0 -node_network_transmit_errs_total{device="ibr10:30"} 0 node_network_transmit_errs_total{device="lo"} 0 -node_network_transmit_errs_total{device="lxcbr0"} 0 -node_network_transmit_errs_total{device="tun0"} 0 -node_network_transmit_errs_total{device="veth4B09XN"} 0 -node_network_transmit_errs_total{device="wlan0"} 0 -node_network_transmit_errs_total{device="💩0"} 0 # HELP node_network_transmit_fifo_total Network device statistic transmit_fifo. 
# TYPE node_network_transmit_fifo_total counter -node_network_transmit_fifo_total{device="docker0"} 0 -node_network_transmit_fifo_total{device="eth0"} 0 -node_network_transmit_fifo_total{device="flannel.1"} 0 -node_network_transmit_fifo_total{device="ibr10:30"} 0 node_network_transmit_fifo_total{device="lo"} 0 -node_network_transmit_fifo_total{device="lxcbr0"} 0 -node_network_transmit_fifo_total{device="tun0"} 0 -node_network_transmit_fifo_total{device="veth4B09XN"} 0 -node_network_transmit_fifo_total{device="wlan0"} 0 -node_network_transmit_fifo_total{device="💩0"} 0 # HELP node_network_transmit_packets_total Network device statistic transmit_packets. # TYPE node_network_transmit_packets_total counter -node_network_transmit_packets_total{device="docker0"} 1.929779e+06 -node_network_transmit_packets_total{device="eth0"} 4.3451486e+07 -node_network_transmit_packets_total{device="flannel.1"} 2.58369223e+08 -node_network_transmit_packets_total{device="ibr10:30"} 0 -node_network_transmit_packets_total{device="lo"} 1.832522e+06 -node_network_transmit_packets_total{device="lxcbr0"} 28339 -node_network_transmit_packets_total{device="tun0"} 934 -node_network_transmit_packets_total{device="veth4B09XN"} 10640 -node_network_transmit_packets_total{device="wlan0"} 1.17262e+07 -node_network_transmit_packets_total{device="💩0"} 304261 -# HELP node_network_transmit_queue_length transmit_queue_length value of /sys/class/net/. +# HELP node_network_transmit_queue_length Network device property: transmit_queue_length # TYPE node_network_transmit_queue_length gauge +node_network_transmit_queue_length{device="bond0"} 1000 node_network_transmit_queue_length{device="eth0"} 1000 # HELP node_network_up Value is 1 if operstate is 'up', 0 otherwise. # TYPE node_network_up gauge +node_network_up{device="bond0"} 1 node_network_up{device="eth0"} 1 # HELP node_nf_conntrack_entries Number of currently allocated flow entries for connection tracking. # TYPE node_nf_conntrack_entries gauge @@ -2195,6 +2564,30 @@ node_nf_conntrack_entries 123 # HELP node_nf_conntrack_entries_limit Maximum size of connection tracking table. # TYPE node_nf_conntrack_entries_limit gauge node_nf_conntrack_entries_limit 65536 +# HELP node_nf_conntrack_stat_drop Number of packets dropped due to conntrack failure. +# TYPE node_nf_conntrack_stat_drop gauge +node_nf_conntrack_stat_drop 0 +# HELP node_nf_conntrack_stat_early_drop Number of dropped conntrack entries to make room for new ones, if maximum table size was reached. +# TYPE node_nf_conntrack_stat_early_drop gauge +node_nf_conntrack_stat_early_drop 0 +# HELP node_nf_conntrack_stat_found Number of searched entries which were successful. +# TYPE node_nf_conntrack_stat_found gauge +node_nf_conntrack_stat_found 0 +# HELP node_nf_conntrack_stat_ignore Number of packets seen which are already connected to a conntrack entry. +# TYPE node_nf_conntrack_stat_ignore gauge +node_nf_conntrack_stat_ignore 89738 +# HELP node_nf_conntrack_stat_insert Number of entries inserted into the list. +# TYPE node_nf_conntrack_stat_insert gauge +node_nf_conntrack_stat_insert 0 +# HELP node_nf_conntrack_stat_insert_failed Number of entries for which list insertion was attempted but failed. +# TYPE node_nf_conntrack_stat_insert_failed gauge +node_nf_conntrack_stat_insert_failed 0 +# HELP node_nf_conntrack_stat_invalid Number of packets seen which can not be tracked. 
+# TYPE node_nf_conntrack_stat_invalid gauge +node_nf_conntrack_stat_invalid 53 +# HELP node_nf_conntrack_stat_search_restart Number of conntrack table lookups which had to be restarted due to hashtable resizes. +# TYPE node_nf_conntrack_stat_search_restart gauge +node_nf_conntrack_stat_search_restart 7 # HELP node_nfs_connections_total Total number of NFSd TCP connections. # TYPE node_nfs_connections_total counter node_nfs_connections_total 45 @@ -2409,13 +2802,16 @@ node_nfsd_requests_total{method="SecInfo",proto="4"} 0 node_nfsd_requests_total{method="SetAttr",proto="2"} 0 node_nfsd_requests_total{method="SetAttr",proto="3"} 0 node_nfsd_requests_total{method="SetAttr",proto="4"} 0 +node_nfsd_requests_total{method="SetClientID",proto="4"} 3 +node_nfsd_requests_total{method="SetClientIDConfirm",proto="4"} 3 node_nfsd_requests_total{method="SymLink",proto="2"} 0 node_nfsd_requests_total{method="SymLink",proto="3"} 0 -node_nfsd_requests_total{method="Verify",proto="4"} 3 +node_nfsd_requests_total{method="Verify",proto="4"} 0 +node_nfsd_requests_total{method="WdelegGetattr",proto="4"} 15 node_nfsd_requests_total{method="WrCache",proto="2"} 0 node_nfsd_requests_total{method="Write",proto="2"} 0 node_nfsd_requests_total{method="Write",proto="3"} 0 -node_nfsd_requests_total{method="Write",proto="4"} 3 +node_nfsd_requests_total{method="Write",proto="4"} 0 # HELP node_nfsd_rpc_errors_total Total number of NFSd RPC errors by error type. # TYPE node_nfsd_rpc_errors_total counter node_nfsd_rpc_errors_total{error="auth"} 2 @@ -2427,6 +2823,91 @@ node_nfsd_server_rpcs_total 18628 # HELP node_nfsd_server_threads Total number of NFSd kernel threads that are running. # TYPE node_nfsd_server_threads gauge node_nfsd_server_threads 8 +# HELP node_nvme_info Non-numeric data from /sys/class/nvme/, value is always 1. +# TYPE node_nvme_info gauge +node_nvme_info{device="nvme0",firmware_revision="1B2QEXP7",model="Samsung SSD 970 PRO 512GB",serial="S680HF8N190894I",state="live"} 1 +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. +# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. 
+# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_pcidevice_current_link_transfers_per_second Value of current link's transfers per second (T/s) +# TYPE node_pcidevice_current_link_transfers_per_second gauge +node_pcidevice_current_link_transfers_per_second{bus="00",device="02",function="1",segment="0000"} 8e+09 +node_pcidevice_current_link_transfers_per_second{bus="01",device="00",function="0",segment="0000"} 8e+09 +node_pcidevice_current_link_transfers_per_second{bus="45",device="00",function="0",segment="0000"} 5e+09 +# HELP node_pcidevice_current_link_width Value of current link's width (number of lanes) +# TYPE node_pcidevice_current_link_width gauge +node_pcidevice_current_link_width{bus="00",device="02",function="1",segment="0000"} 4 +node_pcidevice_current_link_width{bus="01",device="00",function="0",segment="0000"} 4 +node_pcidevice_current_link_width{bus="45",device="00",function="0",segment="0000"} 4 +# HELP node_pcidevice_d3cold_allowed Whether the PCIe device supports D3cold power state (0/1). +# TYPE node_pcidevice_d3cold_allowed gauge +node_pcidevice_d3cold_allowed{bus="00",device="02",function="1",segment="0000"} 1 +node_pcidevice_d3cold_allowed{bus="01",device="00",function="0",segment="0000"} 1 +node_pcidevice_d3cold_allowed{bus="45",device="00",function="0",segment="0000"} 1 +# HELP node_pcidevice_info Non-numeric data from /sys/bus/pci/devices/, value is always 1. +# TYPE node_pcidevice_info gauge +node_pcidevice_info{bus="00",class_id="0x060400",device="02",device_id="0x1634",function="1",parent_bus="*",parent_device="*",parent_function="*",parent_segment="*",revision="0x00",segment="0000",subsystem_device_id="0x5095",subsystem_vendor_id="0x17aa",vendor_id="0x1022"} 1 +node_pcidevice_info{bus="01",class_id="0x010802",device="00",device_id="0x540a",function="0",parent_bus="00",parent_device="02",parent_function="1",parent_segment="0000",revision="0x01",segment="0000",subsystem_device_id="0x5021",subsystem_vendor_id="0xc0a9",vendor_id="0xc0a9"} 1 +node_pcidevice_info{bus="45",class_id="0x020000",device="00",device_id="0x1521",function="0",parent_bus="40",parent_device="01",parent_function="3",parent_segment="0000",revision="0x01",segment="0000",subsystem_device_id="0x00a3",subsystem_vendor_id="0x8086",vendor_id="0x8086"} 1 +# HELP node_pcidevice_max_link_transfers_per_second Value of maximum link's transfers per second (T/s) +# TYPE node_pcidevice_max_link_transfers_per_second gauge +node_pcidevice_max_link_transfers_per_second{bus="00",device="02",function="1",segment="0000"} 8e+09 +node_pcidevice_max_link_transfers_per_second{bus="01",device="00",function="0",segment="0000"} 1.6e+10 +node_pcidevice_max_link_transfers_per_second{bus="45",device="00",function="0",segment="0000"} 5e+09 +# HELP node_pcidevice_max_link_width Value of maximum link's width (number of lanes) +# TYPE node_pcidevice_max_link_width gauge +node_pcidevice_max_link_width{bus="00",device="02",function="1",segment="0000"} 8 +node_pcidevice_max_link_width{bus="01",device="00",function="0",segment="0000"} 4 +node_pcidevice_max_link_width{bus="45",device="00",function="0",segment="0000"} 4 +# HELP node_pcidevice_numa_node NUMA node number for the PCI device. -1 indicates unknown or not available. +# TYPE node_pcidevice_numa_node gauge +node_pcidevice_numa_node{bus="45",device="00",function="0",segment="0000"} 0 +# HELP node_pcidevice_power_state PCIe device power state, one of: D0, D1, D2, D3hot, D3cold, unknown or error. 
+# TYPE node_pcidevice_power_state gauge +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D0"} 1 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D1"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D2"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D3cold"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D3hot"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="error"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="unknown"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D0"} 1 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D1"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D2"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D3cold"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D3hot"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="error"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="unknown"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D0"} 1 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D1"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D2"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D3cold"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D3hot"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="error"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="unknown"} 0 +# HELP node_pcidevice_sriov_drivers_autoprobe Whether SR-IOV drivers autoprobe is enabled for the device (0/1). +# TYPE node_pcidevice_sriov_drivers_autoprobe gauge +node_pcidevice_sriov_drivers_autoprobe{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_drivers_autoprobe{bus="01",device="00",function="0",segment="0000"} 1 +node_pcidevice_sriov_drivers_autoprobe{bus="45",device="00",function="0",segment="0000"} 1 +# HELP node_pcidevice_sriov_numvfs Number of Virtual Functions (VFs) currently enabled for SR-IOV. +# TYPE node_pcidevice_sriov_numvfs gauge +node_pcidevice_sriov_numvfs{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_numvfs{bus="01",device="00",function="0",segment="0000"} 4 +node_pcidevice_sriov_numvfs{bus="45",device="00",function="0",segment="0000"} 0 +# HELP node_pcidevice_sriov_totalvfs Total number of Virtual Functions (VFs) supported by the device. +# TYPE node_pcidevice_sriov_totalvfs gauge +node_pcidevice_sriov_totalvfs{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_totalvfs{bus="01",device="00",function="0",segment="0000"} 8 +node_pcidevice_sriov_totalvfs{bus="45",device="00",function="0",segment="0000"} 7 +# HELP node_pcidevice_sriov_vf_total_msix Total number of MSI-X vectors for Virtual Functions. 
+# TYPE node_pcidevice_sriov_vf_total_msix gauge +node_pcidevice_sriov_vf_total_msix{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_vf_total_msix{bus="01",device="00",function="0",segment="0000"} 16 +node_pcidevice_sriov_vf_total_msix{bus="45",device="00",function="0",segment="0000"} 0 # HELP node_power_supply_capacity capacity value of /sys/class/power_supply/. # TYPE node_power_supply_capacity gauge node_power_supply_capacity{power_supply="BAT0"} 81 @@ -2435,32 +2916,32 @@ node_power_supply_capacity{power_supply="BAT0"} 81 node_power_supply_cyclecount{power_supply="BAT0"} 0 # HELP node_power_supply_energy_full energy_full value of /sys/class/power_supply/. # TYPE node_power_supply_energy_full gauge -node_power_supply_energy_full{power_supply="BAT0"} 4.507e+07 +node_power_supply_energy_full{power_supply="BAT0"} 45.07 # HELP node_power_supply_energy_full_design energy_full_design value of /sys/class/power_supply/. # TYPE node_power_supply_energy_full_design gauge -node_power_supply_energy_full_design{power_supply="BAT0"} 4.752e+07 -# HELP node_power_supply_energy_now energy_now value of /sys/class/power_supply/. -# TYPE node_power_supply_energy_now gauge -node_power_supply_energy_now{power_supply="BAT0"} 3.658e+07 +node_power_supply_energy_full_design{power_supply="BAT0"} 47.52 +# HELP node_power_supply_energy_watthour energy_watthour value of /sys/class/power_supply/. +# TYPE node_power_supply_energy_watthour gauge +node_power_supply_energy_watthour{power_supply="BAT0"} 36.58 # HELP node_power_supply_info info of /sys/class/power_supply/. # TYPE node_power_supply_info gauge node_power_supply_info{power_supply="AC",type="Mains"} 1 -node_power_supply_info{capacity_level="Normal",manufacturer="LGC",model_name="LNV-45N1",power_supply="BAT0",serial_number="38109",status="Discharging",technology="Li-ion",type="Battery"} 1 +node_power_supply_info{capacity_level="Normal",manufacturer="LGC",model_name="LNV-45N1��",power_supply="BAT0",serial_number="38109",status="Discharging",technology="Li-ion",type="Battery"} 1 # HELP node_power_supply_online online value of /sys/class/power_supply/. # TYPE node_power_supply_online gauge node_power_supply_online{power_supply="AC"} 0 -# HELP node_power_supply_power_now power_now value of /sys/class/power_supply/. -# TYPE node_power_supply_power_now gauge -node_power_supply_power_now{power_supply="BAT0"} 5.002e+06 +# HELP node_power_supply_power_watt power_watt value of /sys/class/power_supply/. +# TYPE node_power_supply_power_watt gauge +node_power_supply_power_watt{power_supply="BAT0"} 5.002 # HELP node_power_supply_present present value of /sys/class/power_supply/. # TYPE node_power_supply_present gauge node_power_supply_present{power_supply="BAT0"} 1 # HELP node_power_supply_voltage_min_design voltage_min_design value of /sys/class/power_supply/. # TYPE node_power_supply_voltage_min_design gauge -node_power_supply_voltage_min_design{power_supply="BAT0"} 1.08e+07 -# HELP node_power_supply_voltage_now voltage_now value of /sys/class/power_supply/. -# TYPE node_power_supply_voltage_now gauge -node_power_supply_voltage_now{power_supply="BAT0"} 1.166e+07 +node_power_supply_voltage_min_design{power_supply="BAT0"} 10.8 +# HELP node_power_supply_voltage_volt voltage_volt value of /sys/class/power_supply/. 
+# TYPE node_power_supply_voltage_volt gauge +node_power_supply_voltage_volt{power_supply="BAT0"} 11.66 # HELP node_pressure_cpu_waiting_seconds_total Total time in seconds that processes have waited for CPU time # TYPE node_pressure_cpu_waiting_seconds_total counter node_pressure_cpu_waiting_seconds_total 14.036781000000001 @@ -2470,6 +2951,9 @@ node_pressure_io_stalled_seconds_total 159.229614 # HELP node_pressure_io_waiting_seconds_total Total time in seconds that processes have waited due to IO congestion # TYPE node_pressure_io_waiting_seconds_total counter node_pressure_io_waiting_seconds_total 159.886802 +# HELP node_pressure_irq_stalled_seconds_total Total time in seconds no process could make progress due to IRQ congestion +# TYPE node_pressure_irq_stalled_seconds_total counter +node_pressure_irq_stalled_seconds_total 0.008494 # HELP node_pressure_memory_stalled_seconds_total Total time in seconds no process could make progress due to memory congestion # TYPE node_pressure_memory_stalled_seconds_total counter node_pressure_memory_stalled_seconds_total 0 @@ -2484,23 +2968,32 @@ node_processes_max_processes 123 node_processes_max_threads 7801 # HELP node_processes_pids Number of PIDs # TYPE node_processes_pids gauge -node_processes_pids 1 +node_processes_pids 3 # HELP node_processes_state Number of processes in each state. # TYPE node_processes_state gauge -node_processes_state{state="S"} 1 +node_processes_state{state="I"} 1 +node_processes_state{state="S"} 2 # HELP node_processes_threads Allocated threads in system # TYPE node_processes_threads gauge -node_processes_threads 1 +node_processes_threads 3 # HELP node_procs_blocked Number of processes blocked waiting for I/O to complete. # TYPE node_procs_blocked gauge node_procs_blocked 0 # HELP node_procs_running Number of processes in runnable state. # TYPE node_procs_running gauge node_procs_running 2 +# HELP node_qdisc_backlog Number of bytes currently in queue to be sent. +# TYPE node_qdisc_backlog gauge +node_qdisc_backlog{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_backlog{device="wlan0",kind="fq"} 0 # HELP node_qdisc_bytes_total Number of bytes sent. # TYPE node_qdisc_bytes_total counter node_qdisc_bytes_total{device="eth0",kind="pfifo_fast"} 83 node_qdisc_bytes_total{device="wlan0",kind="fq"} 42 +# HELP node_qdisc_current_queue_length Number of packets currently in queue to be sent. +# TYPE node_qdisc_current_queue_length gauge +node_qdisc_current_queue_length{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_current_queue_length{device="wlan0",kind="fq"} 0 # HELP node_qdisc_drops_total Number of packets dropped. # TYPE node_qdisc_drops_total counter node_qdisc_drops_total{device="eth0",kind="pfifo_fast"} 0 @@ -2519,10 +3012,10 @@ node_qdisc_requeues_total{device="eth0",kind="pfifo_fast"} 2 node_qdisc_requeues_total{device="wlan0",kind="fq"} 1 # HELP node_rapl_core_joules_total Current RAPL core value in joules # TYPE node_rapl_core_joules_total counter -node_rapl_core_joules_total{index="0"} 118821.284256 +node_rapl_core_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0:0"} 118821.284256 # HELP node_rapl_package_joules_total Current RAPL package value in joules # TYPE node_rapl_package_joules_total counter -node_rapl_package_joules_total{index="0"} 240422.366267 +node_rapl_package_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0"} 240422.366267 # HELP node_schedstat_running_seconds_total Number of seconds CPU spent running a process. 
# TYPE node_schedstat_running_seconds_total counter node_schedstat_running_seconds_total{cpu="0"} 2.045936778163039e+06 @@ -2542,20 +3035,26 @@ node_schedstat_waiting_seconds_total{cpu="1"} 364107.263788241 node_scrape_collector_success{collector="arp"} 1 node_scrape_collector_success{collector="bcache"} 1 node_scrape_collector_success{collector="bonding"} 1 +node_scrape_collector_success{collector="btrfs"} 1 node_scrape_collector_success{collector="buddyinfo"} 1 +node_scrape_collector_success{collector="cgroups"} 1 node_scrape_collector_success{collector="conntrack"} 1 node_scrape_collector_success{collector="cpu"} 1 +node_scrape_collector_success{collector="cpu_vulnerabilities"} 1 node_scrape_collector_success{collector="cpufreq"} 1 node_scrape_collector_success{collector="diskstats"} 1 +node_scrape_collector_success{collector="dmi"} 1 node_scrape_collector_success{collector="drbd"} 1 node_scrape_collector_success{collector="edac"} 1 node_scrape_collector_success{collector="entropy"} 1 +node_scrape_collector_success{collector="fibrechannel"} 1 node_scrape_collector_success{collector="filefd"} 1 node_scrape_collector_success{collector="hwmon"} 1 node_scrape_collector_success{collector="infiniband"} 1 node_scrape_collector_success{collector="interrupts"} 1 node_scrape_collector_success{collector="ipvs"} 1 node_scrape_collector_success{collector="ksmd"} 1 +node_scrape_collector_success{collector="lnstat"} 1 node_scrape_collector_success{collector="loadavg"} 1 node_scrape_collector_success{collector="mdadm"} 1 node_scrape_collector_success{collector="meminfo"} 1 @@ -2566,30 +3065,84 @@ node_scrape_collector_success{collector="netdev"} 1 node_scrape_collector_success{collector="netstat"} 1 node_scrape_collector_success{collector="nfs"} 1 node_scrape_collector_success{collector="nfsd"} 1 +node_scrape_collector_success{collector="nvme"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="pcidevice"} 1 node_scrape_collector_success{collector="powersupplyclass"} 1 node_scrape_collector_success{collector="pressure"} 1 node_scrape_collector_success{collector="processes"} 1 node_scrape_collector_success{collector="qdisc"} 1 node_scrape_collector_success{collector="rapl"} 1 node_scrape_collector_success{collector="schedstat"} 1 +node_scrape_collector_success{collector="slabinfo"} 1 node_scrape_collector_success{collector="sockstat"} 1 +node_scrape_collector_success{collector="softirqs"} 1 node_scrape_collector_success{collector="softnet"} 1 node_scrape_collector_success{collector="stat"} 1 +node_scrape_collector_success{collector="sysctl"} 1 +node_scrape_collector_success{collector="tapestats"} 1 node_scrape_collector_success{collector="textfile"} 1 node_scrape_collector_success{collector="thermal_zone"} 1 +node_scrape_collector_success{collector="time"} 1 +node_scrape_collector_success{collector="udp_queues"} 1 node_scrape_collector_success{collector="vmstat"} 1 +node_scrape_collector_success{collector="watchdog"} 1 node_scrape_collector_success{collector="wifi"} 1 +node_scrape_collector_success{collector="xfrm"} 1 node_scrape_collector_success{collector="xfs"} 1 node_scrape_collector_success{collector="zfs"} 1 +node_scrape_collector_success{collector="zoneinfo"} 1 +# HELP node_slabinfo_active_objects The number of objects that are currently active (i.e., in use). 
+# TYPE node_slabinfo_active_objects gauge +node_slabinfo_active_objects{slab="dmaengine-unmap-128"} 1206 +node_slabinfo_active_objects{slab="kmalloc-8192"} 132 +node_slabinfo_active_objects{slab="kmem_cache"} 320 +node_slabinfo_active_objects{slab="tw_sock_TCP"} 704 +# HELP node_slabinfo_object_size_bytes The size of objects in this slab, in bytes. +# TYPE node_slabinfo_object_size_bytes gauge +node_slabinfo_object_size_bytes{slab="dmaengine-unmap-128"} 1088 +node_slabinfo_object_size_bytes{slab="kmalloc-8192"} 8192 +node_slabinfo_object_size_bytes{slab="kmem_cache"} 256 +node_slabinfo_object_size_bytes{slab="tw_sock_TCP"} 256 +# HELP node_slabinfo_objects The total number of allocated objects (i.e., objects that are both in use and not in use). +# TYPE node_slabinfo_objects gauge +node_slabinfo_objects{slab="dmaengine-unmap-128"} 1320 +node_slabinfo_objects{slab="kmalloc-8192"} 148 +node_slabinfo_objects{slab="kmem_cache"} 320 +node_slabinfo_objects{slab="tw_sock_TCP"} 864 +# HELP node_slabinfo_objects_per_slab The number of objects stored in each slab. +# TYPE node_slabinfo_objects_per_slab gauge +node_slabinfo_objects_per_slab{slab="dmaengine-unmap-128"} 30 +node_slabinfo_objects_per_slab{slab="kmalloc-8192"} 4 +node_slabinfo_objects_per_slab{slab="kmem_cache"} 32 +node_slabinfo_objects_per_slab{slab="tw_sock_TCP"} 32 +# HELP node_slabinfo_pages_per_slab The number of pages allocated for each slab. +# TYPE node_slabinfo_pages_per_slab gauge +node_slabinfo_pages_per_slab{slab="dmaengine-unmap-128"} 8 +node_slabinfo_pages_per_slab{slab="kmalloc-8192"} 8 +node_slabinfo_pages_per_slab{slab="kmem_cache"} 2 +node_slabinfo_pages_per_slab{slab="tw_sock_TCP"} 2 +# HELP node_sockstat_FRAG6_inuse Number of FRAG6 sockets in state inuse. +# TYPE node_sockstat_FRAG6_inuse gauge +node_sockstat_FRAG6_inuse 0 +# HELP node_sockstat_FRAG6_memory Number of FRAG6 sockets in state memory. +# TYPE node_sockstat_FRAG6_memory gauge +node_sockstat_FRAG6_memory 0 # HELP node_sockstat_FRAG_inuse Number of FRAG sockets in state inuse. # TYPE node_sockstat_FRAG_inuse gauge node_sockstat_FRAG_inuse 0 # HELP node_sockstat_FRAG_memory Number of FRAG sockets in state memory. # TYPE node_sockstat_FRAG_memory gauge node_sockstat_FRAG_memory 0 +# HELP node_sockstat_RAW6_inuse Number of RAW6 sockets in state inuse. +# TYPE node_sockstat_RAW6_inuse gauge +node_sockstat_RAW6_inuse 1 # HELP node_sockstat_RAW_inuse Number of RAW sockets in state inuse. # TYPE node_sockstat_RAW_inuse gauge node_sockstat_RAW_inuse 0 +# HELP node_sockstat_TCP6_inuse Number of TCP6 sockets in state inuse. +# TYPE node_sockstat_TCP6_inuse gauge +node_sockstat_TCP6_inuse 17 # HELP node_sockstat_TCP_alloc Number of TCP sockets in state alloc. # TYPE node_sockstat_TCP_alloc gauge node_sockstat_TCP_alloc 17 @@ -2601,13 +3154,19 @@ node_sockstat_TCP_inuse 4 node_sockstat_TCP_mem 1 # HELP node_sockstat_TCP_mem_bytes Number of TCP sockets in state mem_bytes. # TYPE node_sockstat_TCP_mem_bytes gauge -node_sockstat_TCP_mem_bytes 65536 +node_sockstat_TCP_mem_bytes 4096 # HELP node_sockstat_TCP_orphan Number of TCP sockets in state orphan. # TYPE node_sockstat_TCP_orphan gauge node_sockstat_TCP_orphan 0 # HELP node_sockstat_TCP_tw Number of TCP sockets in state tw. # TYPE node_sockstat_TCP_tw gauge node_sockstat_TCP_tw 4 +# HELP node_sockstat_UDP6_inuse Number of UDP6 sockets in state inuse. +# TYPE node_sockstat_UDP6_inuse gauge +node_sockstat_UDP6_inuse 9 +# HELP node_sockstat_UDPLITE6_inuse Number of UDPLITE6 sockets in state inuse. 
+# TYPE node_sockstat_UDPLITE6_inuse gauge +node_sockstat_UDPLITE6_inuse 0 # HELP node_sockstat_UDPLITE_inuse Number of UDPLITE sockets in state inuse. # TYPE node_sockstat_UDPLITE_inuse gauge node_sockstat_UDPLITE_inuse 0 @@ -2620,27 +3179,142 @@ node_sockstat_UDP_mem 0 # HELP node_sockstat_UDP_mem_bytes Number of UDP sockets in state mem_bytes. # TYPE node_sockstat_UDP_mem_bytes gauge node_sockstat_UDP_mem_bytes 0 -# HELP node_sockstat_sockets_used Number of sockets sockets in state used. +# HELP node_sockstat_sockets_used Number of IPv4 sockets in use. # TYPE node_sockstat_sockets_used gauge node_sockstat_sockets_used 229 +# HELP node_softirqs_functions_total Softirq counts per CPU. +# TYPE node_softirqs_functions_total counter +node_softirqs_functions_total{cpu="0",type="BLOCK"} 23776 +node_softirqs_functions_total{cpu="0",type="HI"} 7 +node_softirqs_functions_total{cpu="0",type="HRTIMER"} 40 +node_softirqs_functions_total{cpu="0",type="IRQ_POLL"} 0 +node_softirqs_functions_total{cpu="0",type="NET_RX"} 43066 +node_softirqs_functions_total{cpu="0",type="NET_TX"} 2301 +node_softirqs_functions_total{cpu="0",type="RCU"} 155929 +node_softirqs_functions_total{cpu="0",type="SCHED"} 378895 +node_softirqs_functions_total{cpu="0",type="TASKLET"} 372 +node_softirqs_functions_total{cpu="0",type="TIMER"} 424191 +node_softirqs_functions_total{cpu="1",type="BLOCK"} 24115 +node_softirqs_functions_total{cpu="1",type="HI"} 1 +node_softirqs_functions_total{cpu="1",type="HRTIMER"} 346 +node_softirqs_functions_total{cpu="1",type="IRQ_POLL"} 0 +node_softirqs_functions_total{cpu="1",type="NET_RX"} 104508 +node_softirqs_functions_total{cpu="1",type="NET_TX"} 2430 +node_softirqs_functions_total{cpu="1",type="RCU"} 146631 +node_softirqs_functions_total{cpu="1",type="SCHED"} 152852 +node_softirqs_functions_total{cpu="1",type="TASKLET"} 1899 +node_softirqs_functions_total{cpu="1",type="TIMER"} 108342 +# HELP node_softirqs_total Number of softirq calls. 
+# TYPE node_softirqs_total counter +node_softirqs_total{vector="block"} 186066 +node_softirqs_total{vector="block_iopoll"} 0 +node_softirqs_total{vector="hi"} 250191 +node_softirqs_total{vector="hrtimer"} 12499 +node_softirqs_total{vector="net_rx"} 211099 +node_softirqs_total{vector="net_tx"} 1647 +node_softirqs_total{vector="rcu"} 508444 +node_softirqs_total{vector="sched"} 622196 +node_softirqs_total{vector="tasklet"} 1.783454e+06 +node_softirqs_total{vector="timer"} 1.481983e+06 +# HELP node_softnet_backlog_len Softnet backlog status +# TYPE node_softnet_backlog_len gauge +node_softnet_backlog_len{cpu="0"} 0 +node_softnet_backlog_len{cpu="1"} 0 +node_softnet_backlog_len{cpu="2"} 0 +node_softnet_backlog_len{cpu="3"} 0 +# HELP node_softnet_cpu_collision_total Number of collision occur while obtaining device lock while transmitting +# TYPE node_softnet_cpu_collision_total counter +node_softnet_cpu_collision_total{cpu="0"} 0 +node_softnet_cpu_collision_total{cpu="1"} 0 +node_softnet_cpu_collision_total{cpu="2"} 0 +node_softnet_cpu_collision_total{cpu="3"} 0 # HELP node_softnet_dropped_total Number of dropped packets # TYPE node_softnet_dropped_total counter node_softnet_dropped_total{cpu="0"} 0 node_softnet_dropped_total{cpu="1"} 41 node_softnet_dropped_total{cpu="2"} 0 node_softnet_dropped_total{cpu="3"} 0 +# HELP node_softnet_flow_limit_count_total Number of times flow limit has been reached +# TYPE node_softnet_flow_limit_count_total counter +node_softnet_flow_limit_count_total{cpu="0"} 0 +node_softnet_flow_limit_count_total{cpu="1"} 0 +node_softnet_flow_limit_count_total{cpu="2"} 0 +node_softnet_flow_limit_count_total{cpu="3"} 0 # HELP node_softnet_processed_total Number of processed packets # TYPE node_softnet_processed_total counter node_softnet_processed_total{cpu="0"} 299641 node_softnet_processed_total{cpu="1"} 916354 node_softnet_processed_total{cpu="2"} 5.577791e+06 node_softnet_processed_total{cpu="3"} 3.113785e+06 +# HELP node_softnet_received_rps_total Number of times cpu woken up received_rps +# TYPE node_softnet_received_rps_total counter +node_softnet_received_rps_total{cpu="0"} 0 +node_softnet_received_rps_total{cpu="1"} 0 +node_softnet_received_rps_total{cpu="2"} 0 +node_softnet_received_rps_total{cpu="3"} 0 # HELP node_softnet_times_squeezed_total Number of times processing packets ran out of quota # TYPE node_softnet_times_squeezed_total counter node_softnet_times_squeezed_total{cpu="0"} 1 node_softnet_times_squeezed_total{cpu="1"} 10 node_softnet_times_squeezed_total{cpu="2"} 85 node_softnet_times_squeezed_total{cpu="3"} 50 +# HELP node_sysctl_fs_file_nr sysctl fs.file-nr +# TYPE node_sysctl_fs_file_nr untyped +node_sysctl_fs_file_nr{index="0"} 1024 +node_sysctl_fs_file_nr{index="1"} 0 +node_sysctl_fs_file_nr{index="2"} 1.631329e+06 +# HELP node_sysctl_fs_file_nr_current sysctl fs.file-nr, field 1 +# TYPE node_sysctl_fs_file_nr_current untyped +node_sysctl_fs_file_nr_current 0 +# HELP node_sysctl_fs_file_nr_max sysctl fs.file-nr, field 2 +# TYPE node_sysctl_fs_file_nr_max untyped +node_sysctl_fs_file_nr_max 1.631329e+06 +# HELP node_sysctl_fs_file_nr_total sysctl fs.file-nr, field 0 +# TYPE node_sysctl_fs_file_nr_total untyped +node_sysctl_fs_file_nr_total 1024 +# HELP node_sysctl_info sysctl info +# TYPE node_sysctl_info gauge +node_sysctl_info{index="0",name="kernel.seccomp.actions_avail",value="kill_process"} 1 +node_sysctl_info{index="1",name="kernel.seccomp.actions_avail",value="kill_thread"} 1 
+node_sysctl_info{index="2",name="kernel.seccomp.actions_avail",value="trap"} 1 +node_sysctl_info{index="3",name="kernel.seccomp.actions_avail",value="errno"} 1 +node_sysctl_info{index="4",name="kernel.seccomp.actions_avail",value="user_notif"} 1 +node_sysctl_info{index="5",name="kernel.seccomp.actions_avail",value="trace"} 1 +node_sysctl_info{index="6",name="kernel.seccomp.actions_avail",value="log"} 1 +node_sysctl_info{index="7",name="kernel.seccomp.actions_avail",value="allow"} 1 +# HELP node_sysctl_kernel_threads_max sysctl kernel.threads-max +# TYPE node_sysctl_kernel_threads_max untyped +node_sysctl_kernel_threads_max 7801 +# HELP node_tape_io_now The number of I/Os currently outstanding to this device. +# TYPE node_tape_io_now gauge +node_tape_io_now{device="st0"} 1 +# HELP node_tape_io_others_total The number of I/Os issued to the tape drive other than read or write commands. The time taken to complete these commands uses the following calculation io_time_seconds_total-read_time_seconds_total-write_time_seconds_total +# TYPE node_tape_io_others_total counter +node_tape_io_others_total{device="st0"} 1409 +# HELP node_tape_io_time_seconds_total The amount of time spent waiting for all I/O to complete (including read and write). This includes tape movement commands such as seeking between file or set marks and implicit tape movement such as when rewind on close tape devices are used. +# TYPE node_tape_io_time_seconds_total counter +node_tape_io_time_seconds_total{device="st0"} 9247.01108772 +# HELP node_tape_read_bytes_total The number of bytes read from the tape drive. +# TYPE node_tape_read_bytes_total counter +node_tape_read_bytes_total{device="st0"} 9.79383912e+08 +# HELP node_tape_read_time_seconds_total The amount of time spent waiting for read requests to complete. +# TYPE node_tape_read_time_seconds_total counter +node_tape_read_time_seconds_total{device="st0"} 33.788355744 +# HELP node_tape_reads_completed_total The number of read requests issued to the tape drive. +# TYPE node_tape_reads_completed_total counter +node_tape_reads_completed_total{device="st0"} 3741 +# HELP node_tape_residual_total The number of times during a read or write we found the residual amount to be non-zero. This should mean that a program is issuing a read larger thean the block size on tape. For write not all data made it to tape. +# TYPE node_tape_residual_total counter +node_tape_residual_total{device="st0"} 19 +# HELP node_tape_write_time_seconds_total The amount of time spent waiting for write requests to complete. +# TYPE node_tape_write_time_seconds_total counter +node_tape_write_time_seconds_total{device="st0"} 5233.597394395 +# HELP node_tape_writes_completed_total The number of write requests issued to the tape drive. +# TYPE node_tape_writes_completed_total counter +node_tape_writes_completed_total{device="st0"} 5.3772916e+07 +# HELP node_tape_written_bytes_total The number of bytes written to the tape drive. +# TYPE node_tape_written_bytes_total counter +node_tape_written_bytes_total{device="st0"} 1.496246784e+12 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
# TYPE node_textfile_mtime_seconds gauge # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise @@ -2649,6 +3323,22 @@ node_textfile_scrape_error 0 # HELP node_thermal_zone_temp Zone temperature in Celsius # TYPE node_thermal_zone_temp gauge node_thermal_zone_temp{type="cpu-thermal",zone="0"} 12.376 +# HELP node_time_clocksource_available_info Available clocksources read from '/sys/devices/system/clocksource'. +# TYPE node_time_clocksource_available_info gauge +node_time_clocksource_available_info{clocksource="acpi_pm",device="0"} 1 +node_time_clocksource_available_info{clocksource="hpet",device="0"} 1 +node_time_clocksource_available_info{clocksource="tsc",device="0"} 1 +# HELP node_time_clocksource_current_info Current clocksource read from '/sys/devices/system/clocksource'. +# TYPE node_time_clocksource_current_info gauge +node_time_clocksource_current_info{clocksource="tsc",device="0"} 1 +# HELP node_time_seconds System time in seconds since epoch (1970). +# TYPE node_time_seconds gauge +# HELP node_time_zone_offset_seconds System time zone offset in seconds. +# TYPE node_time_zone_offset_seconds gauge +# HELP node_udp_queues Number of allocated memory in the kernel for UDP datagrams in bytes. +# TYPE node_udp_queues gauge +node_udp_queues{ip="v4",queue="rx"} 0 +node_udp_queues{ip="v4",queue="tx"} 21 # HELP node_vmstat_oom_kill /proc/vmstat information field oom_kill. # TYPE node_vmstat_oom_kill untyped node_vmstat_oom_kill 0 @@ -2670,6 +3360,31 @@ node_vmstat_pswpin 1476 # HELP node_vmstat_pswpout /proc/vmstat information field pswpout. # TYPE node_vmstat_pswpout untyped node_vmstat_pswpout 35045 +# HELP node_watchdog_access_cs0 Value of /sys/class/watchdog//access_cs0 +# TYPE node_watchdog_access_cs0 gauge +node_watchdog_access_cs0{name="watchdog0"} 0 +# HELP node_watchdog_bootstatus Value of /sys/class/watchdog//bootstatus +# TYPE node_watchdog_bootstatus gauge +node_watchdog_bootstatus{name="watchdog0"} 1 +# HELP node_watchdog_fw_version Value of /sys/class/watchdog//fw_version +# TYPE node_watchdog_fw_version gauge +node_watchdog_fw_version{name="watchdog0"} 2 +# HELP node_watchdog_info Info of /sys/class/watchdog/ +# TYPE node_watchdog_info gauge +node_watchdog_info{identity="",name="watchdog1",options="",pretimeout_governor="",state="",status=""} 1 +node_watchdog_info{identity="Software Watchdog",name="watchdog0",options="0x8380",pretimeout_governor="noop",state="active",status="0x8000"} 1 +# HELP node_watchdog_nowayout Value of /sys/class/watchdog//nowayout +# TYPE node_watchdog_nowayout gauge +node_watchdog_nowayout{name="watchdog0"} 0 +# HELP node_watchdog_pretimeout_seconds Value of /sys/class/watchdog//pretimeout +# TYPE node_watchdog_pretimeout_seconds gauge +node_watchdog_pretimeout_seconds{name="watchdog0"} 120 +# HELP node_watchdog_timeleft_seconds Value of /sys/class/watchdog//timeleft +# TYPE node_watchdog_timeleft_seconds gauge +node_watchdog_timeleft_seconds{name="watchdog0"} 300 +# HELP node_watchdog_timeout_seconds Value of /sys/class/watchdog//timeout +# TYPE node_watchdog_timeout_seconds gauge +node_watchdog_timeout_seconds{name="watchdog0"} 60 # HELP node_wifi_interface_frequency_hertz The current frequency a WiFi interface is operating at, in hertz. 
# TYPE node_wifi_interface_frequency_hertz gauge node_wifi_interface_frequency_hertz{device="wlan0"} 2.412e+09 @@ -2697,6 +3412,10 @@ node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="aa:bb:cc:d # TYPE node_wifi_station_receive_bytes_total counter node_wifi_station_receive_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 node_wifi_station_receive_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 +# HELP node_wifi_station_received_packets_total The total number of packets received by a station. +# TYPE node_wifi_station_received_packets_total counter +node_wifi_station_received_packets_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 +node_wifi_station_received_packets_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 # HELP node_wifi_station_signal_dbm The current WiFi signal strength, in decibel-milliwatts (dBm). # TYPE node_wifi_station_signal_dbm gauge node_wifi_station_signal_dbm{device="wlan0",mac_address="01:02:03:04:05:06"} -26 @@ -2717,6 +3436,94 @@ node_wifi_station_transmit_failed_total{device="wlan0",mac_address="aa:bb:cc:dd: # TYPE node_wifi_station_transmit_retries_total counter node_wifi_station_transmit_retries_total{device="wlan0",mac_address="01:02:03:04:05:06"} 20 node_wifi_station_transmit_retries_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 10 +# HELP node_wifi_station_transmitted_packets_total The total number of packets transmitted by a station. +# TYPE node_wifi_station_transmitted_packets_total counter +node_wifi_station_transmitted_packets_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 +node_wifi_station_transmitted_packets_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 +# HELP node_xfrm_acquire_error_packets_total State hasn’t been fully acquired before use +# TYPE node_xfrm_acquire_error_packets_total counter +node_xfrm_acquire_error_packets_total 24532 +# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed +# TYPE node_xfrm_fwd_hdr_error_packets_total counter +node_xfrm_fwd_hdr_error_packets_total 6654 +# HELP node_xfrm_in_buffer_error_packets_total No buffer is left +# TYPE node_xfrm_in_buffer_error_packets_total counter +node_xfrm_in_buffer_error_packets_total 2 +# HELP node_xfrm_in_error_packets_total All errors not matched by other +# TYPE node_xfrm_in_error_packets_total counter +node_xfrm_in_error_packets_total 1 +# HELP node_xfrm_in_hdr_error_packets_total Header error +# TYPE node_xfrm_in_hdr_error_packets_total counter +node_xfrm_in_hdr_error_packets_total 4 +# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found +# TYPE node_xfrm_in_no_pols_packets_total counter +node_xfrm_in_no_pols_packets_total 65432 +# HELP node_xfrm_in_no_states_packets_total No state is found i.e. 
Either inbound SPI, address, or IPsec protocol at SA is wrong +# TYPE node_xfrm_in_no_states_packets_total counter +node_xfrm_in_no_states_packets_total 3 +# HELP node_xfrm_in_pol_block_packets_total Policy discards +# TYPE node_xfrm_in_pol_block_packets_total counter +node_xfrm_in_pol_block_packets_total 100 +# HELP node_xfrm_in_pol_error_packets_total Policy error +# TYPE node_xfrm_in_pol_error_packets_total counter +node_xfrm_in_pol_error_packets_total 10000 +# HELP node_xfrm_in_state_expired_packets_total State is expired +# TYPE node_xfrm_in_state_expired_packets_total counter +node_xfrm_in_state_expired_packets_total 7 +# HELP node_xfrm_in_state_invalid_packets_total State is invalid +# TYPE node_xfrm_in_state_invalid_packets_total counter +node_xfrm_in_state_invalid_packets_total 55555 +# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch +# TYPE node_xfrm_in_state_mismatch_packets_total counter +node_xfrm_in_state_mismatch_packets_total 23451 +# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_in_state_mode_error_packets_total counter +node_xfrm_in_state_mode_error_packets_total 100 +# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong +# TYPE node_xfrm_in_state_proto_error_packets_total counter +node_xfrm_in_state_proto_error_packets_total 40 +# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window +# TYPE node_xfrm_in_state_seq_error_packets_total counter +node_xfrm_in_state_seq_error_packets_total 6000 +# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong +# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter +node_xfrm_in_tmpl_mismatch_packets_total 51 +# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error +# TYPE node_xfrm_out_bundle_check_error_packets_total counter +node_xfrm_out_bundle_check_error_packets_total 555 +# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error +# TYPE node_xfrm_out_bundle_gen_error_packets_total counter +node_xfrm_out_bundle_gen_error_packets_total 43321 +# HELP node_xfrm_out_error_packets_total All errors which is not matched others +# TYPE node_xfrm_out_error_packets_total counter +node_xfrm_out_error_packets_total 1e+06 +# HELP node_xfrm_out_no_states_packets_total No state is found +# TYPE node_xfrm_out_no_states_packets_total counter +node_xfrm_out_no_states_packets_total 869 +# HELP node_xfrm_out_pol_block_packets_total Policy discards +# TYPE node_xfrm_out_pol_block_packets_total counter +node_xfrm_out_pol_block_packets_total 43456 +# HELP node_xfrm_out_pol_dead_packets_total Policy is dead +# TYPE node_xfrm_out_pol_dead_packets_total counter +node_xfrm_out_pol_dead_packets_total 7656 +# HELP node_xfrm_out_pol_error_packets_total Policy error +# TYPE node_xfrm_out_pol_error_packets_total counter +node_xfrm_out_pol_error_packets_total 1454 +# HELP node_xfrm_out_state_expired_packets_total State is expired +# TYPE node_xfrm_out_state_expired_packets_total counter +node_xfrm_out_state_expired_packets_total 565 +# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired +# TYPE node_xfrm_out_state_invalid_packets_total counter +node_xfrm_out_state_invalid_packets_total 28765 +# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error +# TYPE 
node_xfrm_out_state_mode_error_packets_total counter +node_xfrm_out_state_mode_error_packets_total 8 +# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error +# TYPE node_xfrm_out_state_proto_error_packets_total counter +node_xfrm_out_state_proto_error_packets_total 4542 +# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow +# TYPE node_xfrm_out_state_seq_error_packets_total counter +node_xfrm_out_state_seq_error_packets_total 543 # HELP node_xfs_allocation_btree_compares_total Number of allocation B-tree compares for a filesystem. # TYPE node_xfs_allocation_btree_compares_total counter node_xfs_allocation_btree_compares_total{device="sda1"} 0 @@ -2786,9 +3593,30 @@ node_xfs_extent_allocation_extents_allocated_total{device="sda1"} 1 # HELP node_xfs_extent_allocation_extents_freed_total Number of extents freed for a filesystem. # TYPE node_xfs_extent_allocation_extents_freed_total counter node_xfs_extent_allocation_extents_freed_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_attempts_total Number of times the OS looked for an XFS inode in the inode cache. +# TYPE node_xfs_inode_operation_attempts_total counter +node_xfs_inode_operation_attempts_total{device="sda1"} 5 +# HELP node_xfs_inode_operation_attribute_changes_total Number of times the OS explicitly changed the attributes of an XFS inode. +# TYPE node_xfs_inode_operation_attribute_changes_total counter +node_xfs_inode_operation_attribute_changes_total{device="sda1"} 1 +# HELP node_xfs_inode_operation_duplicates_total Number of times the OS tried to add a missing XFS inode to the inode cache, but found it had already been added by another process. +# TYPE node_xfs_inode_operation_duplicates_total counter +node_xfs_inode_operation_duplicates_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_found_total Number of times the OS looked for and found an XFS inode in the inode cache. +# TYPE node_xfs_inode_operation_found_total counter +node_xfs_inode_operation_found_total{device="sda1"} 1 +# HELP node_xfs_inode_operation_missed_total Number of times the OS looked for an XFS inode in the cache, but did not find it. +# TYPE node_xfs_inode_operation_missed_total counter +node_xfs_inode_operation_missed_total{device="sda1"} 4 +# HELP node_xfs_inode_operation_reclaims_total Number of times the OS reclaimed an XFS inode from the inode cache to free memory for another purpose. +# TYPE node_xfs_inode_operation_reclaims_total counter +node_xfs_inode_operation_reclaims_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_recycled_total Number of times the OS found an XFS inode in the cache, but could not use it as it was being recycled. +# TYPE node_xfs_inode_operation_recycled_total counter +node_xfs_inode_operation_recycled_total{device="sda1"} 0 # HELP node_xfs_read_calls_total Number of read(2) system calls made to files in a filesystem. # TYPE node_xfs_read_calls_total counter -node_xfs_read_calls_total{device="sda1"} 28 +node_xfs_read_calls_total{device="sda1"} 0 # HELP node_xfs_vnode_active_total Number of vnodes not on free lists for a filesystem. # TYPE node_xfs_vnode_active_total counter node_xfs_vnode_active_total{device="sda1"} 4 @@ -2812,7 +3640,7 @@ node_xfs_vnode_release_total{device="sda1"} 1 node_xfs_vnode_remove_total{device="sda1"} 1 # HELP node_xfs_write_calls_total Number of write(2) system calls made to files in a filesystem. 
# TYPE node_xfs_write_calls_total counter -node_xfs_write_calls_total{device="sda1"} 0 +node_xfs_write_calls_total{device="sda1"} 28 # HELP node_zfs_abd_linear_cnt kstat.zfs.misc.abdstats.linear_cnt # TYPE node_zfs_abd_linear_cnt untyped node_zfs_abd_linear_cnt 62 @@ -3062,6 +3890,9 @@ node_zfs_arc_l2_writes_lock_retry 0 # HELP node_zfs_arc_l2_writes_sent kstat.zfs.misc.arcstats.l2_writes_sent # TYPE node_zfs_arc_l2_writes_sent untyped node_zfs_arc_l2_writes_sent 0 +# HELP node_zfs_arc_memory_available_bytes kstat.zfs.misc.arcstats.memory_available_bytes +# TYPE node_zfs_arc_memory_available_bytes untyped +node_zfs_arc_memory_available_bytes -9.223372036854776e+17 # HELP node_zfs_arc_memory_direct_count kstat.zfs.misc.arcstats.memory_direct_count # TYPE node_zfs_arc_memory_direct_count untyped node_zfs_arc_memory_direct_count 542 @@ -3149,193 +3980,193 @@ node_zfs_arc_prefetch_metadata_misses 16071 # HELP node_zfs_arc_size kstat.zfs.misc.arcstats.size # TYPE node_zfs_arc_size untyped node_zfs_arc_size 1.603939792e+09 -# HELP node_zfs_dbuf_dbuf_cache_count kstat.zfs.misc.dbuf_stats.dbuf_cache_count +# HELP node_zfs_dbuf_dbuf_cache_count kstat.zfs.misc.dbufstats.dbuf_cache_count # TYPE node_zfs_dbuf_dbuf_cache_count untyped node_zfs_dbuf_dbuf_cache_count 27 -# HELP node_zfs_dbuf_dbuf_cache_hiwater_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_hiwater_bytes +# HELP node_zfs_dbuf_dbuf_cache_hiwater_bytes kstat.zfs.misc.dbufstats.dbuf_cache_hiwater_bytes # TYPE node_zfs_dbuf_dbuf_cache_hiwater_bytes untyped node_zfs_dbuf_dbuf_cache_hiwater_bytes 6.9117804e+07 -# HELP node_zfs_dbuf_dbuf_cache_level_0 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_0 +# HELP node_zfs_dbuf_dbuf_cache_level_0 kstat.zfs.misc.dbufstats.dbuf_cache_level_0 # TYPE node_zfs_dbuf_dbuf_cache_level_0 untyped node_zfs_dbuf_dbuf_cache_level_0 27 -# HELP node_zfs_dbuf_dbuf_cache_level_0_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_0_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_0_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_0_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_0_bytes untyped node_zfs_dbuf_dbuf_cache_level_0_bytes 302080 -# HELP node_zfs_dbuf_dbuf_cache_level_1 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_1 +# HELP node_zfs_dbuf_dbuf_cache_level_1 kstat.zfs.misc.dbufstats.dbuf_cache_level_1 # TYPE node_zfs_dbuf_dbuf_cache_level_1 untyped node_zfs_dbuf_dbuf_cache_level_1 0 -# HELP node_zfs_dbuf_dbuf_cache_level_10 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_10 +# HELP node_zfs_dbuf_dbuf_cache_level_10 kstat.zfs.misc.dbufstats.dbuf_cache_level_10 # TYPE node_zfs_dbuf_dbuf_cache_level_10 untyped node_zfs_dbuf_dbuf_cache_level_10 0 -# HELP node_zfs_dbuf_dbuf_cache_level_10_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_10_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_10_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_10_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_10_bytes untyped node_zfs_dbuf_dbuf_cache_level_10_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_11 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_11 +# HELP node_zfs_dbuf_dbuf_cache_level_11 kstat.zfs.misc.dbufstats.dbuf_cache_level_11 # TYPE node_zfs_dbuf_dbuf_cache_level_11 untyped node_zfs_dbuf_dbuf_cache_level_11 0 -# HELP node_zfs_dbuf_dbuf_cache_level_11_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_11_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_11_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_11_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_11_bytes untyped node_zfs_dbuf_dbuf_cache_level_11_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_1_bytes 
kstat.zfs.misc.dbuf_stats.dbuf_cache_level_1_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_1_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_1_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_1_bytes untyped node_zfs_dbuf_dbuf_cache_level_1_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_2 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_2 +# HELP node_zfs_dbuf_dbuf_cache_level_2 kstat.zfs.misc.dbufstats.dbuf_cache_level_2 # TYPE node_zfs_dbuf_dbuf_cache_level_2 untyped node_zfs_dbuf_dbuf_cache_level_2 0 -# HELP node_zfs_dbuf_dbuf_cache_level_2_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_2_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_2_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_2_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_2_bytes untyped node_zfs_dbuf_dbuf_cache_level_2_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_3 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_3 +# HELP node_zfs_dbuf_dbuf_cache_level_3 kstat.zfs.misc.dbufstats.dbuf_cache_level_3 # TYPE node_zfs_dbuf_dbuf_cache_level_3 untyped node_zfs_dbuf_dbuf_cache_level_3 0 -# HELP node_zfs_dbuf_dbuf_cache_level_3_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_3_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_3_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_3_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_3_bytes untyped node_zfs_dbuf_dbuf_cache_level_3_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_4 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_4 +# HELP node_zfs_dbuf_dbuf_cache_level_4 kstat.zfs.misc.dbufstats.dbuf_cache_level_4 # TYPE node_zfs_dbuf_dbuf_cache_level_4 untyped node_zfs_dbuf_dbuf_cache_level_4 0 -# HELP node_zfs_dbuf_dbuf_cache_level_4_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_4_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_4_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_4_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_4_bytes untyped node_zfs_dbuf_dbuf_cache_level_4_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_5 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_5 +# HELP node_zfs_dbuf_dbuf_cache_level_5 kstat.zfs.misc.dbufstats.dbuf_cache_level_5 # TYPE node_zfs_dbuf_dbuf_cache_level_5 untyped node_zfs_dbuf_dbuf_cache_level_5 0 -# HELP node_zfs_dbuf_dbuf_cache_level_5_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_5_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_5_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_5_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_5_bytes untyped node_zfs_dbuf_dbuf_cache_level_5_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_6 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_6 +# HELP node_zfs_dbuf_dbuf_cache_level_6 kstat.zfs.misc.dbufstats.dbuf_cache_level_6 # TYPE node_zfs_dbuf_dbuf_cache_level_6 untyped node_zfs_dbuf_dbuf_cache_level_6 0 -# HELP node_zfs_dbuf_dbuf_cache_level_6_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_6_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_6_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_6_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_6_bytes untyped node_zfs_dbuf_dbuf_cache_level_6_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_7 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_7 +# HELP node_zfs_dbuf_dbuf_cache_level_7 kstat.zfs.misc.dbufstats.dbuf_cache_level_7 # TYPE node_zfs_dbuf_dbuf_cache_level_7 untyped node_zfs_dbuf_dbuf_cache_level_7 0 -# HELP node_zfs_dbuf_dbuf_cache_level_7_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_7_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_7_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_7_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_7_bytes untyped node_zfs_dbuf_dbuf_cache_level_7_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_8 
kstat.zfs.misc.dbuf_stats.dbuf_cache_level_8 +# HELP node_zfs_dbuf_dbuf_cache_level_8 kstat.zfs.misc.dbufstats.dbuf_cache_level_8 # TYPE node_zfs_dbuf_dbuf_cache_level_8 untyped node_zfs_dbuf_dbuf_cache_level_8 0 -# HELP node_zfs_dbuf_dbuf_cache_level_8_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_8_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_8_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_8_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_8_bytes untyped node_zfs_dbuf_dbuf_cache_level_8_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_9 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_9 +# HELP node_zfs_dbuf_dbuf_cache_level_9 kstat.zfs.misc.dbufstats.dbuf_cache_level_9 # TYPE node_zfs_dbuf_dbuf_cache_level_9 untyped node_zfs_dbuf_dbuf_cache_level_9 0 -# HELP node_zfs_dbuf_dbuf_cache_level_9_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_9_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_9_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_9_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_9_bytes untyped node_zfs_dbuf_dbuf_cache_level_9_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_lowater_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_lowater_bytes +# HELP node_zfs_dbuf_dbuf_cache_lowater_bytes kstat.zfs.misc.dbufstats.dbuf_cache_lowater_bytes # TYPE node_zfs_dbuf_dbuf_cache_lowater_bytes untyped node_zfs_dbuf_dbuf_cache_lowater_bytes 5.6550932e+07 -# HELP node_zfs_dbuf_dbuf_cache_max_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_max_bytes +# HELP node_zfs_dbuf_dbuf_cache_max_bytes kstat.zfs.misc.dbufstats.dbuf_cache_max_bytes # TYPE node_zfs_dbuf_dbuf_cache_max_bytes untyped node_zfs_dbuf_dbuf_cache_max_bytes 6.2834368e+07 -# HELP node_zfs_dbuf_dbuf_cache_size kstat.zfs.misc.dbuf_stats.dbuf_cache_size +# HELP node_zfs_dbuf_dbuf_cache_size kstat.zfs.misc.dbufstats.dbuf_cache_size # TYPE node_zfs_dbuf_dbuf_cache_size untyped node_zfs_dbuf_dbuf_cache_size 302080 -# HELP node_zfs_dbuf_dbuf_cache_size_max kstat.zfs.misc.dbuf_stats.dbuf_cache_size_max +# HELP node_zfs_dbuf_dbuf_cache_size_max kstat.zfs.misc.dbufstats.dbuf_cache_size_max # TYPE node_zfs_dbuf_dbuf_cache_size_max untyped node_zfs_dbuf_dbuf_cache_size_max 394240 -# HELP node_zfs_dbuf_dbuf_cache_total_evicts kstat.zfs.misc.dbuf_stats.dbuf_cache_total_evicts +# HELP node_zfs_dbuf_dbuf_cache_total_evicts kstat.zfs.misc.dbufstats.dbuf_cache_total_evicts # TYPE node_zfs_dbuf_dbuf_cache_total_evicts untyped node_zfs_dbuf_dbuf_cache_total_evicts 0 -# HELP node_zfs_dbuf_hash_chain_max kstat.zfs.misc.dbuf_stats.hash_chain_max +# HELP node_zfs_dbuf_hash_chain_max kstat.zfs.misc.dbufstats.hash_chain_max # TYPE node_zfs_dbuf_hash_chain_max untyped node_zfs_dbuf_hash_chain_max 0 -# HELP node_zfs_dbuf_hash_chains kstat.zfs.misc.dbuf_stats.hash_chains +# HELP node_zfs_dbuf_hash_chains kstat.zfs.misc.dbufstats.hash_chains # TYPE node_zfs_dbuf_hash_chains untyped node_zfs_dbuf_hash_chains 0 -# HELP node_zfs_dbuf_hash_collisions kstat.zfs.misc.dbuf_stats.hash_collisions +# HELP node_zfs_dbuf_hash_collisions kstat.zfs.misc.dbufstats.hash_collisions # TYPE node_zfs_dbuf_hash_collisions untyped node_zfs_dbuf_hash_collisions 0 -# HELP node_zfs_dbuf_hash_dbuf_level_0 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_0 +# HELP node_zfs_dbuf_hash_dbuf_level_0 kstat.zfs.misc.dbufstats.hash_dbuf_level_0 # TYPE node_zfs_dbuf_hash_dbuf_level_0 untyped node_zfs_dbuf_hash_dbuf_level_0 37 -# HELP node_zfs_dbuf_hash_dbuf_level_0_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_0_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_0_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_0_bytes # TYPE 
node_zfs_dbuf_hash_dbuf_level_0_bytes untyped node_zfs_dbuf_hash_dbuf_level_0_bytes 465920 -# HELP node_zfs_dbuf_hash_dbuf_level_1 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_1 +# HELP node_zfs_dbuf_hash_dbuf_level_1 kstat.zfs.misc.dbufstats.hash_dbuf_level_1 # TYPE node_zfs_dbuf_hash_dbuf_level_1 untyped node_zfs_dbuf_hash_dbuf_level_1 10 -# HELP node_zfs_dbuf_hash_dbuf_level_10 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_10 +# HELP node_zfs_dbuf_hash_dbuf_level_10 kstat.zfs.misc.dbufstats.hash_dbuf_level_10 # TYPE node_zfs_dbuf_hash_dbuf_level_10 untyped node_zfs_dbuf_hash_dbuf_level_10 0 -# HELP node_zfs_dbuf_hash_dbuf_level_10_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_10_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_10_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_10_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_10_bytes untyped node_zfs_dbuf_hash_dbuf_level_10_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_11 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_11 +# HELP node_zfs_dbuf_hash_dbuf_level_11 kstat.zfs.misc.dbufstats.hash_dbuf_level_11 # TYPE node_zfs_dbuf_hash_dbuf_level_11 untyped node_zfs_dbuf_hash_dbuf_level_11 0 -# HELP node_zfs_dbuf_hash_dbuf_level_11_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_11_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_11_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_11_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_11_bytes untyped node_zfs_dbuf_hash_dbuf_level_11_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_1_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_1_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_1_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_1_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_1_bytes untyped node_zfs_dbuf_hash_dbuf_level_1_bytes 1.31072e+06 -# HELP node_zfs_dbuf_hash_dbuf_level_2 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_2 +# HELP node_zfs_dbuf_hash_dbuf_level_2 kstat.zfs.misc.dbufstats.hash_dbuf_level_2 # TYPE node_zfs_dbuf_hash_dbuf_level_2 untyped node_zfs_dbuf_hash_dbuf_level_2 2 -# HELP node_zfs_dbuf_hash_dbuf_level_2_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_2_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_2_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_2_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_2_bytes untyped node_zfs_dbuf_hash_dbuf_level_2_bytes 262144 -# HELP node_zfs_dbuf_hash_dbuf_level_3 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_3 +# HELP node_zfs_dbuf_hash_dbuf_level_3 kstat.zfs.misc.dbufstats.hash_dbuf_level_3 # TYPE node_zfs_dbuf_hash_dbuf_level_3 untyped node_zfs_dbuf_hash_dbuf_level_3 2 -# HELP node_zfs_dbuf_hash_dbuf_level_3_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_3_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_3_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_3_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_3_bytes untyped node_zfs_dbuf_hash_dbuf_level_3_bytes 262144 -# HELP node_zfs_dbuf_hash_dbuf_level_4 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_4 +# HELP node_zfs_dbuf_hash_dbuf_level_4 kstat.zfs.misc.dbufstats.hash_dbuf_level_4 # TYPE node_zfs_dbuf_hash_dbuf_level_4 untyped node_zfs_dbuf_hash_dbuf_level_4 2 -# HELP node_zfs_dbuf_hash_dbuf_level_4_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_4_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_4_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_4_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_4_bytes untyped node_zfs_dbuf_hash_dbuf_level_4_bytes 262144 -# HELP node_zfs_dbuf_hash_dbuf_level_5 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_5 +# HELP node_zfs_dbuf_hash_dbuf_level_5 kstat.zfs.misc.dbufstats.hash_dbuf_level_5 # TYPE node_zfs_dbuf_hash_dbuf_level_5 untyped 
node_zfs_dbuf_hash_dbuf_level_5 2 -# HELP node_zfs_dbuf_hash_dbuf_level_5_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_5_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_5_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_5_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_5_bytes untyped node_zfs_dbuf_hash_dbuf_level_5_bytes 262144 -# HELP node_zfs_dbuf_hash_dbuf_level_6 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_6 +# HELP node_zfs_dbuf_hash_dbuf_level_6 kstat.zfs.misc.dbufstats.hash_dbuf_level_6 # TYPE node_zfs_dbuf_hash_dbuf_level_6 untyped node_zfs_dbuf_hash_dbuf_level_6 0 -# HELP node_zfs_dbuf_hash_dbuf_level_6_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_6_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_6_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_6_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_6_bytes untyped node_zfs_dbuf_hash_dbuf_level_6_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_7 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_7 +# HELP node_zfs_dbuf_hash_dbuf_level_7 kstat.zfs.misc.dbufstats.hash_dbuf_level_7 # TYPE node_zfs_dbuf_hash_dbuf_level_7 untyped node_zfs_dbuf_hash_dbuf_level_7 0 -# HELP node_zfs_dbuf_hash_dbuf_level_7_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_7_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_7_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_7_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_7_bytes untyped node_zfs_dbuf_hash_dbuf_level_7_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_8 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_8 +# HELP node_zfs_dbuf_hash_dbuf_level_8 kstat.zfs.misc.dbufstats.hash_dbuf_level_8 # TYPE node_zfs_dbuf_hash_dbuf_level_8 untyped node_zfs_dbuf_hash_dbuf_level_8 0 -# HELP node_zfs_dbuf_hash_dbuf_level_8_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_8_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_8_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_8_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_8_bytes untyped node_zfs_dbuf_hash_dbuf_level_8_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_9 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_9 +# HELP node_zfs_dbuf_hash_dbuf_level_9 kstat.zfs.misc.dbufstats.hash_dbuf_level_9 # TYPE node_zfs_dbuf_hash_dbuf_level_9 untyped node_zfs_dbuf_hash_dbuf_level_9 0 -# HELP node_zfs_dbuf_hash_dbuf_level_9_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_9_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_9_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_9_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_9_bytes untyped node_zfs_dbuf_hash_dbuf_level_9_bytes 0 -# HELP node_zfs_dbuf_hash_elements kstat.zfs.misc.dbuf_stats.hash_elements +# HELP node_zfs_dbuf_hash_elements kstat.zfs.misc.dbufstats.hash_elements # TYPE node_zfs_dbuf_hash_elements untyped node_zfs_dbuf_hash_elements 55 -# HELP node_zfs_dbuf_hash_elements_max kstat.zfs.misc.dbuf_stats.hash_elements_max +# HELP node_zfs_dbuf_hash_elements_max kstat.zfs.misc.dbufstats.hash_elements_max # TYPE node_zfs_dbuf_hash_elements_max untyped node_zfs_dbuf_hash_elements_max 55 -# HELP node_zfs_dbuf_hash_hits kstat.zfs.misc.dbuf_stats.hash_hits +# HELP node_zfs_dbuf_hash_hits kstat.zfs.misc.dbufstats.hash_hits # TYPE node_zfs_dbuf_hash_hits untyped node_zfs_dbuf_hash_hits 108807 -# HELP node_zfs_dbuf_hash_insert_race kstat.zfs.misc.dbuf_stats.hash_insert_race +# HELP node_zfs_dbuf_hash_insert_race kstat.zfs.misc.dbufstats.hash_insert_race # TYPE node_zfs_dbuf_hash_insert_race untyped node_zfs_dbuf_hash_insert_race 0 -# HELP node_zfs_dbuf_hash_misses kstat.zfs.misc.dbuf_stats.hash_misses +# HELP node_zfs_dbuf_hash_misses kstat.zfs.misc.dbufstats.hash_misses # TYPE node_zfs_dbuf_hash_misses 
untyped node_zfs_dbuf_hash_misses 1851 # HELP node_zfs_dmu_tx_dmu_tx_assigned kstat.zfs.misc.dmu_tx.dmu_tx_assigned @@ -3591,90 +4422,319 @@ node_zfs_zil_zil_itx_needcopy_count 0 # TYPE node_zfs_zpool_dataset_nread untyped node_zfs_zpool_dataset_nread{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nread{dataset="pool1/dataset1",zpool="pool1"} 28 +node_zfs_zpool_dataset_nread{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_nread{dataset="pool3/dataset with space",zpool="pool3"} 28 node_zfs_zpool_dataset_nread{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nread{dataset="poolz1/dataset1",zpool="poolz1"} 28 # HELP node_zfs_zpool_dataset_nunlinked kstat.zfs.misc.objset.nunlinked # TYPE node_zfs_zpool_dataset_nunlinked untyped node_zfs_zpool_dataset_nunlinked{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nunlinked{dataset="pool1/dataset1",zpool="pool1"} 3 +node_zfs_zpool_dataset_nunlinked{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_nunlinked{dataset="pool3/dataset with space",zpool="pool3"} 3 node_zfs_zpool_dataset_nunlinked{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nunlinked{dataset="poolz1/dataset1",zpool="poolz1"} 14 # HELP node_zfs_zpool_dataset_nunlinks kstat.zfs.misc.objset.nunlinks # TYPE node_zfs_zpool_dataset_nunlinks untyped node_zfs_zpool_dataset_nunlinks{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nunlinks{dataset="pool1/dataset1",zpool="pool1"} 3 +node_zfs_zpool_dataset_nunlinks{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_nunlinks{dataset="pool3/dataset with space",zpool="pool3"} 3 node_zfs_zpool_dataset_nunlinks{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nunlinks{dataset="poolz1/dataset1",zpool="poolz1"} 14 # HELP node_zfs_zpool_dataset_nwritten kstat.zfs.misc.objset.nwritten # TYPE node_zfs_zpool_dataset_nwritten untyped node_zfs_zpool_dataset_nwritten{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nwritten{dataset="pool1/dataset1",zpool="pool1"} 12302 +node_zfs_zpool_dataset_nwritten{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_nwritten{dataset="pool3/dataset with space",zpool="pool3"} 12302 node_zfs_zpool_dataset_nwritten{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nwritten{dataset="poolz1/dataset1",zpool="poolz1"} 32806 # HELP node_zfs_zpool_dataset_reads kstat.zfs.misc.objset.reads # TYPE node_zfs_zpool_dataset_reads untyped node_zfs_zpool_dataset_reads{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_reads{dataset="pool1/dataset1",zpool="pool1"} 2 +node_zfs_zpool_dataset_reads{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_reads{dataset="pool3/dataset with space",zpool="pool3"} 2 node_zfs_zpool_dataset_reads{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_reads{dataset="poolz1/dataset1",zpool="poolz1"} 2 # HELP node_zfs_zpool_dataset_writes kstat.zfs.misc.objset.writes # TYPE node_zfs_zpool_dataset_writes untyped node_zfs_zpool_dataset_writes{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_writes{dataset="pool1/dataset1",zpool="pool1"} 4 +node_zfs_zpool_dataset_writes{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_writes{dataset="pool3/dataset with space",zpool="pool3"} 4 node_zfs_zpool_dataset_writes{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_writes{dataset="poolz1/dataset1",zpool="poolz1"} 10 # HELP node_zfs_zpool_nread kstat.zfs.misc.io.nread # TYPE node_zfs_zpool_nread untyped node_zfs_zpool_nread{zpool="pool1"} 1.88416e+06 +node_zfs_zpool_nread{zpool="pool3"} 1.88416e+06 
node_zfs_zpool_nread{zpool="poolz1"} 2.82624e+06 # HELP node_zfs_zpool_nwritten kstat.zfs.misc.io.nwritten # TYPE node_zfs_zpool_nwritten untyped node_zfs_zpool_nwritten{zpool="pool1"} 3.206144e+06 +node_zfs_zpool_nwritten{zpool="pool3"} 3.206144e+06 node_zfs_zpool_nwritten{zpool="poolz1"} 2.680501248e+09 # HELP node_zfs_zpool_rcnt kstat.zfs.misc.io.rcnt # TYPE node_zfs_zpool_rcnt untyped node_zfs_zpool_rcnt{zpool="pool1"} 0 +node_zfs_zpool_rcnt{zpool="pool3"} 0 node_zfs_zpool_rcnt{zpool="poolz1"} 0 # HELP node_zfs_zpool_reads kstat.zfs.misc.io.reads # TYPE node_zfs_zpool_reads untyped node_zfs_zpool_reads{zpool="pool1"} 22 +node_zfs_zpool_reads{zpool="pool3"} 22 node_zfs_zpool_reads{zpool="poolz1"} 33 # HELP node_zfs_zpool_rlentime kstat.zfs.misc.io.rlentime # TYPE node_zfs_zpool_rlentime untyped node_zfs_zpool_rlentime{zpool="pool1"} 1.04112268e+08 +node_zfs_zpool_rlentime{zpool="pool3"} 1.04112268e+08 node_zfs_zpool_rlentime{zpool="poolz1"} 6.472105124093e+12 # HELP node_zfs_zpool_rtime kstat.zfs.misc.io.rtime # TYPE node_zfs_zpool_rtime untyped node_zfs_zpool_rtime{zpool="pool1"} 2.4168078e+07 +node_zfs_zpool_rtime{zpool="pool3"} 2.4168078e+07 node_zfs_zpool_rtime{zpool="poolz1"} 9.82909164e+09 # HELP node_zfs_zpool_rupdate kstat.zfs.misc.io.rupdate # TYPE node_zfs_zpool_rupdate untyped node_zfs_zpool_rupdate{zpool="pool1"} 7.921048984922e+13 +node_zfs_zpool_rupdate{zpool="pool3"} 7.921048984922e+13 node_zfs_zpool_rupdate{zpool="poolz1"} 1.10734831944501e+14 +# HELP node_zfs_zpool_state kstat.zfs.misc.state +# TYPE node_zfs_zpool_state gauge +node_zfs_zpool_state{state="degraded",zpool="pool1"} 0 +node_zfs_zpool_state{state="degraded",zpool="pool2"} 0 +node_zfs_zpool_state{state="degraded",zpool="pool3"} 0 +node_zfs_zpool_state{state="degraded",zpool="poolz1"} 1 +node_zfs_zpool_state{state="faulted",zpool="pool1"} 0 +node_zfs_zpool_state{state="faulted",zpool="pool2"} 0 +node_zfs_zpool_state{state="faulted",zpool="pool3"} 0 +node_zfs_zpool_state{state="faulted",zpool="poolz1"} 0 +node_zfs_zpool_state{state="offline",zpool="pool1"} 0 +node_zfs_zpool_state{state="offline",zpool="pool2"} 0 +node_zfs_zpool_state{state="offline",zpool="pool3"} 0 +node_zfs_zpool_state{state="offline",zpool="poolz1"} 0 +node_zfs_zpool_state{state="online",zpool="pool1"} 1 +node_zfs_zpool_state{state="online",zpool="pool2"} 0 +node_zfs_zpool_state{state="online",zpool="pool3"} 1 +node_zfs_zpool_state{state="online",zpool="poolz1"} 0 +node_zfs_zpool_state{state="removed",zpool="pool1"} 0 +node_zfs_zpool_state{state="removed",zpool="pool2"} 0 +node_zfs_zpool_state{state="removed",zpool="pool3"} 0 +node_zfs_zpool_state{state="removed",zpool="poolz1"} 0 +node_zfs_zpool_state{state="suspended",zpool="pool1"} 0 +node_zfs_zpool_state{state="suspended",zpool="pool2"} 1 +node_zfs_zpool_state{state="suspended",zpool="pool3"} 0 +node_zfs_zpool_state{state="suspended",zpool="poolz1"} 0 +node_zfs_zpool_state{state="unavail",zpool="pool1"} 0 +node_zfs_zpool_state{state="unavail",zpool="pool2"} 0 +node_zfs_zpool_state{state="unavail",zpool="pool3"} 0 +node_zfs_zpool_state{state="unavail",zpool="poolz1"} 0 # HELP node_zfs_zpool_wcnt kstat.zfs.misc.io.wcnt # TYPE node_zfs_zpool_wcnt untyped node_zfs_zpool_wcnt{zpool="pool1"} 0 +node_zfs_zpool_wcnt{zpool="pool3"} 0 node_zfs_zpool_wcnt{zpool="poolz1"} 0 # HELP node_zfs_zpool_wlentime kstat.zfs.misc.io.wlentime # TYPE node_zfs_zpool_wlentime untyped node_zfs_zpool_wlentime{zpool="pool1"} 1.04112268e+08 +node_zfs_zpool_wlentime{zpool="pool3"} 1.04112268e+08 
node_zfs_zpool_wlentime{zpool="poolz1"} 6.472105124093e+12 # HELP node_zfs_zpool_writes kstat.zfs.misc.io.writes # TYPE node_zfs_zpool_writes untyped node_zfs_zpool_writes{zpool="pool1"} 132 +node_zfs_zpool_writes{zpool="pool3"} 132 node_zfs_zpool_writes{zpool="poolz1"} 25294 # HELP node_zfs_zpool_wtime kstat.zfs.misc.io.wtime # TYPE node_zfs_zpool_wtime untyped node_zfs_zpool_wtime{zpool="pool1"} 7.155162e+06 +node_zfs_zpool_wtime{zpool="pool3"} 7.155162e+06 node_zfs_zpool_wtime{zpool="poolz1"} 9.673715628e+09 # HELP node_zfs_zpool_wupdate kstat.zfs.misc.io.wupdate # TYPE node_zfs_zpool_wupdate untyped node_zfs_zpool_wupdate{zpool="pool1"} 7.9210489694949e+13 +node_zfs_zpool_wupdate{zpool="pool3"} 7.9210489694949e+13 node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14 +# HELP node_zoneinfo_high_pages Zone watermark pages_high +# TYPE node_zoneinfo_high_pages gauge +node_zoneinfo_high_pages{node="0",zone="DMA"} 14 +node_zoneinfo_high_pages{node="0",zone="DMA32"} 2122 +node_zoneinfo_high_pages{node="0",zone="Device"} 0 +node_zoneinfo_high_pages{node="0",zone="Movable"} 0 +node_zoneinfo_high_pages{node="0",zone="Normal"} 31113 +# HELP node_zoneinfo_low_pages Zone watermark pages_low +# TYPE node_zoneinfo_low_pages gauge +node_zoneinfo_low_pages{node="0",zone="DMA"} 11 +node_zoneinfo_low_pages{node="0",zone="DMA32"} 1600 +node_zoneinfo_low_pages{node="0",zone="Device"} 0 +node_zoneinfo_low_pages{node="0",zone="Movable"} 0 +node_zoneinfo_low_pages{node="0",zone="Normal"} 23461 +# HELP node_zoneinfo_managed_pages Present pages managed by the buddy system +# TYPE node_zoneinfo_managed_pages gauge +node_zoneinfo_managed_pages{node="0",zone="DMA"} 3973 +node_zoneinfo_managed_pages{node="0",zone="DMA32"} 530339 +node_zoneinfo_managed_pages{node="0",zone="Device"} 0 +node_zoneinfo_managed_pages{node="0",zone="Movable"} 0 +node_zoneinfo_managed_pages{node="0",zone="Normal"} 7.654794e+06 +# HELP node_zoneinfo_min_pages Zone watermark pages_min +# TYPE node_zoneinfo_min_pages gauge +node_zoneinfo_min_pages{node="0",zone="DMA"} 8 +node_zoneinfo_min_pages{node="0",zone="DMA32"} 1078 +node_zoneinfo_min_pages{node="0",zone="Device"} 0 +node_zoneinfo_min_pages{node="0",zone="Movable"} 0 +node_zoneinfo_min_pages{node="0",zone="Normal"} 15809 +# HELP node_zoneinfo_nr_active_anon_pages Number of anonymous pages recently more used +# TYPE node_zoneinfo_nr_active_anon_pages gauge +node_zoneinfo_nr_active_anon_pages{node="0",zone="DMA"} 1.175853e+06 +# HELP node_zoneinfo_nr_active_file_pages Number of active pages with file-backing +# TYPE node_zoneinfo_nr_active_file_pages gauge +node_zoneinfo_nr_active_file_pages{node="0",zone="DMA"} 688810 +# HELP node_zoneinfo_nr_anon_pages Number of anonymous pages currently used by the system +# TYPE node_zoneinfo_nr_anon_pages gauge +node_zoneinfo_nr_anon_pages{node="0",zone="DMA"} 1.156608e+06 +# HELP node_zoneinfo_nr_anon_transparent_hugepages Number of anonymous transparent huge pages currently used by the system +# TYPE node_zoneinfo_nr_anon_transparent_hugepages gauge +node_zoneinfo_nr_anon_transparent_hugepages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_dirtied_total Page dirtyings since bootup +# TYPE node_zoneinfo_nr_dirtied_total counter +node_zoneinfo_nr_dirtied_total{node="0",zone="DMA"} 1.189097e+06 +# HELP node_zoneinfo_nr_dirty_pages Number of dirty pages +# TYPE node_zoneinfo_nr_dirty_pages gauge +node_zoneinfo_nr_dirty_pages{node="0",zone="DMA"} 103 +# HELP node_zoneinfo_nr_file_pages Number of file pages +# TYPE node_zoneinfo_nr_file_pages 
gauge +node_zoneinfo_nr_file_pages{node="0",zone="DMA"} 1.740118e+06 +# HELP node_zoneinfo_nr_free_pages Total number of free pages in the zone +# TYPE node_zoneinfo_nr_free_pages gauge +node_zoneinfo_nr_free_pages{node="0",zone="DMA"} 2949 +node_zoneinfo_nr_free_pages{node="0",zone="DMA32"} 528427 +node_zoneinfo_nr_free_pages{node="0",zone="Normal"} 4.539739e+06 +# HELP node_zoneinfo_nr_inactive_anon_pages Number of anonymous pages recently less used +# TYPE node_zoneinfo_nr_inactive_anon_pages gauge +node_zoneinfo_nr_inactive_anon_pages{node="0",zone="DMA"} 95612 +# HELP node_zoneinfo_nr_inactive_file_pages Number of inactive pages with file-backing +# TYPE node_zoneinfo_nr_inactive_file_pages gauge +node_zoneinfo_nr_inactive_file_pages{node="0",zone="DMA"} 723339 +# HELP node_zoneinfo_nr_isolated_anon_pages Temporary isolated pages from anon lru +# TYPE node_zoneinfo_nr_isolated_anon_pages gauge +node_zoneinfo_nr_isolated_anon_pages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_isolated_file_pages Temporary isolated pages from file lru +# TYPE node_zoneinfo_nr_isolated_file_pages gauge +node_zoneinfo_nr_isolated_file_pages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_kernel_stacks Number of kernel stacks +# TYPE node_zoneinfo_nr_kernel_stacks gauge +node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA"} 0 +node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA32"} 0 +node_zoneinfo_nr_kernel_stacks{node="0",zone="Normal"} 18864 +# HELP node_zoneinfo_nr_mapped_pages Number of mapped pages +# TYPE node_zoneinfo_nr_mapped_pages gauge +node_zoneinfo_nr_mapped_pages{node="0",zone="DMA"} 423143 +# HELP node_zoneinfo_nr_shmem_pages Number of shmem pages (included tmpfs/GEM pages) +# TYPE node_zoneinfo_nr_shmem_pages gauge +node_zoneinfo_nr_shmem_pages{node="0",zone="DMA"} 330517 +# HELP node_zoneinfo_nr_slab_reclaimable_pages Number of reclaimable slab pages +# TYPE node_zoneinfo_nr_slab_reclaimable_pages gauge +node_zoneinfo_nr_slab_reclaimable_pages{node="0",zone="DMA"} 121763 +# HELP node_zoneinfo_nr_slab_unreclaimable_pages Number of unreclaimable slab pages +# TYPE node_zoneinfo_nr_slab_unreclaimable_pages gauge +node_zoneinfo_nr_slab_unreclaimable_pages{node="0",zone="DMA"} 56182 +# HELP node_zoneinfo_nr_unevictable_pages Number of unevictable pages +# TYPE node_zoneinfo_nr_unevictable_pages gauge +node_zoneinfo_nr_unevictable_pages{node="0",zone="DMA"} 213111 +# HELP node_zoneinfo_nr_writeback_pages Number of writeback pages +# TYPE node_zoneinfo_nr_writeback_pages gauge +node_zoneinfo_nr_writeback_pages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_written_total Page writings since bootup +# TYPE node_zoneinfo_nr_written_total counter +node_zoneinfo_nr_written_total{node="0",zone="DMA"} 1.181554e+06 +# HELP node_zoneinfo_numa_foreign_total Was intended here, hit elsewhere +# TYPE node_zoneinfo_numa_foreign_total counter +node_zoneinfo_numa_foreign_total{node="0",zone="DMA"} 0 +node_zoneinfo_numa_foreign_total{node="0",zone="DMA32"} 0 +node_zoneinfo_numa_foreign_total{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_numa_hit_total Allocated in intended node +# TYPE node_zoneinfo_numa_hit_total counter +node_zoneinfo_numa_hit_total{node="0",zone="DMA"} 1 +node_zoneinfo_numa_hit_total{node="0",zone="DMA32"} 13 +node_zoneinfo_numa_hit_total{node="0",zone="Normal"} 6.2836441e+07 +# HELP node_zoneinfo_numa_interleave_total Interleaver preferred this zone +# TYPE node_zoneinfo_numa_interleave_total counter +node_zoneinfo_numa_interleave_total{node="0",zone="DMA"} 1 
+node_zoneinfo_numa_interleave_total{node="0",zone="DMA32"} 1 +node_zoneinfo_numa_interleave_total{node="0",zone="Normal"} 23174 +# HELP node_zoneinfo_numa_local_total Allocation from local node +# TYPE node_zoneinfo_numa_local_total counter +node_zoneinfo_numa_local_total{node="0",zone="DMA"} 1 +node_zoneinfo_numa_local_total{node="0",zone="DMA32"} 13 +node_zoneinfo_numa_local_total{node="0",zone="Normal"} 6.2836441e+07 +# HELP node_zoneinfo_numa_miss_total Allocated in non intended node +# TYPE node_zoneinfo_numa_miss_total counter +node_zoneinfo_numa_miss_total{node="0",zone="DMA"} 0 +node_zoneinfo_numa_miss_total{node="0",zone="DMA32"} 0 +node_zoneinfo_numa_miss_total{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_numa_other_total Allocation from other node +# TYPE node_zoneinfo_numa_other_total counter +node_zoneinfo_numa_other_total{node="0",zone="DMA"} 0 +node_zoneinfo_numa_other_total{node="0",zone="DMA32"} 0 +node_zoneinfo_numa_other_total{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_present_pages Physical pages existing within the zone +# TYPE node_zoneinfo_present_pages gauge +node_zoneinfo_present_pages{node="0",zone="DMA"} 3997 +node_zoneinfo_present_pages{node="0",zone="DMA32"} 546847 +node_zoneinfo_present_pages{node="0",zone="Device"} 0 +node_zoneinfo_present_pages{node="0",zone="Movable"} 0 +node_zoneinfo_present_pages{node="0",zone="Normal"} 7.806976e+06 +# HELP node_zoneinfo_protection_0 Protection array 0. field +# TYPE node_zoneinfo_protection_0 gauge +node_zoneinfo_protection_0{node="0",zone="DMA"} 0 +node_zoneinfo_protection_0{node="0",zone="DMA32"} 0 +node_zoneinfo_protection_0{node="0",zone="Device"} 0 +node_zoneinfo_protection_0{node="0",zone="Movable"} 0 +node_zoneinfo_protection_0{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_1 Protection array 1. field +# TYPE node_zoneinfo_protection_1 gauge +node_zoneinfo_protection_1{node="0",zone="DMA"} 2039 +node_zoneinfo_protection_1{node="0",zone="DMA32"} 0 +node_zoneinfo_protection_1{node="0",zone="Device"} 0 +node_zoneinfo_protection_1{node="0",zone="Movable"} 0 +node_zoneinfo_protection_1{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_2 Protection array 2. field +# TYPE node_zoneinfo_protection_2 gauge +node_zoneinfo_protection_2{node="0",zone="DMA"} 31932 +node_zoneinfo_protection_2{node="0",zone="DMA32"} 29893 +node_zoneinfo_protection_2{node="0",zone="Device"} 0 +node_zoneinfo_protection_2{node="0",zone="Movable"} 0 +node_zoneinfo_protection_2{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_3 Protection array 3. field +# TYPE node_zoneinfo_protection_3 gauge +node_zoneinfo_protection_3{node="0",zone="DMA"} 31932 +node_zoneinfo_protection_3{node="0",zone="DMA32"} 29893 +node_zoneinfo_protection_3{node="0",zone="Device"} 0 +node_zoneinfo_protection_3{node="0",zone="Movable"} 0 +node_zoneinfo_protection_3{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_4 Protection array 4. 
field +# TYPE node_zoneinfo_protection_4 gauge +node_zoneinfo_protection_4{node="0",zone="DMA"} 31932 +node_zoneinfo_protection_4{node="0",zone="DMA32"} 29893 +node_zoneinfo_protection_4{node="0",zone="Device"} 0 +node_zoneinfo_protection_4{node="0",zone="Movable"} 0 +node_zoneinfo_protection_4{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_spanned_pages Total pages spanned by the zone, including holes +# TYPE node_zoneinfo_spanned_pages gauge +node_zoneinfo_spanned_pages{node="0",zone="DMA"} 4095 +node_zoneinfo_spanned_pages{node="0",zone="DMA32"} 1.04448e+06 +node_zoneinfo_spanned_pages{node="0",zone="Device"} 0 +node_zoneinfo_spanned_pages{node="0",zone="Movable"} 0 +node_zoneinfo_spanned_pages{node="0",zone="Normal"} 7.806976e+06 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge +# HELP process_network_receive_bytes_total Number of bytes received by the process over the network. +# TYPE process_network_receive_bytes_total counter +# HELP process_network_transmit_bytes_total Number of bytes sent by the process over the network. +# TYPE process_network_transmit_bytes_total counter # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge # HELP process_resident_memory_bytes Resident memory size in bytes. diff --git a/collector/fixtures/e2e-output-darwin.txt b/collector/fixtures/e2e-output-darwin.txt new file mode 100644 index 0000000000..cf29fb473c --- /dev/null +++ b/collector/fixtures/e2e-output-darwin.txt @@ -0,0 +1,295 @@ +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. +# TYPE go_gc_duration_seconds summary +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent. +# TYPE go_gc_gogc_percent gauge +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes. +# TYPE go_gc_gomemlimit_bytes gauge +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_alloc_bytes gauge +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. +# TYPE go_memstats_alloc_bytes_total counter +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. +# TYPE go_memstats_buck_hash_sys_bytes gauge +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_frees_total counter +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. +# TYPE go_memstats_gc_sys_bytes gauge +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. 
Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_heap_alloc_bytes gauge +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_idle_bytes gauge +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes +# TYPE go_memstats_heap_inuse_bytes gauge +# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects. +# TYPE go_memstats_heap_objects gauge +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes. +# TYPE go_memstats_heap_released_bytes gauge +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_sys_bytes gauge +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_mallocs_total counter +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes. +# TYPE go_memstats_mcache_inuse_bytes gauge +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes. +# TYPE go_memstats_mcache_sys_bytes gauge +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes. +# TYPE go_memstats_mspan_inuse_bytes gauge +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes. +# TYPE go_memstats_mspan_sys_bytes gauge +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes. +# TYPE go_memstats_next_gc_bytes gauge +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes. +# TYPE go_memstats_other_sys_bytes gauge +# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes. +# TYPE go_memstats_stack_inuse_bytes gauge +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes. +# TYPE go_memstats_stack_sys_bytes gauge +# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte. +# TYPE go_memstats_sys_bytes gauge +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads. +# TYPE go_sched_gomaxprocs_threads gauge +# HELP go_threads Number of OS threads created. 
+# TYPE go_threads gauge +# HELP node_buddyinfo_blocks Count of free blocks according to size. +# TYPE node_buddyinfo_blocks gauge +node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759 +node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381 +node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572 +node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093 +node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3 +node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791 +node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185 +node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475 +node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530 +node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2 +node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194 +node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567 +node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45 +node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102 +node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12 +node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4 +node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 +# HELP node_disk_read_errors_total The total number of read errors. +# TYPE node_disk_read_errors_total counter +node_disk_read_errors_total{device="disk0"} 0 +node_disk_read_errors_total{device="disk4"} 0 +# HELP node_disk_read_retries_total The total number of read retries. +# TYPE node_disk_read_retries_total counter +node_disk_read_retries_total{device="disk0"} 0 +node_disk_read_retries_total{device="disk4"} 0 +# HELP node_disk_write_errors_total The total number of write errors. +# TYPE node_disk_write_errors_total counter +node_disk_write_errors_total{device="disk0"} 0 +node_disk_write_errors_total{device="disk4"} 0 +# HELP node_disk_write_retries_total The total number of write retries. +# TYPE node_disk_write_retries_total counter +node_disk_write_retries_total{device="disk0"} 0 +node_disk_write_retries_total{device="disk4"} 0 +# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. +# TYPE node_exporter_build_info gauge +# HELP node_memory_swap_total_bytes Memory information field swap_total_bytes. +# TYPE node_memory_swap_total_bytes gauge +node_memory_swap_total_bytes 0 +# HELP node_memory_swap_used_bytes Memory information field swap_used_bytes. +# TYPE node_memory_swap_used_bytes gauge +node_memory_swap_used_bytes 0 +# HELP node_memory_total_bytes Memory information field total_bytes. 
+# TYPE node_memory_total_bytes gauge +node_memory_total_bytes 7.516192768e+09 +# HELP node_network_noproto_total Network device statistic noproto. +# TYPE node_network_noproto_total counter +node_network_noproto_total{device="lo0"} 0 +# HELP node_network_receive_drop_total Network device statistic receive_drop. +# TYPE node_network_receive_drop_total counter +node_network_receive_drop_total{device="lo0"} 0 +# HELP node_network_receive_errs_total Network device statistic receive_errs. +# TYPE node_network_receive_errs_total counter +node_network_receive_errs_total{device="lo0"} 0 +# HELP node_network_receive_packets_total Network device statistic receive_packets. +# TYPE node_network_receive_packets_total counter +# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. +# TYPE node_network_transmit_bytes_total counter +# HELP node_network_transmit_colls_total Network device statistic transmit_colls. +# TYPE node_network_transmit_colls_total counter +node_network_transmit_colls_total{device="lo0"} 0 +# HELP node_network_transmit_errs_total Network device statistic transmit_errs. +# TYPE node_network_transmit_errs_total counter +node_network_transmit_errs_total{device="lo0"} 0 +# HELP node_network_transmit_packets_total Network device statistic transmit_packets. +# TYPE node_network_transmit_packets_total counter +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. +# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. +# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. +# TYPE node_scrape_collector_duration_seconds gauge +# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. +# TYPE node_scrape_collector_success gauge +node_scrape_collector_success{collector="boottime"} 1 +node_scrape_collector_success{collector="buddyinfo"} 1 +node_scrape_collector_success{collector="cpu"} 1 +node_scrape_collector_success{collector="diskstats"} 1 +node_scrape_collector_success{collector="loadavg"} 1 +node_scrape_collector_success{collector="meminfo"} 1 +node_scrape_collector_success{collector="netdev"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="powersupplyclass"} 1 +node_scrape_collector_success{collector="textfile"} 1 +node_scrape_collector_success{collector="thermal"} 0 +node_scrape_collector_success{collector="time"} 1 +node_scrape_collector_success{collector="xfrm"} 1 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP node_time_seconds System time in seconds since epoch (1970). +# TYPE node_time_seconds gauge +# HELP node_time_zone_offset_seconds System time zone offset in seconds. 
+# TYPE node_time_zone_offset_seconds gauge +# HELP node_xfrm_acquire_error_packets_total State hasn’t been fully acquired before use +# TYPE node_xfrm_acquire_error_packets_total counter +node_xfrm_acquire_error_packets_total 24532 +# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed +# TYPE node_xfrm_fwd_hdr_error_packets_total counter +node_xfrm_fwd_hdr_error_packets_total 6654 +# HELP node_xfrm_in_buffer_error_packets_total No buffer is left +# TYPE node_xfrm_in_buffer_error_packets_total counter +node_xfrm_in_buffer_error_packets_total 2 +# HELP node_xfrm_in_error_packets_total All errors not matched by other +# TYPE node_xfrm_in_error_packets_total counter +node_xfrm_in_error_packets_total 1 +# HELP node_xfrm_in_hdr_error_packets_total Header error +# TYPE node_xfrm_in_hdr_error_packets_total counter +node_xfrm_in_hdr_error_packets_total 4 +# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found +# TYPE node_xfrm_in_no_pols_packets_total counter +node_xfrm_in_no_pols_packets_total 65432 +# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong +# TYPE node_xfrm_in_no_states_packets_total counter +node_xfrm_in_no_states_packets_total 3 +# HELP node_xfrm_in_pol_block_packets_total Policy discards +# TYPE node_xfrm_in_pol_block_packets_total counter +node_xfrm_in_pol_block_packets_total 100 +# HELP node_xfrm_in_pol_error_packets_total Policy error +# TYPE node_xfrm_in_pol_error_packets_total counter +node_xfrm_in_pol_error_packets_total 10000 +# HELP node_xfrm_in_state_expired_packets_total State is expired +# TYPE node_xfrm_in_state_expired_packets_total counter +node_xfrm_in_state_expired_packets_total 7 +# HELP node_xfrm_in_state_invalid_packets_total State is invalid +# TYPE node_xfrm_in_state_invalid_packets_total counter +node_xfrm_in_state_invalid_packets_total 55555 +# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch +# TYPE node_xfrm_in_state_mismatch_packets_total counter +node_xfrm_in_state_mismatch_packets_total 23451 +# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_in_state_mode_error_packets_total counter +node_xfrm_in_state_mode_error_packets_total 100 +# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong +# TYPE node_xfrm_in_state_proto_error_packets_total counter +node_xfrm_in_state_proto_error_packets_total 40 +# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window +# TYPE node_xfrm_in_state_seq_error_packets_total counter +node_xfrm_in_state_seq_error_packets_total 6000 +# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. 
Inbound SAs are correct but SP rule is wrong +# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter +node_xfrm_in_tmpl_mismatch_packets_total 51 +# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error +# TYPE node_xfrm_out_bundle_check_error_packets_total counter +node_xfrm_out_bundle_check_error_packets_total 555 +# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error +# TYPE node_xfrm_out_bundle_gen_error_packets_total counter +node_xfrm_out_bundle_gen_error_packets_total 43321 +# HELP node_xfrm_out_error_packets_total All errors which is not matched others +# TYPE node_xfrm_out_error_packets_total counter +node_xfrm_out_error_packets_total 1e+06 +# HELP node_xfrm_out_no_states_packets_total No state is found +# TYPE node_xfrm_out_no_states_packets_total counter +node_xfrm_out_no_states_packets_total 869 +# HELP node_xfrm_out_pol_block_packets_total Policy discards +# TYPE node_xfrm_out_pol_block_packets_total counter +node_xfrm_out_pol_block_packets_total 43456 +# HELP node_xfrm_out_pol_dead_packets_total Policy is dead +# TYPE node_xfrm_out_pol_dead_packets_total counter +node_xfrm_out_pol_dead_packets_total 7656 +# HELP node_xfrm_out_pol_error_packets_total Policy error +# TYPE node_xfrm_out_pol_error_packets_total counter +node_xfrm_out_pol_error_packets_total 1454 +# HELP node_xfrm_out_state_expired_packets_total State is expired +# TYPE node_xfrm_out_state_expired_packets_total counter +node_xfrm_out_state_expired_packets_total 565 +# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired +# TYPE node_xfrm_out_state_invalid_packets_total counter +node_xfrm_out_state_invalid_packets_total 28765 +# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_out_state_mode_error_packets_total counter +node_xfrm_out_state_mode_error_packets_total 8 +# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error +# TYPE node_xfrm_out_state_proto_error_packets_total counter +node_xfrm_out_state_proto_error_packets_total 4542 +# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow +# TYPE node_xfrm_out_state_seq_error_packets_total counter +node_xfrm_out_state_seq_error_packets_total 543 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors_total counter +promhttp_metric_handler_errors_total{cause="encoding"} 0 +promhttp_metric_handler_errors_total{cause="gathering"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. 
+# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 0 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_1 untyped +testmetric1_1{foo="bar"} 10 +# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_2 untyped +testmetric1_2{foo="baz"} 20 +# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_1 untyped +testmetric2_1{foo="bar"} 30 +# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_2 untyped +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/e2e-output-dragonfly.txt b/collector/fixtures/e2e-output-dragonfly.txt new file mode 100644 index 0000000000..638d0afcac --- /dev/null +++ b/collector/fixtures/e2e-output-dragonfly.txt @@ -0,0 +1,251 @@ +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. +# TYPE go_gc_duration_seconds summary +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent. +# TYPE go_gc_gogc_percent gauge +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes. +# TYPE go_gc_gomemlimit_bytes gauge +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_alloc_bytes gauge +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. +# TYPE go_memstats_alloc_bytes_total counter +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. +# TYPE go_memstats_buck_hash_sys_bytes gauge +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_frees_total counter +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. +# TYPE go_memstats_gc_sys_bytes gauge +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_heap_alloc_bytes gauge +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_idle_bytes gauge +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. 
Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes +# TYPE go_memstats_heap_inuse_bytes gauge +# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects. +# TYPE go_memstats_heap_objects gauge +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes. +# TYPE go_memstats_heap_released_bytes gauge +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_sys_bytes gauge +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_mallocs_total counter +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes. +# TYPE go_memstats_mcache_inuse_bytes gauge +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes. +# TYPE go_memstats_mcache_sys_bytes gauge +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes. +# TYPE go_memstats_mspan_inuse_bytes gauge +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes. +# TYPE go_memstats_mspan_sys_bytes gauge +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes. +# TYPE go_memstats_next_gc_bytes gauge +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes. +# TYPE go_memstats_other_sys_bytes gauge +# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes. +# TYPE go_memstats_stack_inuse_bytes gauge +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes. +# TYPE go_memstats_stack_sys_bytes gauge +# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte. +# TYPE go_memstats_sys_bytes gauge +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads. +# TYPE go_sched_gomaxprocs_threads gauge +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +# HELP node_buddyinfo_blocks Count of free blocks according to size. 
+# TYPE node_buddyinfo_blocks gauge +node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759 +node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381 +node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572 +node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093 +node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3 +node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791 +node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185 +node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475 +node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530 +node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2 +node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194 +node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567 +node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45 +node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102 +node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12 +node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4 +node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 +# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. +# TYPE node_exporter_build_info gauge +# HELP node_network_receive_drop_total Network device statistic receive_drop. +# TYPE node_network_receive_drop_total counter +node_network_receive_drop_total{device="lo0"} 0 +# HELP node_network_receive_errs_total Network device statistic receive_errs. +# TYPE node_network_receive_errs_total counter +node_network_receive_errs_total{device="lo0"} 0 +# HELP node_network_receive_packets_total Network device statistic receive_packets. +# TYPE node_network_receive_packets_total counter +# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. +# TYPE node_network_transmit_bytes_total counter +# HELP node_network_transmit_drop_total Network device statistic transmit_drop. +# TYPE node_network_transmit_drop_total counter +node_network_transmit_drop_total{device="lo0"} 0 +# HELP node_network_transmit_errs_total Network device statistic transmit_errs. +# TYPE node_network_transmit_errs_total counter +node_network_transmit_errs_total{device="lo0"} 0 +# HELP node_network_transmit_packets_total Network device statistic transmit_packets. +# TYPE node_network_transmit_packets_total counter +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. 
+# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. +# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. +# TYPE node_scrape_collector_duration_seconds gauge +# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. +# TYPE node_scrape_collector_success gauge +node_scrape_collector_success{collector="boottime"} 1 +node_scrape_collector_success{collector="buddyinfo"} 1 +node_scrape_collector_success{collector="cpu"} 1 +node_scrape_collector_success{collector="exec"} 1 +node_scrape_collector_success{collector="loadavg"} 1 +node_scrape_collector_success{collector="meminfo"} 0 +node_scrape_collector_success{collector="netdev"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="textfile"} 1 +node_scrape_collector_success{collector="time"} 1 +node_scrape_collector_success{collector="xfrm"} 1 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP node_time_seconds System time in seconds since epoch (1970). +# TYPE node_time_seconds gauge +# HELP node_time_zone_offset_seconds System time zone offset in seconds. +# TYPE node_time_zone_offset_seconds gauge +# HELP node_xfrm_acquire_error_packets_total State hasn’t been fully acquired before use +# TYPE node_xfrm_acquire_error_packets_total counter +node_xfrm_acquire_error_packets_total 24532 +# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed +# TYPE node_xfrm_fwd_hdr_error_packets_total counter +node_xfrm_fwd_hdr_error_packets_total 6654 +# HELP node_xfrm_in_buffer_error_packets_total No buffer is left +# TYPE node_xfrm_in_buffer_error_packets_total counter +node_xfrm_in_buffer_error_packets_total 2 +# HELP node_xfrm_in_error_packets_total All errors not matched by other +# TYPE node_xfrm_in_error_packets_total counter +node_xfrm_in_error_packets_total 1 +# HELP node_xfrm_in_hdr_error_packets_total Header error +# TYPE node_xfrm_in_hdr_error_packets_total counter +node_xfrm_in_hdr_error_packets_total 4 +# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found +# TYPE node_xfrm_in_no_pols_packets_total counter +node_xfrm_in_no_pols_packets_total 65432 +# HELP node_xfrm_in_no_states_packets_total No state is found i.e. 
Either inbound SPI, address, or IPsec protocol at SA is wrong +# TYPE node_xfrm_in_no_states_packets_total counter +node_xfrm_in_no_states_packets_total 3 +# HELP node_xfrm_in_pol_block_packets_total Policy discards +# TYPE node_xfrm_in_pol_block_packets_total counter +node_xfrm_in_pol_block_packets_total 100 +# HELP node_xfrm_in_pol_error_packets_total Policy error +# TYPE node_xfrm_in_pol_error_packets_total counter +node_xfrm_in_pol_error_packets_total 10000 +# HELP node_xfrm_in_state_expired_packets_total State is expired +# TYPE node_xfrm_in_state_expired_packets_total counter +node_xfrm_in_state_expired_packets_total 7 +# HELP node_xfrm_in_state_invalid_packets_total State is invalid +# TYPE node_xfrm_in_state_invalid_packets_total counter +node_xfrm_in_state_invalid_packets_total 55555 +# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch +# TYPE node_xfrm_in_state_mismatch_packets_total counter +node_xfrm_in_state_mismatch_packets_total 23451 +# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_in_state_mode_error_packets_total counter +node_xfrm_in_state_mode_error_packets_total 100 +# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong +# TYPE node_xfrm_in_state_proto_error_packets_total counter +node_xfrm_in_state_proto_error_packets_total 40 +# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window +# TYPE node_xfrm_in_state_seq_error_packets_total counter +node_xfrm_in_state_seq_error_packets_total 6000 +# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong +# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter +node_xfrm_in_tmpl_mismatch_packets_total 51 +# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error +# TYPE node_xfrm_out_bundle_check_error_packets_total counter +node_xfrm_out_bundle_check_error_packets_total 555 +# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error +# TYPE node_xfrm_out_bundle_gen_error_packets_total counter +node_xfrm_out_bundle_gen_error_packets_total 43321 +# HELP node_xfrm_out_error_packets_total All errors which is not matched others +# TYPE node_xfrm_out_error_packets_total counter +node_xfrm_out_error_packets_total 1e+06 +# HELP node_xfrm_out_no_states_packets_total No state is found +# TYPE node_xfrm_out_no_states_packets_total counter +node_xfrm_out_no_states_packets_total 869 +# HELP node_xfrm_out_pol_block_packets_total Policy discards +# TYPE node_xfrm_out_pol_block_packets_total counter +node_xfrm_out_pol_block_packets_total 43456 +# HELP node_xfrm_out_pol_dead_packets_total Policy is dead +# TYPE node_xfrm_out_pol_dead_packets_total counter +node_xfrm_out_pol_dead_packets_total 7656 +# HELP node_xfrm_out_pol_error_packets_total Policy error +# TYPE node_xfrm_out_pol_error_packets_total counter +node_xfrm_out_pol_error_packets_total 1454 +# HELP node_xfrm_out_state_expired_packets_total State is expired +# TYPE node_xfrm_out_state_expired_packets_total counter +node_xfrm_out_state_expired_packets_total 565 +# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired +# TYPE node_xfrm_out_state_invalid_packets_total counter +node_xfrm_out_state_invalid_packets_total 28765 +# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error +# TYPE 
node_xfrm_out_state_mode_error_packets_total counter +node_xfrm_out_state_mode_error_packets_total 8 +# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error +# TYPE node_xfrm_out_state_proto_error_packets_total counter +node_xfrm_out_state_proto_error_packets_total 4542 +# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow +# TYPE node_xfrm_out_state_seq_error_packets_total counter +node_xfrm_out_state_seq_error_packets_total 543 +# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors_total counter +promhttp_metric_handler_errors_total{cause="encoding"} 0 +promhttp_metric_handler_errors_total{cause="gathering"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 0 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_1 untyped +testmetric1_1{foo="bar"} 10 +# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_2 untyped +testmetric1_2{foo="baz"} 20 +# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_1 untyped +testmetric2_1{foo="bar"} 30 +# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_2 untyped +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/e2e-output-freebsd.txt b/collector/fixtures/e2e-output-freebsd.txt new file mode 100644 index 0000000000..fad76e6fad --- /dev/null +++ b/collector/fixtures/e2e-output-freebsd.txt @@ -0,0 +1,287 @@ +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. +# TYPE go_gc_duration_seconds summary +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent. +# TYPE go_gc_gogc_percent gauge +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes. +# TYPE go_gc_gomemlimit_bytes gauge +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_alloc_bytes gauge +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. +# TYPE go_memstats_alloc_bytes_total counter +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. 
+# TYPE go_memstats_buck_hash_sys_bytes gauge +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_frees_total counter +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. +# TYPE go_memstats_gc_sys_bytes gauge +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_heap_alloc_bytes gauge +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_idle_bytes gauge +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes +# TYPE go_memstats_heap_inuse_bytes gauge +# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects. +# TYPE go_memstats_heap_objects gauge +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes. +# TYPE go_memstats_heap_released_bytes gauge +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_sys_bytes gauge +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_mallocs_total counter +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes. +# TYPE go_memstats_mcache_inuse_bytes gauge +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes. +# TYPE go_memstats_mcache_sys_bytes gauge +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes. +# TYPE go_memstats_mspan_inuse_bytes gauge +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes. +# TYPE go_memstats_mspan_sys_bytes gauge +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes. +# TYPE go_memstats_next_gc_bytes gauge +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes. +# TYPE go_memstats_other_sys_bytes gauge +# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes. +# TYPE go_memstats_stack_inuse_bytes gauge +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. 
Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes. +# TYPE go_memstats_stack_sys_bytes gauge +# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte. +# TYPE go_memstats_sys_bytes gauge +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads. +# TYPE go_sched_gomaxprocs_threads gauge +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +# HELP node_buddyinfo_blocks Count of free blocks according to size. +# TYPE node_buddyinfo_blocks gauge +node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759 +node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381 +node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572 +node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093 +node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3 +node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791 +node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185 +node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475 +node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530 +node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2 +node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194 +node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567 +node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45 +node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102 +node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12 +node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4 +node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 +# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. 
+# TYPE node_exporter_build_info gauge +# HELP node_memory_swap_in_bytes_total Bytes paged in from swap devices +# TYPE node_memory_swap_in_bytes_total counter +node_memory_swap_in_bytes_total 0 +# HELP node_memory_swap_out_bytes_total Bytes paged out to swap devices +# TYPE node_memory_swap_out_bytes_total counter +node_memory_swap_out_bytes_total 0 +# HELP node_memory_swap_size_bytes Total swap memory size +# TYPE node_memory_swap_size_bytes gauge +node_memory_swap_size_bytes 1.073741824e+09 +# HELP node_memory_swap_used_bytes Currently allocated swap +# TYPE node_memory_swap_used_bytes gauge +node_memory_swap_used_bytes 0 +# HELP node_memory_user_wired_bytes Locked in memory by user, mlock, etc +# TYPE node_memory_user_wired_bytes gauge +node_memory_user_wired_bytes 0 +# HELP node_netisr_bindthreads netisr threads bound to CPUs +# TYPE node_netisr_bindthreads gauge +node_netisr_bindthreads 0 +# HELP node_netisr_defaultqlimit netisr default queue limit +# TYPE node_netisr_defaultqlimit gauge +node_netisr_defaultqlimit 256 +# HELP node_netisr_maxprot netisr maximum protocols +# TYPE node_netisr_maxprot gauge +node_netisr_maxprot 16 +# HELP node_netisr_maxqlimit netisr maximum queue limit +# TYPE node_netisr_maxqlimit gauge +node_netisr_maxqlimit 10240 +# HELP node_netisr_maxthreads netisr maximum thread count +# TYPE node_netisr_maxthreads gauge +node_netisr_maxthreads 1 +# HELP node_netisr_numthreads netisr current thread count +# TYPE node_netisr_numthreads gauge +node_netisr_numthreads 1 +# HELP node_network_receive_drop_total Network device statistic receive_drop. +# TYPE node_network_receive_drop_total counter +node_network_receive_drop_total{device="lo0"} 0 +# HELP node_network_receive_errs_total Network device statistic receive_errs. +# TYPE node_network_receive_errs_total counter +node_network_receive_errs_total{device="lo0"} 0 +# HELP node_network_receive_packets_total Network device statistic receive_packets. +# TYPE node_network_receive_packets_total counter +# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. +# TYPE node_network_transmit_bytes_total counter +# HELP node_network_transmit_drop_total Network device statistic transmit_drop. +# TYPE node_network_transmit_drop_total counter +node_network_transmit_drop_total{device="lo0"} 0 +# HELP node_network_transmit_errs_total Network device statistic transmit_errs. +# TYPE node_network_transmit_errs_total counter +node_network_transmit_errs_total{device="lo0"} 0 +# HELP node_network_transmit_packets_total Network device statistic transmit_packets. +# TYPE node_network_transmit_packets_total counter +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. +# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. +# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. +# TYPE node_scrape_collector_duration_seconds gauge +# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. 
+# TYPE node_scrape_collector_success gauge +node_scrape_collector_success{collector="boottime"} 1 +node_scrape_collector_success{collector="buddyinfo"} 1 +node_scrape_collector_success{collector="cpu"} 1 +node_scrape_collector_success{collector="exec"} 1 +node_scrape_collector_success{collector="loadavg"} 1 +node_scrape_collector_success{collector="meminfo"} 1 +node_scrape_collector_success{collector="netdev"} 1 +node_scrape_collector_success{collector="netisr"} 1 +node_scrape_collector_success{collector="netstat"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="textfile"} 1 +node_scrape_collector_success{collector="time"} 1 +node_scrape_collector_success{collector="xfrm"} 1 +node_scrape_collector_success{collector="zfs"} 1 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP node_time_seconds System time in seconds since epoch (1970). +# TYPE node_time_seconds gauge +# HELP node_time_zone_offset_seconds System time zone offset in seconds. +# TYPE node_time_zone_offset_seconds gauge +# HELP node_xfrm_acquire_error_packets_total State hasn’t been fully acquired before use +# TYPE node_xfrm_acquire_error_packets_total counter +node_xfrm_acquire_error_packets_total 24532 +# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed +# TYPE node_xfrm_fwd_hdr_error_packets_total counter +node_xfrm_fwd_hdr_error_packets_total 6654 +# HELP node_xfrm_in_buffer_error_packets_total No buffer is left +# TYPE node_xfrm_in_buffer_error_packets_total counter +node_xfrm_in_buffer_error_packets_total 2 +# HELP node_xfrm_in_error_packets_total All errors not matched by other +# TYPE node_xfrm_in_error_packets_total counter +node_xfrm_in_error_packets_total 1 +# HELP node_xfrm_in_hdr_error_packets_total Header error +# TYPE node_xfrm_in_hdr_error_packets_total counter +node_xfrm_in_hdr_error_packets_total 4 +# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found +# TYPE node_xfrm_in_no_pols_packets_total counter +node_xfrm_in_no_pols_packets_total 65432 +# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong +# TYPE node_xfrm_in_no_states_packets_total counter +node_xfrm_in_no_states_packets_total 3 +# HELP node_xfrm_in_pol_block_packets_total Policy discards +# TYPE node_xfrm_in_pol_block_packets_total counter +node_xfrm_in_pol_block_packets_total 100 +# HELP node_xfrm_in_pol_error_packets_total Policy error +# TYPE node_xfrm_in_pol_error_packets_total counter +node_xfrm_in_pol_error_packets_total 10000 +# HELP node_xfrm_in_state_expired_packets_total State is expired +# TYPE node_xfrm_in_state_expired_packets_total counter +node_xfrm_in_state_expired_packets_total 7 +# HELP node_xfrm_in_state_invalid_packets_total State is invalid +# TYPE node_xfrm_in_state_invalid_packets_total counter +node_xfrm_in_state_invalid_packets_total 55555 +# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. 
UDP encapsulation type is mismatch +# TYPE node_xfrm_in_state_mismatch_packets_total counter +node_xfrm_in_state_mismatch_packets_total 23451 +# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_in_state_mode_error_packets_total counter +node_xfrm_in_state_mode_error_packets_total 100 +# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong +# TYPE node_xfrm_in_state_proto_error_packets_total counter +node_xfrm_in_state_proto_error_packets_total 40 +# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window +# TYPE node_xfrm_in_state_seq_error_packets_total counter +node_xfrm_in_state_seq_error_packets_total 6000 +# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong +# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter +node_xfrm_in_tmpl_mismatch_packets_total 51 +# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error +# TYPE node_xfrm_out_bundle_check_error_packets_total counter +node_xfrm_out_bundle_check_error_packets_total 555 +# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error +# TYPE node_xfrm_out_bundle_gen_error_packets_total counter +node_xfrm_out_bundle_gen_error_packets_total 43321 +# HELP node_xfrm_out_error_packets_total All errors which is not matched others +# TYPE node_xfrm_out_error_packets_total counter +node_xfrm_out_error_packets_total 1e+06 +# HELP node_xfrm_out_no_states_packets_total No state is found +# TYPE node_xfrm_out_no_states_packets_total counter +node_xfrm_out_no_states_packets_total 869 +# HELP node_xfrm_out_pol_block_packets_total Policy discards +# TYPE node_xfrm_out_pol_block_packets_total counter +node_xfrm_out_pol_block_packets_total 43456 +# HELP node_xfrm_out_pol_dead_packets_total Policy is dead +# TYPE node_xfrm_out_pol_dead_packets_total counter +node_xfrm_out_pol_dead_packets_total 7656 +# HELP node_xfrm_out_pol_error_packets_total Policy error +# TYPE node_xfrm_out_pol_error_packets_total counter +node_xfrm_out_pol_error_packets_total 1454 +# HELP node_xfrm_out_state_expired_packets_total State is expired +# TYPE node_xfrm_out_state_expired_packets_total counter +node_xfrm_out_state_expired_packets_total 565 +# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired +# TYPE node_xfrm_out_state_invalid_packets_total counter +node_xfrm_out_state_invalid_packets_total 28765 +# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_out_state_mode_error_packets_total counter +node_xfrm_out_state_mode_error_packets_total 8 +# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error +# TYPE node_xfrm_out_state_proto_error_packets_total counter +node_xfrm_out_state_proto_error_packets_total 4542 +# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow +# TYPE node_xfrm_out_state_seq_error_packets_total counter +node_xfrm_out_state_seq_error_packets_total 543 +# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors_total counter +promhttp_metric_handler_errors_total{cause="encoding"} 0 +promhttp_metric_handler_errors_total{cause="gathering"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. 
+# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 0 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_1 untyped +testmetric1_1{foo="bar"} 10 +# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_2 untyped +testmetric1_2{foo="baz"} 20 +# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_1 untyped +testmetric2_1{foo="bar"} 30 +# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_2 untyped +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/e2e-output-netbsd.txt b/collector/fixtures/e2e-output-netbsd.txt new file mode 100644 index 0000000000..f6e73e2a07 --- /dev/null +++ b/collector/fixtures/e2e-output-netbsd.txt @@ -0,0 +1,209 @@ +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. +# TYPE go_gc_duration_seconds summary +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent. +# TYPE go_gc_gogc_percent gauge +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes. +# TYPE go_gc_gomemlimit_bytes gauge +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_alloc_bytes gauge +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. +# TYPE go_memstats_alloc_bytes_total counter +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. +# TYPE go_memstats_buck_hash_sys_bytes gauge +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_frees_total counter +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. +# TYPE go_memstats_gc_sys_bytes gauge +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_heap_alloc_bytes gauge +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_idle_bytes gauge +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. 
Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes +# TYPE go_memstats_heap_inuse_bytes gauge +# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects. +# TYPE go_memstats_heap_objects gauge +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes. +# TYPE go_memstats_heap_released_bytes gauge +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_sys_bytes gauge +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_mallocs_total counter +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes. +# TYPE go_memstats_mcache_inuse_bytes gauge +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes. +# TYPE go_memstats_mcache_sys_bytes gauge +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes. +# TYPE go_memstats_mspan_inuse_bytes gauge +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes. +# TYPE go_memstats_mspan_sys_bytes gauge +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes. +# TYPE go_memstats_next_gc_bytes gauge +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes. +# TYPE go_memstats_other_sys_bytes gauge +# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes. +# TYPE go_memstats_stack_inuse_bytes gauge +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes. +# TYPE go_memstats_stack_sys_bytes gauge +# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte. +# TYPE go_memstats_sys_bytes gauge +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads. +# TYPE go_sched_gomaxprocs_threads gauge +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. +# TYPE node_exporter_build_info gauge +# HELP node_memory_swap_size_bytes Memory information field swap_size_bytes. 
+# TYPE node_memory_swap_size_bytes gauge +node_memory_swap_size_bytes 6.442426368e+09 +# HELP node_memory_swap_used_bytes Memory information field swap_used_bytes. +# TYPE node_memory_swap_used_bytes gauge +node_memory_swap_used_bytes 0 +# HELP node_memory_swapped_in_pages_bytes_total Memory information field swapped_in_pages_bytes_total. +# TYPE node_memory_swapped_in_pages_bytes_total counter +node_memory_swapped_in_pages_bytes_total 0 +# HELP node_memory_swapped_out_pages_bytes_total Memory information field swapped_out_pages_bytes_total. +# TYPE node_memory_swapped_out_pages_bytes_total counter +node_memory_swapped_out_pages_bytes_total 0 +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. +# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. +# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. +# TYPE node_scrape_collector_duration_seconds gauge +# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. +# TYPE node_scrape_collector_success gauge +node_scrape_collector_success{collector="boottime"} 1 +node_scrape_collector_success{collector="cpu"} 0 +node_scrape_collector_success{collector="loadavg"} 1 +node_scrape_collector_success{collector="meminfo"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="textfile"} 1 +node_scrape_collector_success{collector="time"} 1 +node_scrape_collector_success{collector="xfrm"} 1 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP node_time_seconds System time in seconds since epoch (1970). +# TYPE node_time_seconds gauge +# HELP node_time_zone_offset_seconds System time zone offset in seconds. +# TYPE node_time_zone_offset_seconds gauge +# HELP node_xfrm_acquire_error_packets_total State hasn’t been fully acquired before use +# TYPE node_xfrm_acquire_error_packets_total counter +node_xfrm_acquire_error_packets_total 24532 +# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed +# TYPE node_xfrm_fwd_hdr_error_packets_total counter +node_xfrm_fwd_hdr_error_packets_total 6654 +# HELP node_xfrm_in_buffer_error_packets_total No buffer is left +# TYPE node_xfrm_in_buffer_error_packets_total counter +node_xfrm_in_buffer_error_packets_total 2 +# HELP node_xfrm_in_error_packets_total All errors not matched by other +# TYPE node_xfrm_in_error_packets_total counter +node_xfrm_in_error_packets_total 1 +# HELP node_xfrm_in_hdr_error_packets_total Header error +# TYPE node_xfrm_in_hdr_error_packets_total counter +node_xfrm_in_hdr_error_packets_total 4 +# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. 
Inbound SAs are correct but no SP is found +# TYPE node_xfrm_in_no_pols_packets_total counter +node_xfrm_in_no_pols_packets_total 65432 +# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong +# TYPE node_xfrm_in_no_states_packets_total counter +node_xfrm_in_no_states_packets_total 3 +# HELP node_xfrm_in_pol_block_packets_total Policy discards +# TYPE node_xfrm_in_pol_block_packets_total counter +node_xfrm_in_pol_block_packets_total 100 +# HELP node_xfrm_in_pol_error_packets_total Policy error +# TYPE node_xfrm_in_pol_error_packets_total counter +node_xfrm_in_pol_error_packets_total 10000 +# HELP node_xfrm_in_state_expired_packets_total State is expired +# TYPE node_xfrm_in_state_expired_packets_total counter +node_xfrm_in_state_expired_packets_total 7 +# HELP node_xfrm_in_state_invalid_packets_total State is invalid +# TYPE node_xfrm_in_state_invalid_packets_total counter +node_xfrm_in_state_invalid_packets_total 55555 +# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch +# TYPE node_xfrm_in_state_mismatch_packets_total counter +node_xfrm_in_state_mismatch_packets_total 23451 +# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_in_state_mode_error_packets_total counter +node_xfrm_in_state_mode_error_packets_total 100 +# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong +# TYPE node_xfrm_in_state_proto_error_packets_total counter +node_xfrm_in_state_proto_error_packets_total 40 +# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window +# TYPE node_xfrm_in_state_seq_error_packets_total counter +node_xfrm_in_state_seq_error_packets_total 6000 +# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. 
Inbound SAs are correct but SP rule is wrong +# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter +node_xfrm_in_tmpl_mismatch_packets_total 51 +# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error +# TYPE node_xfrm_out_bundle_check_error_packets_total counter +node_xfrm_out_bundle_check_error_packets_total 555 +# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error +# TYPE node_xfrm_out_bundle_gen_error_packets_total counter +node_xfrm_out_bundle_gen_error_packets_total 43321 +# HELP node_xfrm_out_error_packets_total All errors which is not matched others +# TYPE node_xfrm_out_error_packets_total counter +node_xfrm_out_error_packets_total 1e+06 +# HELP node_xfrm_out_no_states_packets_total No state is found +# TYPE node_xfrm_out_no_states_packets_total counter +node_xfrm_out_no_states_packets_total 869 +# HELP node_xfrm_out_pol_block_packets_total Policy discards +# TYPE node_xfrm_out_pol_block_packets_total counter +node_xfrm_out_pol_block_packets_total 43456 +# HELP node_xfrm_out_pol_dead_packets_total Policy is dead +# TYPE node_xfrm_out_pol_dead_packets_total counter +node_xfrm_out_pol_dead_packets_total 7656 +# HELP node_xfrm_out_pol_error_packets_total Policy error +# TYPE node_xfrm_out_pol_error_packets_total counter +node_xfrm_out_pol_error_packets_total 1454 +# HELP node_xfrm_out_state_expired_packets_total State is expired +# TYPE node_xfrm_out_state_expired_packets_total counter +node_xfrm_out_state_expired_packets_total 565 +# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired +# TYPE node_xfrm_out_state_invalid_packets_total counter +node_xfrm_out_state_invalid_packets_total 28765 +# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_out_state_mode_error_packets_total counter +node_xfrm_out_state_mode_error_packets_total 8 +# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error +# TYPE node_xfrm_out_state_proto_error_packets_total counter +node_xfrm_out_state_proto_error_packets_total 4542 +# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow +# TYPE node_xfrm_out_state_seq_error_packets_total counter +node_xfrm_out_state_seq_error_packets_total 543 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors_total counter +promhttp_metric_handler_errors_total{cause="encoding"} 0 +promhttp_metric_handler_errors_total{cause="gathering"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. 
+# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 0 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_1 untyped +testmetric1_1{foo="bar"} 10 +# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_2 untyped +testmetric1_2{foo="baz"} 20 +# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_1 untyped +testmetric2_1{foo="bar"} 30 +# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_2 untyped +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/e2e-output-openbsd.txt b/collector/fixtures/e2e-output-openbsd.txt new file mode 100644 index 0000000000..8dc23cec4c --- /dev/null +++ b/collector/fixtures/e2e-output-openbsd.txt @@ -0,0 +1,276 @@ +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. +# TYPE go_gc_duration_seconds summary +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent. +# TYPE go_gc_gogc_percent gauge +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes. +# TYPE go_gc_gomemlimit_bytes gauge +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_alloc_bytes gauge +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. +# TYPE go_memstats_alloc_bytes_total counter +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. +# TYPE go_memstats_buck_hash_sys_bytes gauge +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_frees_total counter +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. +# TYPE go_memstats_gc_sys_bytes gauge +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_heap_alloc_bytes gauge +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_idle_bytes gauge +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes +# TYPE go_memstats_heap_inuse_bytes gauge +# HELP go_memstats_heap_objects Number of currently allocated objects. 
Equals to /gc/heap/objects:objects. +# TYPE go_memstats_heap_objects gauge +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes. +# TYPE go_memstats_heap_released_bytes gauge +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_sys_bytes gauge +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_mallocs_total counter +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes. +# TYPE go_memstats_mcache_inuse_bytes gauge +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes. +# TYPE go_memstats_mcache_sys_bytes gauge +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes. +# TYPE go_memstats_mspan_inuse_bytes gauge +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes. +# TYPE go_memstats_mspan_sys_bytes gauge +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes. +# TYPE go_memstats_next_gc_bytes gauge +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes. +# TYPE go_memstats_other_sys_bytes gauge +# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes. +# TYPE go_memstats_stack_inuse_bytes gauge +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes. +# TYPE go_memstats_stack_sys_bytes gauge +# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte. +# TYPE go_memstats_sys_bytes gauge +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads. +# TYPE go_sched_gomaxprocs_threads gauge +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +# HELP node_buddyinfo_blocks Count of free blocks according to size. 
+# TYPE node_buddyinfo_blocks gauge +node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759 +node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381 +node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572 +node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093 +node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3 +node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791 +node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185 +node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475 +node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530 +node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2 +node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194 +node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567 +node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45 +node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102 +node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12 +node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4 +node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 +# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. +# TYPE node_exporter_build_info gauge +# HELP node_memory_swap_size_bytes Memory information field swap_size_bytes. +# TYPE node_memory_swap_size_bytes gauge +node_memory_swap_size_bytes 6.693941248e+09 +# HELP node_memory_swap_used_bytes Memory information field swap_used_bytes. +# TYPE node_memory_swap_used_bytes gauge +node_memory_swap_used_bytes 0 +# HELP node_memory_swapped_in_pages_bytes_total Memory information field swapped_in_pages_bytes_total. +# TYPE node_memory_swapped_in_pages_bytes_total counter +node_memory_swapped_in_pages_bytes_total 0 +# HELP node_memory_swapped_out_pages_bytes_total Memory information field swapped_out_pages_bytes_total. +# TYPE node_memory_swapped_out_pages_bytes_total counter +node_memory_swapped_out_pages_bytes_total 0 +# HELP node_network_noproto_total Network device statistic noproto. +# TYPE node_network_noproto_total counter +node_network_noproto_total{device="lo0"} 0 +node_network_noproto_total{device="pflog0"} 0 +# HELP node_network_receive_drop_total Network device statistic receive_drop. +# TYPE node_network_receive_drop_total counter +node_network_receive_drop_total{device="lo0"} 0 +node_network_receive_drop_total{device="pflog0"} 0 +# HELP node_network_receive_errs_total Network device statistic receive_errs. 
+# TYPE node_network_receive_errs_total counter +node_network_receive_errs_total{device="lo0"} 0 +node_network_receive_errs_total{device="pflog0"} 0 +# HELP node_network_receive_packets_total Network device statistic receive_packets. +# TYPE node_network_receive_packets_total counter +# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. +# TYPE node_network_transmit_bytes_total counter +# HELP node_network_transmit_colls_total Network device statistic transmit_colls. +# TYPE node_network_transmit_colls_total counter +node_network_transmit_colls_total{device="lo0"} 0 +node_network_transmit_colls_total{device="pflog0"} 0 +# HELP node_network_transmit_drop_total Network device statistic transmit_drop. +# TYPE node_network_transmit_drop_total counter +node_network_transmit_drop_total{device="lo0"} 0 +node_network_transmit_drop_total{device="pflog0"} 0 +# HELP node_network_transmit_errs_total Network device statistic transmit_errs. +# TYPE node_network_transmit_errs_total counter +node_network_transmit_errs_total{device="lo0"} 0 +node_network_transmit_errs_total{device="pflog0"} 0 +# HELP node_network_transmit_packets_total Network device statistic transmit_packets. +# TYPE node_network_transmit_packets_total counter +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. +# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. +# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. +# TYPE node_scrape_collector_duration_seconds gauge +# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. +# TYPE node_scrape_collector_success gauge +node_scrape_collector_success{collector="boottime"} 1 +node_scrape_collector_success{collector="buddyinfo"} 1 +node_scrape_collector_success{collector="cpu"} 1 +node_scrape_collector_success{collector="diskstats"} 1 +node_scrape_collector_success{collector="interrupts"} 1 +node_scrape_collector_success{collector="loadavg"} 1 +node_scrape_collector_success{collector="meminfo"} 1 +node_scrape_collector_success{collector="netdev"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="textfile"} 1 +node_scrape_collector_success{collector="time"} 1 +node_scrape_collector_success{collector="xfrm"} 1 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP node_time_seconds System time in seconds since epoch (1970). +# TYPE node_time_seconds gauge +# HELP node_time_zone_offset_seconds System time zone offset in seconds. 
+# TYPE node_time_zone_offset_seconds gauge +# HELP node_xfrm_acquire_error_packets_total State hasn’t been fully acquired before use +# TYPE node_xfrm_acquire_error_packets_total counter +node_xfrm_acquire_error_packets_total 24532 +# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed +# TYPE node_xfrm_fwd_hdr_error_packets_total counter +node_xfrm_fwd_hdr_error_packets_total 6654 +# HELP node_xfrm_in_buffer_error_packets_total No buffer is left +# TYPE node_xfrm_in_buffer_error_packets_total counter +node_xfrm_in_buffer_error_packets_total 2 +# HELP node_xfrm_in_error_packets_total All errors not matched by other +# TYPE node_xfrm_in_error_packets_total counter +node_xfrm_in_error_packets_total 1 +# HELP node_xfrm_in_hdr_error_packets_total Header error +# TYPE node_xfrm_in_hdr_error_packets_total counter +node_xfrm_in_hdr_error_packets_total 4 +# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found +# TYPE node_xfrm_in_no_pols_packets_total counter +node_xfrm_in_no_pols_packets_total 65432 +# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong +# TYPE node_xfrm_in_no_states_packets_total counter +node_xfrm_in_no_states_packets_total 3 +# HELP node_xfrm_in_pol_block_packets_total Policy discards +# TYPE node_xfrm_in_pol_block_packets_total counter +node_xfrm_in_pol_block_packets_total 100 +# HELP node_xfrm_in_pol_error_packets_total Policy error +# TYPE node_xfrm_in_pol_error_packets_total counter +node_xfrm_in_pol_error_packets_total 10000 +# HELP node_xfrm_in_state_expired_packets_total State is expired +# TYPE node_xfrm_in_state_expired_packets_total counter +node_xfrm_in_state_expired_packets_total 7 +# HELP node_xfrm_in_state_invalid_packets_total State is invalid +# TYPE node_xfrm_in_state_invalid_packets_total counter +node_xfrm_in_state_invalid_packets_total 55555 +# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch +# TYPE node_xfrm_in_state_mismatch_packets_total counter +node_xfrm_in_state_mismatch_packets_total 23451 +# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_in_state_mode_error_packets_total counter +node_xfrm_in_state_mode_error_packets_total 100 +# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong +# TYPE node_xfrm_in_state_proto_error_packets_total counter +node_xfrm_in_state_proto_error_packets_total 40 +# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window +# TYPE node_xfrm_in_state_seq_error_packets_total counter +node_xfrm_in_state_seq_error_packets_total 6000 +# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. 
Inbound SAs are correct but SP rule is wrong +# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter +node_xfrm_in_tmpl_mismatch_packets_total 51 +# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error +# TYPE node_xfrm_out_bundle_check_error_packets_total counter +node_xfrm_out_bundle_check_error_packets_total 555 +# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error +# TYPE node_xfrm_out_bundle_gen_error_packets_total counter +node_xfrm_out_bundle_gen_error_packets_total 43321 +# HELP node_xfrm_out_error_packets_total All errors which is not matched others +# TYPE node_xfrm_out_error_packets_total counter +node_xfrm_out_error_packets_total 1e+06 +# HELP node_xfrm_out_no_states_packets_total No state is found +# TYPE node_xfrm_out_no_states_packets_total counter +node_xfrm_out_no_states_packets_total 869 +# HELP node_xfrm_out_pol_block_packets_total Policy discards +# TYPE node_xfrm_out_pol_block_packets_total counter +node_xfrm_out_pol_block_packets_total 43456 +# HELP node_xfrm_out_pol_dead_packets_total Policy is dead +# TYPE node_xfrm_out_pol_dead_packets_total counter +node_xfrm_out_pol_dead_packets_total 7656 +# HELP node_xfrm_out_pol_error_packets_total Policy error +# TYPE node_xfrm_out_pol_error_packets_total counter +node_xfrm_out_pol_error_packets_total 1454 +# HELP node_xfrm_out_state_expired_packets_total State is expired +# TYPE node_xfrm_out_state_expired_packets_total counter +node_xfrm_out_state_expired_packets_total 565 +# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired +# TYPE node_xfrm_out_state_invalid_packets_total counter +node_xfrm_out_state_invalid_packets_total 28765 +# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_out_state_mode_error_packets_total counter +node_xfrm_out_state_mode_error_packets_total 8 +# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error +# TYPE node_xfrm_out_state_proto_error_packets_total counter +node_xfrm_out_state_proto_error_packets_total 4542 +# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow +# TYPE node_xfrm_out_state_seq_error_packets_total counter +node_xfrm_out_state_seq_error_packets_total 543 +# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors_total counter +promhttp_metric_handler_errors_total{cause="encoding"} 0 +promhttp_metric_handler_errors_total{cause="gathering"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. 
+# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 0 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_1 untyped +testmetric1_1{foo="bar"} 10 +# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_2 untyped +testmetric1_2{foo="baz"} 20 +# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_1 untyped +testmetric2_1{foo="bar"} 30 +# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_2 untyped +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/e2e-output-solaris.txt b/collector/fixtures/e2e-output-solaris.txt new file mode 100644 index 0000000000..2f0f1d47ba --- /dev/null +++ b/collector/fixtures/e2e-output-solaris.txt @@ -0,0 +1,234 @@ +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. +# TYPE go_gc_duration_seconds summary +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent. +# TYPE go_gc_gogc_percent gauge +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes. +# TYPE go_gc_gomemlimit_bytes gauge +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_alloc_bytes gauge +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. +# TYPE go_memstats_alloc_bytes_total counter +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. +# TYPE go_memstats_buck_hash_sys_bytes gauge +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_frees_total counter +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. +# TYPE go_memstats_gc_sys_bytes gauge +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_heap_alloc_bytes gauge +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_idle_bytes gauge +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes +# TYPE go_memstats_heap_inuse_bytes gauge +# HELP go_memstats_heap_objects Number of currently allocated objects. 
Equals to /gc/heap/objects:objects. +# TYPE go_memstats_heap_objects gauge +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes. +# TYPE go_memstats_heap_released_bytes gauge +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_sys_bytes gauge +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_mallocs_total counter +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes. +# TYPE go_memstats_mcache_inuse_bytes gauge +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes. +# TYPE go_memstats_mcache_sys_bytes gauge +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes. +# TYPE go_memstats_mspan_inuse_bytes gauge +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes. +# TYPE go_memstats_mspan_sys_bytes gauge +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes. +# TYPE go_memstats_next_gc_bytes gauge +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes. +# TYPE go_memstats_other_sys_bytes gauge +# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes. +# TYPE go_memstats_stack_inuse_bytes gauge +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes. +# TYPE go_memstats_stack_sys_bytes gauge +# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte. +# TYPE go_memstats_sys_bytes gauge +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads. +# TYPE go_sched_gomaxprocs_threads gauge +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +# HELP node_buddyinfo_blocks Count of free blocks according to size. 
+# TYPE node_buddyinfo_blocks gauge +node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759 +node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381 +node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572 +node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093 +node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3 +node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791 +node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185 +node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475 +node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530 +node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2 +node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194 +node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567 +node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45 +node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102 +node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12 +node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4 +node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 +# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. +# TYPE node_exporter_build_info gauge +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. +# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. +# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. +# TYPE node_scrape_collector_duration_seconds gauge +# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. 
+# TYPE node_scrape_collector_success gauge +node_scrape_collector_success{collector="boottime"} 1 +node_scrape_collector_success{collector="buddyinfo"} 1 +node_scrape_collector_success{collector="cpu"} 0 +node_scrape_collector_success{collector="cpufreq"} 1 +node_scrape_collector_success{collector="loadavg"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="textfile"} 1 +node_scrape_collector_success{collector="time"} 1 +node_scrape_collector_success{collector="xfrm"} 1 +node_scrape_collector_success{collector="zfs"} 0 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP node_time_seconds System time in seconds since epoch (1970). +# TYPE node_time_seconds gauge +# HELP node_time_zone_offset_seconds System time zone offset in seconds. +# TYPE node_time_zone_offset_seconds gauge +# HELP node_xfrm_acquire_error_packets_total State hasn’t been fully acquired before use +# TYPE node_xfrm_acquire_error_packets_total counter +node_xfrm_acquire_error_packets_total 24532 +# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed +# TYPE node_xfrm_fwd_hdr_error_packets_total counter +node_xfrm_fwd_hdr_error_packets_total 6654 +# HELP node_xfrm_in_buffer_error_packets_total No buffer is left +# TYPE node_xfrm_in_buffer_error_packets_total counter +node_xfrm_in_buffer_error_packets_total 2 +# HELP node_xfrm_in_error_packets_total All errors not matched by other +# TYPE node_xfrm_in_error_packets_total counter +node_xfrm_in_error_packets_total 1 +# HELP node_xfrm_in_hdr_error_packets_total Header error +# TYPE node_xfrm_in_hdr_error_packets_total counter +node_xfrm_in_hdr_error_packets_total 4 +# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found +# TYPE node_xfrm_in_no_pols_packets_total counter +node_xfrm_in_no_pols_packets_total 65432 +# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong +# TYPE node_xfrm_in_no_states_packets_total counter +node_xfrm_in_no_states_packets_total 3 +# HELP node_xfrm_in_pol_block_packets_total Policy discards +# TYPE node_xfrm_in_pol_block_packets_total counter +node_xfrm_in_pol_block_packets_total 100 +# HELP node_xfrm_in_pol_error_packets_total Policy error +# TYPE node_xfrm_in_pol_error_packets_total counter +node_xfrm_in_pol_error_packets_total 10000 +# HELP node_xfrm_in_state_expired_packets_total State is expired +# TYPE node_xfrm_in_state_expired_packets_total counter +node_xfrm_in_state_expired_packets_total 7 +# HELP node_xfrm_in_state_invalid_packets_total State is invalid +# TYPE node_xfrm_in_state_invalid_packets_total counter +node_xfrm_in_state_invalid_packets_total 55555 +# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch +# TYPE node_xfrm_in_state_mismatch_packets_total counter +node_xfrm_in_state_mismatch_packets_total 23451 +# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_in_state_mode_error_packets_total counter +node_xfrm_in_state_mode_error_packets_total 100 +# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. 
SA key is wrong +# TYPE node_xfrm_in_state_proto_error_packets_total counter +node_xfrm_in_state_proto_error_packets_total 40 +# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window +# TYPE node_xfrm_in_state_seq_error_packets_total counter +node_xfrm_in_state_seq_error_packets_total 6000 +# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong +# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter +node_xfrm_in_tmpl_mismatch_packets_total 51 +# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error +# TYPE node_xfrm_out_bundle_check_error_packets_total counter +node_xfrm_out_bundle_check_error_packets_total 555 +# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error +# TYPE node_xfrm_out_bundle_gen_error_packets_total counter +node_xfrm_out_bundle_gen_error_packets_total 43321 +# HELP node_xfrm_out_error_packets_total All errors which is not matched others +# TYPE node_xfrm_out_error_packets_total counter +node_xfrm_out_error_packets_total 1e+06 +# HELP node_xfrm_out_no_states_packets_total No state is found +# TYPE node_xfrm_out_no_states_packets_total counter +node_xfrm_out_no_states_packets_total 869 +# HELP node_xfrm_out_pol_block_packets_total Policy discards +# TYPE node_xfrm_out_pol_block_packets_total counter +node_xfrm_out_pol_block_packets_total 43456 +# HELP node_xfrm_out_pol_dead_packets_total Policy is dead +# TYPE node_xfrm_out_pol_dead_packets_total counter +node_xfrm_out_pol_dead_packets_total 7656 +# HELP node_xfrm_out_pol_error_packets_total Policy error +# TYPE node_xfrm_out_pol_error_packets_total counter +node_xfrm_out_pol_error_packets_total 1454 +# HELP node_xfrm_out_state_expired_packets_total State is expired +# TYPE node_xfrm_out_state_expired_packets_total counter +node_xfrm_out_state_expired_packets_total 565 +# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired +# TYPE node_xfrm_out_state_invalid_packets_total counter +node_xfrm_out_state_invalid_packets_total 28765 +# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_out_state_mode_error_packets_total counter +node_xfrm_out_state_mode_error_packets_total 8 +# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error +# TYPE node_xfrm_out_state_proto_error_packets_total counter +node_xfrm_out_state_proto_error_packets_total 4542 +# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow +# TYPE node_xfrm_out_state_seq_error_packets_total counter +node_xfrm_out_state_seq_error_packets_total 543 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors_total counter +promhttp_metric_handler_errors_total{cause="encoding"} 0 +promhttp_metric_handler_errors_total{cause="gathering"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. 
+# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 0 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_1 untyped +testmetric1_1{foo="bar"} 10 +# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_2 untyped +testmetric1_2{foo="baz"} 20 +# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_1 untyped +testmetric2_1{foo="bar"} 30 +# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_2 untyped +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/e2e-output.txt b/collector/fixtures/e2e-output.txt index 0edc65bbd9..6f2b0376cf 100644 --- a/collector/fixtures/e2e-output.txt +++ b/collector/fixtures/e2e-output.txt @@ -1,57 +1,59 @@ -# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. # TYPE go_gc_duration_seconds summary +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent. +# TYPE go_gc_gogc_percent gauge +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes. +# TYPE go_gc_gomemlimit_bytes gauge # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge # HELP go_info Information about the Go environment. # TYPE go_info gauge -# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. # TYPE go_memstats_alloc_bytes gauge -# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. # TYPE go_memstats_alloc_bytes_total counter -# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. # TYPE go_memstats_buck_hash_sys_bytes gauge -# HELP go_memstats_frees_total Total number of frees. +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. # TYPE go_memstats_frees_total counter -# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. -# TYPE go_memstats_gc_cpu_fraction gauge -# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. 
# TYPE go_memstats_gc_sys_bytes gauge -# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes. # TYPE go_memstats_heap_alloc_bytes gauge -# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. # TYPE go_memstats_heap_idle_bytes gauge -# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes # TYPE go_memstats_heap_inuse_bytes gauge -# HELP go_memstats_heap_objects Number of allocated objects. +# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects. # TYPE go_memstats_heap_objects gauge -# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes. # TYPE go_memstats_heap_released_bytes gauge -# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. # TYPE go_memstats_heap_sys_bytes gauge # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge -# HELP go_memstats_lookups_total Total number of pointer lookups. -# TYPE go_memstats_lookups_total counter -# HELP go_memstats_mallocs_total Total number of mallocs. +# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects. # TYPE go_memstats_mallocs_total counter -# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes. # TYPE go_memstats_mcache_inuse_bytes gauge -# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes. # TYPE go_memstats_mcache_sys_bytes gauge -# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes. # TYPE go_memstats_mspan_inuse_bytes gauge -# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes. 
# TYPE go_memstats_mspan_sys_bytes gauge -# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes. # TYPE go_memstats_next_gc_bytes gauge -# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes. # TYPE go_memstats_other_sys_bytes gauge -# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes. # TYPE go_memstats_stack_inuse_bytes gauge -# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes. # TYPE go_memstats_stack_sys_bytes gauge -# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte. # TYPE go_memstats_sys_bytes gauge +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads. +# TYPE go_sched_gomaxprocs_threads gauge # HELP go_threads Number of OS threads created. # TYPE go_threads gauge # HELP node_arp_entries ARP entries by device @@ -99,13 +101,16 @@ node_bcache_cache_misses_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-8 node_bcache_cache_read_races_total{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_readaheads_total Count of times readahead occurred. # TYPE node_bcache_cache_readaheads_total counter -node_bcache_cache_readaheads_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +node_bcache_cache_readaheads_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 13 # HELP node_bcache_congested Congestion. # TYPE node_bcache_congested gauge node_bcache_congested{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_dirty_data_bytes Amount of dirty data for this backing device in the cache. # TYPE node_bcache_dirty_data_bytes gauge node_bcache_dirty_data_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_dirty_target_bytes Current dirty data target threshold for this backing device in bytes. +# TYPE node_bcache_dirty_target_bytes gauge +node_bcache_dirty_target_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 2.189426688e+10 # HELP node_bcache_io_errors Number of errors that have occurred, decayed by io_error_halflife. # TYPE node_bcache_io_errors gauge node_bcache_io_errors{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 @@ -124,6 +129,18 @@ node_bcache_root_usage_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_tree_depth Depth of the btree. # TYPE node_bcache_tree_depth gauge node_bcache_tree_depth{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_writeback_change Last writeback rate change step for this backing device. 
+# TYPE node_bcache_writeback_change gauge +node_bcache_writeback_change{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 329204 +# HELP node_bcache_writeback_rate Current writeback rate for this backing device in bytes. +# TYPE node_bcache_writeback_rate gauge +node_bcache_writeback_rate{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1.150976e+06 +# HELP node_bcache_writeback_rate_integral_term Current result of integral controller, part of writeback rate +# TYPE node_bcache_writeback_rate_integral_term gauge +node_bcache_writeback_rate_integral_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 808960 +# HELP node_bcache_writeback_rate_proportional_term Current result of proportional controller, part of writeback rate +# TYPE node_bcache_writeback_rate_proportional_term gauge +node_bcache_writeback_rate_proportional_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 437748 # HELP node_bcache_written_bytes_total Sum of all data that has been written to the cache. # TYPE node_bcache_written_bytes_total counter node_bcache_written_bytes_total{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 @@ -148,6 +165,14 @@ node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid1",uuid="0abb2 node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 node_btrfs_allocation_ratio{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2 node_btrfs_allocation_ratio{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 +# HELP node_btrfs_commit_seconds_total Sum of the duration of all commits, in seconds. +# TYPE node_btrfs_commit_seconds_total counter +node_btrfs_commit_seconds_total{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 47836.09 +node_btrfs_commit_seconds_total{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +# HELP node_btrfs_commits_total The total number of commits that have occurred. +# TYPE node_btrfs_commits_total counter +node_btrfs_commits_total{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 258051 +node_btrfs_commits_total{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 # HELP node_btrfs_device_size_bytes Size of a device that is part of the filesystem. # TYPE node_btrfs_device_size_bytes gauge node_btrfs_device_size_bytes{device="loop22",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 @@ -164,6 +189,14 @@ node_btrfs_global_rsv_size_bytes{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1. # TYPE node_btrfs_info gauge node_btrfs_info{label="",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1 node_btrfs_info{label="fixture",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 +# HELP node_btrfs_last_commit_seconds Duration of the most recent commit, in seconds. +# TYPE node_btrfs_last_commit_seconds gauge +node_btrfs_last_commit_seconds{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 +node_btrfs_last_commit_seconds{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +# HELP node_btrfs_max_commit_seconds Duration of the slowest commit, in seconds. 
+# TYPE node_btrfs_max_commit_seconds gauge +node_btrfs_max_commit_seconds{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 51.462 +node_btrfs_max_commit_seconds{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 # HELP node_btrfs_reserved_bytes Amount of space reserved for a data type # TYPE node_btrfs_reserved_bytes gauge node_btrfs_reserved_bytes{block_group_type="data",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 @@ -223,6 +256,34 @@ node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 +# HELP node_cgroups_cgroups Current cgroup number of the subsystem. +# TYPE node_cgroups_cgroups gauge +node_cgroups_cgroups{subsys_name="blkio"} 170 +node_cgroups_cgroups{subsys_name="cpu"} 172 +node_cgroups_cgroups{subsys_name="cpuacct"} 172 +node_cgroups_cgroups{subsys_name="cpuset"} 47 +node_cgroups_cgroups{subsys_name="devices"} 170 +node_cgroups_cgroups{subsys_name="freezer"} 47 +node_cgroups_cgroups{subsys_name="hugetlb"} 47 +node_cgroups_cgroups{subsys_name="memory"} 234 +node_cgroups_cgroups{subsys_name="net_cls"} 47 +node_cgroups_cgroups{subsys_name="perf_event"} 47 +node_cgroups_cgroups{subsys_name="pids"} 170 +node_cgroups_cgroups{subsys_name="rdma"} 1 +# HELP node_cgroups_enabled Current cgroup number of the subsystem. +# TYPE node_cgroups_enabled gauge +node_cgroups_enabled{subsys_name="blkio"} 1 +node_cgroups_enabled{subsys_name="cpu"} 1 +node_cgroups_enabled{subsys_name="cpuacct"} 1 +node_cgroups_enabled{subsys_name="cpuset"} 1 +node_cgroups_enabled{subsys_name="devices"} 1 +node_cgroups_enabled{subsys_name="freezer"} 1 +node_cgroups_enabled{subsys_name="hugetlb"} 1 +node_cgroups_enabled{subsys_name="memory"} 1 +node_cgroups_enabled{subsys_name="net_cls"} 1 +node_cgroups_enabled{subsys_name="perf_event"} 1 +node_cgroups_enabled{subsys_name="pids"} 1 +node_cgroups_enabled{subsys_name="rdma"} 1 # HELP node_context_switches_total Total number of context switches. # TYPE node_context_switches_total counter node_context_switches_total 3.8014093e+07 @@ -232,13 +293,35 @@ node_cooling_device_cur_state{name="0",type="Processor"} 0 # HELP node_cooling_device_max_state Maximum throttle state of the cooling device # TYPE node_cooling_device_max_state gauge node_cooling_device_max_state{name="0",type="Processor"} 3 -# HELP node_cpu_core_throttles_total Number of times this cpu core has been throttled. +# HELP node_cpu_bug_info The `bugs` field of CPU information from /proc/cpuinfo taken from the first core. +# TYPE node_cpu_bug_info gauge +node_cpu_bug_info{bug="cpu_meltdown"} 1 +node_cpu_bug_info{bug="mds"} 1 +node_cpu_bug_info{bug="spectre_v1"} 1 +node_cpu_bug_info{bug="spectre_v2"} 1 +# HELP node_cpu_core_throttles_total Number of times this CPU core has been throttled. # TYPE node_cpu_core_throttles_total counter node_cpu_core_throttles_total{core="0",package="0"} 5 node_cpu_core_throttles_total{core="0",package="1"} 0 node_cpu_core_throttles_total{core="1",package="0"} 0 node_cpu_core_throttles_total{core="1",package="1"} 9 -# HELP node_cpu_guest_seconds_total Seconds the cpus spent in guests (VMs) for each mode. +# HELP node_cpu_flag_info The `flags` field of CPU information from /proc/cpuinfo taken from the first core. 
+# TYPE node_cpu_flag_info gauge +node_cpu_flag_info{flag="aes"} 1 +node_cpu_flag_info{flag="avx"} 1 +node_cpu_flag_info{flag="avx2"} 1 +node_cpu_flag_info{flag="constant_tsc"} 1 +# HELP node_cpu_frequency_hertz CPU frequency in hertz from /proc/cpuinfo. +# TYPE node_cpu_frequency_hertz gauge +node_cpu_frequency_hertz{core="0",cpu="0",package="0"} 7.99998e+08 +node_cpu_frequency_hertz{core="0",cpu="4",package="0"} 7.99989e+08 +node_cpu_frequency_hertz{core="1",cpu="1",package="0"} 8.00037e+08 +node_cpu_frequency_hertz{core="1",cpu="5",package="0"} 8.00083e+08 +node_cpu_frequency_hertz{core="2",cpu="2",package="0"} 8.0001e+08 +node_cpu_frequency_hertz{core="2",cpu="6",package="0"} 8.00017e+08 +node_cpu_frequency_hertz{core="3",cpu="3",package="0"} 8.00028e+08 +node_cpu_frequency_hertz{core="3",cpu="7",package="0"} 8.0003e+08 +# HELP node_cpu_guest_seconds_total Seconds the CPUs spent in guests (VMs) for each mode. # TYPE node_cpu_guest_seconds_total counter node_cpu_guest_seconds_total{cpu="0",mode="nice"} 0.01 node_cpu_guest_seconds_total{cpu="0",mode="user"} 0.02 @@ -266,29 +349,46 @@ node_cpu_info{cachesize="8192 KB",core="2",cpu="2",family="6",microcode="0xb4",m node_cpu_info{cachesize="8192 KB",core="2",cpu="6",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 node_cpu_info{cachesize="8192 KB",core="3",cpu="3",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 node_cpu_info{cachesize="8192 KB",core="3",cpu="7",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 -# HELP node_cpu_package_throttles_total Number of times this cpu package has been throttled. +# HELP node_cpu_isolated Whether each core is isolated, information from /sys/devices/system/cpu/isolated. +# TYPE node_cpu_isolated gauge +node_cpu_isolated{cpu="1"} 1 +node_cpu_isolated{cpu="3"} 1 +node_cpu_isolated{cpu="4"} 1 +node_cpu_isolated{cpu="5"} 1 +node_cpu_isolated{cpu="9"} 1 +# HELP node_cpu_package_throttles_total Number of times this CPU package has been throttled. # TYPE node_cpu_package_throttles_total counter node_cpu_package_throttles_total{package="0"} 30 node_cpu_package_throttles_total{package="1"} 6 -# HELP node_cpu_scaling_frequency_hertz Current scaled cpu thread frequency in hertz. +# HELP node_cpu_scaling_frequency_hertz Current scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_hertz gauge node_cpu_scaling_frequency_hertz{cpu="0"} 1.699981e+09 node_cpu_scaling_frequency_hertz{cpu="1"} 1.699981e+09 node_cpu_scaling_frequency_hertz{cpu="2"} 8e+06 node_cpu_scaling_frequency_hertz{cpu="3"} 8e+06 -# HELP node_cpu_scaling_frequency_max_hertz Maximum scaled cpu thread frequency in hertz. +# HELP node_cpu_scaling_frequency_max_hertz Maximum scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_max_hertz gauge node_cpu_scaling_frequency_max_hertz{cpu="0"} 3.7e+09 node_cpu_scaling_frequency_max_hertz{cpu="1"} 3.7e+09 node_cpu_scaling_frequency_max_hertz{cpu="2"} 4.2e+09 node_cpu_scaling_frequency_max_hertz{cpu="3"} 4.2e+09 -# HELP node_cpu_scaling_frequency_min_hertz Minimum scaled cpu thread frequency in hertz. +# HELP node_cpu_scaling_frequency_min_hertz Minimum scaled CPU thread frequency in hertz. 
# TYPE node_cpu_scaling_frequency_min_hertz gauge node_cpu_scaling_frequency_min_hertz{cpu="0"} 8e+08 node_cpu_scaling_frequency_min_hertz{cpu="1"} 8e+08 node_cpu_scaling_frequency_min_hertz{cpu="2"} 1e+06 node_cpu_scaling_frequency_min_hertz{cpu="3"} 1e+06 -# HELP node_cpu_seconds_total Seconds the cpus spent in each mode. +# HELP node_cpu_scaling_governor Current enabled CPU frequency governor. +# TYPE node_cpu_scaling_governor gauge +node_cpu_scaling_governor{cpu="0",governor="performance"} 0 +node_cpu_scaling_governor{cpu="0",governor="powersave"} 1 +node_cpu_scaling_governor{cpu="1",governor="performance"} 0 +node_cpu_scaling_governor{cpu="1",governor="powersave"} 1 +node_cpu_scaling_governor{cpu="2",governor="performance"} 0 +node_cpu_scaling_governor{cpu="2",governor="powersave"} 1 +node_cpu_scaling_governor{cpu="3",governor="performance"} 0 +node_cpu_scaling_governor{cpu="3",governor="powersave"} 1 +# HELP node_cpu_seconds_total Seconds the CPUs spent in each mode. # TYPE node_cpu_seconds_total counter node_cpu_seconds_total{cpu="0",mode="idle"} 10870.69 node_cpu_seconds_total{cpu="0",mode="iowait"} 2.2 @@ -354,6 +454,36 @@ node_cpu_seconds_total{cpu="7",mode="softirq"} 0.31 node_cpu_seconds_total{cpu="7",mode="steal"} 0 node_cpu_seconds_total{cpu="7",mode="system"} 101.64 node_cpu_seconds_total{cpu="7",mode="user"} 290.98 +# HELP node_cpu_vulnerabilities_info Details of each CPU vulnerability reported by sysfs. The value of the series is an int encoded state of the vulnerability. The same state is stored as a string in the label +# TYPE node_cpu_vulnerabilities_info gauge +node_cpu_vulnerabilities_info{codename="itlb_multihit",mitigation="",state="not affected"} 1 +node_cpu_vulnerabilities_info{codename="mds",mitigation="",state="vulnerable"} 1 +node_cpu_vulnerabilities_info{codename="retbleed",mitigation="untrained return thunk; SMT enabled with STIBP protection",state="mitigation"} 1 +node_cpu_vulnerabilities_info{codename="spectre_v1",mitigation="usercopy/swapgs barriers and __user pointer sanitization",state="mitigation"} 1 +node_cpu_vulnerabilities_info{codename="spectre_v2",mitigation="Retpolines, IBPB: conditional, STIBP: always-on, RSB filling, PBRSB-eIBRS: Not affected",state="mitigation"} 1 +# HELP node_disk_ata_rotation_rate_rpm ATA disk rotation rate in RPMs (0 for SSDs). +# TYPE node_disk_ata_rotation_rate_rpm gauge +node_disk_ata_rotation_rate_rpm{device="sda"} 7200 +node_disk_ata_rotation_rate_rpm{device="sdb"} 0 +node_disk_ata_rotation_rate_rpm{device="sdc"} 0 +# HELP node_disk_ata_write_cache ATA disk has a write cache. +# TYPE node_disk_ata_write_cache gauge +node_disk_ata_write_cache{device="sda"} 1 +node_disk_ata_write_cache{device="sdb"} 1 +node_disk_ata_write_cache{device="sdc"} 1 +# HELP node_disk_ata_write_cache_enabled ATA disk has its write cache enabled. +# TYPE node_disk_ata_write_cache_enabled gauge +node_disk_ata_write_cache_enabled{device="sda"} 0 +node_disk_ata_write_cache_enabled{device="sdb"} 1 +node_disk_ata_write_cache_enabled{device="sdc"} 0 +# HELP node_disk_device_mapper_info Info about disk device mapper. 
+# TYPE node_disk_device_mapper_info gauge +node_disk_device_mapper_info{device="dm-0",lv_layer="",lv_name="",name="nvme0n1_crypt",uuid="CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt",vg_name=""} 1 +node_disk_device_mapper_info{device="dm-1",lv_layer="",lv_name="swap_1",name="system-swap_1",uuid="LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-2",lv_layer="",lv_name="root",name="system-root",uuid="LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-3",lv_layer="",lv_name="var",name="system-var",uuid="LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-4",lv_layer="",lv_name="tmp",name="system-tmp",uuid="LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H",vg_name="system"} 1 +node_disk_device_mapper_info{device="dm-5",lv_layer="",lv_name="home",name="system-home",uuid="LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf",vg_name="system"} 1 # HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. # TYPE node_disk_discard_time_seconds_total counter node_disk_discard_time_seconds_total{device="sdb"} 11.13 @@ -370,12 +500,41 @@ node_disk_discards_completed_total{device="sdc"} 18851 # TYPE node_disk_discards_merged_total counter node_disk_discards_merged_total{device="sdb"} 0 node_disk_discards_merged_total{device="sdc"} 0 +# HELP node_disk_filesystem_info Info about disk filesystem. +# TYPE node_disk_filesystem_info gauge +node_disk_filesystem_info{device="dm-0",type="LVM2_member",usage="raid",uuid="c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB",version="LVM2 001"} 1 +node_disk_filesystem_info{device="dm-1",type="swap",usage="other",uuid="5272bb60-04b5-49cd-b730-be57c7604450",version="1"} 1 +node_disk_filesystem_info{device="dm-2",type="ext4",usage="filesystem",uuid="3deafd0d-faff-4695-8d15-51061ae1f51b",version="1.0"} 1 +node_disk_filesystem_info{device="dm-3",type="ext4",usage="filesystem",uuid="5c772222-f7d4-4c8e-87e8-e97df6b7a45e",version="1.0"} 1 +node_disk_filesystem_info{device="dm-4",type="ext4",usage="filesystem",uuid="a9479d44-60e1-4015-a1e5-bb065e6dd11b",version="1.0"} 1 +node_disk_filesystem_info{device="dm-5",type="ext4",usage="filesystem",uuid="b05b726a-c718-4c4d-8641-7c73a7696d83",version="1.0"} 1 +node_disk_filesystem_info{device="mmcblk0p1",type="vfat",usage="filesystem",uuid="6284-658D",version="FAT32"} 1 +node_disk_filesystem_info{device="mmcblk0p2",type="ext4",usage="filesystem",uuid="83324ce8-a6f3-4e35-ad64-dbb3d6b87a32",version="1.0"} 1 +node_disk_filesystem_info{device="sda",type="LVM2_member",usage="raid",uuid="cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw",version="LVM2 001"} 1 +node_disk_filesystem_info{device="sdc",type="LVM2_member",usage="raid",uuid="QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb",version="LVM2 001"} 1 # HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. # TYPE node_disk_flush_requests_time_seconds_total counter node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944 # HELP node_disk_flush_requests_total The total number of flush requests completed successfully # TYPE node_disk_flush_requests_total counter node_disk_flush_requests_total{device="sdc"} 1555 +# HELP node_disk_info Info of /sys/block/. 
+# TYPE node_disk_info gauge +node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 +node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="0x83e36d93",wwn=""} 1 +node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="0x83e36d93",wwn=""} 1 +node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="0x83e36d93",wwn=""} 1 +node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",rotational="0",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1 +node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",rotational="1",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1 +node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",rotational="0",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1 +node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",rotational="0",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1 +node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",rotational="0",serial="AAAABBBBCCCC1",wwn=""} 1 +node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",rotational="0",serial="",wwn=""} 1 # HELP node_disk_io_now The number of I/Os currently in progress. # TYPE node_disk_io_now gauge node_disk_io_now{device="dm-0"} 0 @@ -563,6 +722,9 @@ node_disk_written_bytes_total{device="sdb"} 1.01012736e+09 node_disk_written_bytes_total{device="sdc"} 8.852736e+07 node_disk_written_bytes_total{device="sr0"} 0 node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 +# HELP node_dmi_info A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, board_asset_tag, board_name, board_serial, board_vendor, board_version, chassis_asset_tag, chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, product_sku, product_uuid, product_version, system_vendor if provided by DMI. 
+# TYPE node_dmi_info gauge +node_dmi_info{bios_date="04/12/2021",bios_release="2.2",bios_vendor="Dell Inc.",bios_version="2.2.4",board_name="07PXPY",board_serial=".7N62AI2.GRTCL6944100GP.",board_vendor="Dell Inc.",board_version="A01",chassis_asset_tag="",chassis_serial="7N62AI2",chassis_vendor="Dell Inc.",chassis_version="",product_family="PowerEdge",product_name="PowerEdge R6515",product_serial="7N62AI2",product_sku="SKU=NotProvided;ModelName=PowerEdge R6515",product_uuid="83340ca8-cb49-4474-8c29-d2088ca84dd9",product_version="�[�",system_vendor="Dell Inc."} 1 # HELP node_drbd_activitylog_writes_total Number of updates of the activity log area of the meta data. # TYPE node_drbd_activitylog_writes_total counter node_drbd_activitylog_writes_total{device="drbd1"} 1100 @@ -627,8 +789,69 @@ node_edac_uncorrectable_errors_total{controller="0"} 5 # HELP node_entropy_available_bits Bits of available entropy. # TYPE node_entropy_available_bits gauge node_entropy_available_bits 1337 -# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which node_exporter was built. +# HELP node_entropy_pool_size_bits Bits of entropy pool. +# TYPE node_entropy_pool_size_bits gauge +node_entropy_pool_size_bits 4096 +# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. # TYPE node_exporter_build_info gauge +# HELP node_fibrechannel_dumped_frames_total Number of dumped frames +# TYPE node_fibrechannel_dumped_frames_total counter +node_fibrechannel_dumped_frames_total{fc_host="host1"} 0 +# HELP node_fibrechannel_error_frames_total Number of errors in frames +# TYPE node_fibrechannel_error_frames_total counter +node_fibrechannel_error_frames_total{fc_host="host0"} 0 +node_fibrechannel_error_frames_total{fc_host="host1"} 19 +# HELP node_fibrechannel_fcp_packet_aborts_total Number of aborted packets +# TYPE node_fibrechannel_fcp_packet_aborts_total counter +node_fibrechannel_fcp_packet_aborts_total{fc_host="host0"} 19 +# HELP node_fibrechannel_info Non-numeric data from /sys/class/fc_host/, value is always 1. +# TYPE node_fibrechannel_info gauge +node_fibrechannel_info{dev_loss_tmo="",fabric_name="",fc_host="host1",port_id="",port_name="",port_state="",port_type="",speed="8 Gbit",supported_classes="",supported_speeds="",symbolic_name=""} 1 +node_fibrechannel_info{dev_loss_tmo="30",fabric_name="0",fc_host="host0",port_id="000002",port_name="1000e0071bce95f2",port_state="Online",port_type="Point-To-Point (direct nport connection)",speed="16 Gbit",supported_classes="Class 3",supported_speeds="4 Gbit, 8 Gbit, 16 Gbit",symbolic_name="Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. 
OS:Linux"} 1 +# HELP node_fibrechannel_invalid_crc_total Invalid Cyclic Redundancy Check count +# TYPE node_fibrechannel_invalid_crc_total counter +node_fibrechannel_invalid_crc_total{fc_host="host0"} 2 +node_fibrechannel_invalid_crc_total{fc_host="host1"} 32 +# HELP node_fibrechannel_invalid_tx_words_total Number of invalid words transmitted by host port +# TYPE node_fibrechannel_invalid_tx_words_total counter +node_fibrechannel_invalid_tx_words_total{fc_host="host0"} 8 +node_fibrechannel_invalid_tx_words_total{fc_host="host1"} 128 +# HELP node_fibrechannel_link_failure_total Number of times the host port link has failed +# TYPE node_fibrechannel_link_failure_total counter +node_fibrechannel_link_failure_total{fc_host="host0"} 9 +node_fibrechannel_link_failure_total{fc_host="host1"} 144 +# HELP node_fibrechannel_loss_of_signal_total Number of times signal has been lost +# TYPE node_fibrechannel_loss_of_signal_total counter +node_fibrechannel_loss_of_signal_total{fc_host="host0"} 17 +node_fibrechannel_loss_of_signal_total{fc_host="host1"} 272 +# HELP node_fibrechannel_loss_of_sync_total Number of failures on either bit or transmission word boundaries +# TYPE node_fibrechannel_loss_of_sync_total counter +node_fibrechannel_loss_of_sync_total{fc_host="host0"} 16 +node_fibrechannel_loss_of_sync_total{fc_host="host1"} 256 +# HELP node_fibrechannel_nos_total Number Not_Operational Primitive Sequence received by host port +# TYPE node_fibrechannel_nos_total counter +node_fibrechannel_nos_total{fc_host="host0"} 18 +node_fibrechannel_nos_total{fc_host="host1"} 288 +# HELP node_fibrechannel_rx_frames_total Number of frames received +# TYPE node_fibrechannel_rx_frames_total counter +node_fibrechannel_rx_frames_total{fc_host="host0"} 3 +node_fibrechannel_rx_frames_total{fc_host="host1"} 48 +# HELP node_fibrechannel_rx_words_total Number of words received by host port +# TYPE node_fibrechannel_rx_words_total counter +node_fibrechannel_rx_words_total{fc_host="host0"} 4 +node_fibrechannel_rx_words_total{fc_host="host1"} 64 +# HELP node_fibrechannel_seconds_since_last_reset_total Number of seconds since last host port reset +# TYPE node_fibrechannel_seconds_since_last_reset_total counter +node_fibrechannel_seconds_since_last_reset_total{fc_host="host0"} 7 +node_fibrechannel_seconds_since_last_reset_total{fc_host="host1"} 112 +# HELP node_fibrechannel_tx_frames_total Number of frames transmitted by host port +# TYPE node_fibrechannel_tx_frames_total counter +node_fibrechannel_tx_frames_total{fc_host="host0"} 5 +node_fibrechannel_tx_frames_total{fc_host="host1"} 80 +# HELP node_fibrechannel_tx_words_total Number of words transmitted by host port +# TYPE node_fibrechannel_tx_words_total counter +node_fibrechannel_tx_words_total{fc_host="host0"} 6 +node_fibrechannel_tx_words_total{fc_host="host1"} 96 # HELP node_filefd_allocated File descriptor statistics: allocated. 
# TYPE node_filefd_allocated gauge node_filefd_allocated 1024 @@ -680,6 +903,10 @@ node_hwmon_fan_target_rpm{chip="nct6779",sensor="fan2"} 27000 # HELP node_hwmon_fan_tolerance Hardware monitor fan element tolerance # TYPE node_hwmon_fan_tolerance gauge node_hwmon_fan_tolerance{chip="nct6779",sensor="fan2"} 0 +# HELP node_hwmon_freq_freq_mhz Hardware monitor for GPU frequency in MHz +# TYPE node_hwmon_freq_freq_mhz gauge +node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="mclk"} 300 +node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="sclk"} 214 # HELP node_hwmon_in_alarm Hardware sensor alarm status (in) # TYPE node_hwmon_in_alarm gauge node_hwmon_in_alarm{chip="nct6779",sensor="in0"} 0 @@ -793,18 +1020,20 @@ node_hwmon_pwm_weight_temp_step_tol{chip="nct6779",sensor="pwm1"} 0 # TYPE node_hwmon_sensor_label gauge node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp1"} 1 node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp2"} 1 -node_hwmon_sensor_label{chip="platform_applesmc_768",label="left_side",sensor="fan1"} 1 -node_hwmon_sensor_label{chip="platform_applesmc_768",label="right_side",sensor="fan2"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_0",sensor="temp2"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_1",sensor="temp3"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_2",sensor="temp4"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_3",sensor="temp5"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_0",label="physical_id_0",sensor="temp1"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_0",sensor="temp2"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_1",sensor="temp3"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_2",sensor="temp4"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_3",sensor="temp5"} 1 -node_hwmon_sensor_label{chip="platform_coretemp_1",label="physical_id_0",sensor="temp1"} 1 +node_hwmon_sensor_label{chip="hwmon4",label="mclk",sensor="freq2"} 1 +node_hwmon_sensor_label{chip="hwmon4",label="sclk",sensor="freq1"} 1 +node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side",sensor="fan1"} 1 +node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side",sensor="fan2"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 0",sensor="temp2"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 1",sensor="temp3"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 2",sensor="temp4"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 3",sensor="temp5"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="Physical id 0",sensor="temp1"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 0",sensor="temp2"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 1",sensor="temp3"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 2",sensor="temp4"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 3",sensor="temp5"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="Physical id 0",sensor="temp1"} 1 # HELP node_hwmon_temp_celsius Hardware monitor for temperature (input) # TYPE node_hwmon_temp_celsius gauge node_hwmon_temp_celsius{chip="hwmon4",sensor="temp1"} 55 @@ -1163,6 +1392,184 @@ node_ksmd_run 1 # HELP node_ksmd_sleep_seconds ksmd 'sleep_millisecs' file. 
# TYPE node_ksmd_sleep_seconds gauge node_ksmd_sleep_seconds 0.02 +# HELP node_lnstat_allocs_total linux network cache stats +# TYPE node_lnstat_allocs_total counter +node_lnstat_allocs_total{cpu="0",subsystem="arp_cache"} 1 +node_lnstat_allocs_total{cpu="0",subsystem="ndisc_cache"} 240 +node_lnstat_allocs_total{cpu="1",subsystem="arp_cache"} 13 +node_lnstat_allocs_total{cpu="1",subsystem="ndisc_cache"} 252 +# HELP node_lnstat_delete_list_total linux network cache stats +# TYPE node_lnstat_delete_list_total counter +node_lnstat_delete_list_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_delete_list_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_delete_list_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_delete_list_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_delete_total linux network cache stats +# TYPE node_lnstat_delete_total counter +node_lnstat_delete_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_delete_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_delete_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_delete_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_destroys_total linux network cache stats +# TYPE node_lnstat_destroys_total counter +node_lnstat_destroys_total{cpu="0",subsystem="arp_cache"} 2 +node_lnstat_destroys_total{cpu="0",subsystem="ndisc_cache"} 241 +node_lnstat_destroys_total{cpu="1",subsystem="arp_cache"} 14 +node_lnstat_destroys_total{cpu="1",subsystem="ndisc_cache"} 253 +# HELP node_lnstat_drop_total linux network cache stats +# TYPE node_lnstat_drop_total counter +node_lnstat_drop_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_drop_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_drop_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_drop_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_early_drop_total linux network cache stats +# TYPE node_lnstat_early_drop_total counter +node_lnstat_early_drop_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_early_drop_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_early_drop_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_early_drop_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_entries_total linux network cache stats +# TYPE node_lnstat_entries_total counter +node_lnstat_entries_total{cpu="0",subsystem="arp_cache"} 20 +node_lnstat_entries_total{cpu="0",subsystem="ndisc_cache"} 36 +node_lnstat_entries_total{cpu="0",subsystem="nf_conntrack"} 33 +node_lnstat_entries_total{cpu="1",subsystem="arp_cache"} 20 +node_lnstat_entries_total{cpu="1",subsystem="ndisc_cache"} 36 +node_lnstat_entries_total{cpu="1",subsystem="nf_conntrack"} 33 +node_lnstat_entries_total{cpu="2",subsystem="nf_conntrack"} 33 +node_lnstat_entries_total{cpu="3",subsystem="nf_conntrack"} 33 +# HELP node_lnstat_expect_create_total linux network cache stats +# TYPE node_lnstat_expect_create_total counter +node_lnstat_expect_create_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_expect_create_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_expect_create_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_expect_create_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_expect_delete_total linux network cache stats +# TYPE node_lnstat_expect_delete_total counter +node_lnstat_expect_delete_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_expect_delete_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_expect_delete_total{cpu="2",subsystem="nf_conntrack"} 0 
+node_lnstat_expect_delete_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_expect_new_total linux network cache stats +# TYPE node_lnstat_expect_new_total counter +node_lnstat_expect_new_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_expect_new_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_expect_new_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_expect_new_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_forced_gc_runs_total linux network cache stats +# TYPE node_lnstat_forced_gc_runs_total counter +node_lnstat_forced_gc_runs_total{cpu="0",subsystem="arp_cache"} 10 +node_lnstat_forced_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 249 +node_lnstat_forced_gc_runs_total{cpu="1",subsystem="arp_cache"} 22 +node_lnstat_forced_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 261 +# HELP node_lnstat_found_total linux network cache stats +# TYPE node_lnstat_found_total counter +node_lnstat_found_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_found_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_found_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_found_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_hash_grows_total linux network cache stats +# TYPE node_lnstat_hash_grows_total counter +node_lnstat_hash_grows_total{cpu="0",subsystem="arp_cache"} 3 +node_lnstat_hash_grows_total{cpu="0",subsystem="ndisc_cache"} 242 +node_lnstat_hash_grows_total{cpu="1",subsystem="arp_cache"} 15 +node_lnstat_hash_grows_total{cpu="1",subsystem="ndisc_cache"} 254 +# HELP node_lnstat_hits_total linux network cache stats +# TYPE node_lnstat_hits_total counter +node_lnstat_hits_total{cpu="0",subsystem="arp_cache"} 5 +node_lnstat_hits_total{cpu="0",subsystem="ndisc_cache"} 244 +node_lnstat_hits_total{cpu="1",subsystem="arp_cache"} 17 +node_lnstat_hits_total{cpu="1",subsystem="ndisc_cache"} 256 +# HELP node_lnstat_icmp_error_total linux network cache stats +# TYPE node_lnstat_icmp_error_total counter +node_lnstat_icmp_error_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_icmp_error_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_icmp_error_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_icmp_error_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_ignore_total linux network cache stats +# TYPE node_lnstat_ignore_total counter +node_lnstat_ignore_total{cpu="0",subsystem="nf_conntrack"} 22666 +node_lnstat_ignore_total{cpu="1",subsystem="nf_conntrack"} 22180 +node_lnstat_ignore_total{cpu="2",subsystem="nf_conntrack"} 22740 +node_lnstat_ignore_total{cpu="3",subsystem="nf_conntrack"} 22152 +# HELP node_lnstat_insert_failed_total linux network cache stats +# TYPE node_lnstat_insert_failed_total counter +node_lnstat_insert_failed_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_insert_failed_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_insert_failed_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_insert_failed_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_insert_total linux network cache stats +# TYPE node_lnstat_insert_total counter +node_lnstat_insert_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_insert_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_insert_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_insert_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_invalid_total linux network cache stats +# TYPE node_lnstat_invalid_total counter +node_lnstat_invalid_total{cpu="0",subsystem="nf_conntrack"} 3 
+node_lnstat_invalid_total{cpu="1",subsystem="nf_conntrack"} 2 +node_lnstat_invalid_total{cpu="2",subsystem="nf_conntrack"} 1 +node_lnstat_invalid_total{cpu="3",subsystem="nf_conntrack"} 47 +# HELP node_lnstat_lookups_total linux network cache stats +# TYPE node_lnstat_lookups_total counter +node_lnstat_lookups_total{cpu="0",subsystem="arp_cache"} 4 +node_lnstat_lookups_total{cpu="0",subsystem="ndisc_cache"} 243 +node_lnstat_lookups_total{cpu="1",subsystem="arp_cache"} 16 +node_lnstat_lookups_total{cpu="1",subsystem="ndisc_cache"} 255 +# HELP node_lnstat_new_total linux network cache stats +# TYPE node_lnstat_new_total counter +node_lnstat_new_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_new_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_new_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_new_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_periodic_gc_runs_total linux network cache stats +# TYPE node_lnstat_periodic_gc_runs_total counter +node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="arp_cache"} 9 +node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 248 +node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="arp_cache"} 21 +node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 260 +# HELP node_lnstat_rcv_probes_mcast_total linux network cache stats +# TYPE node_lnstat_rcv_probes_mcast_total counter +node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="arp_cache"} 7 +node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="ndisc_cache"} 246 +node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="arp_cache"} 19 +node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="ndisc_cache"} 258 +# HELP node_lnstat_rcv_probes_ucast_total linux network cache stats +# TYPE node_lnstat_rcv_probes_ucast_total counter +node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="arp_cache"} 8 +node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="ndisc_cache"} 247 +node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="arp_cache"} 20 +node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="ndisc_cache"} 259 +# HELP node_lnstat_res_failed_total linux network cache stats +# TYPE node_lnstat_res_failed_total counter +node_lnstat_res_failed_total{cpu="0",subsystem="arp_cache"} 6 +node_lnstat_res_failed_total{cpu="0",subsystem="ndisc_cache"} 245 +node_lnstat_res_failed_total{cpu="1",subsystem="arp_cache"} 18 +node_lnstat_res_failed_total{cpu="1",subsystem="ndisc_cache"} 257 +# HELP node_lnstat_search_restart_total linux network cache stats +# TYPE node_lnstat_search_restart_total counter +node_lnstat_search_restart_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_search_restart_total{cpu="1",subsystem="nf_conntrack"} 2 +node_lnstat_search_restart_total{cpu="2",subsystem="nf_conntrack"} 1 +node_lnstat_search_restart_total{cpu="3",subsystem="nf_conntrack"} 4 +# HELP node_lnstat_searched_total linux network cache stats +# TYPE node_lnstat_searched_total counter +node_lnstat_searched_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_searched_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_searched_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_searched_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_table_fulls_total linux network cache stats +# TYPE node_lnstat_table_fulls_total counter +node_lnstat_table_fulls_total{cpu="0",subsystem="arp_cache"} 12 +node_lnstat_table_fulls_total{cpu="0",subsystem="ndisc_cache"} 251 +node_lnstat_table_fulls_total{cpu="1",subsystem="arp_cache"} 24 
+node_lnstat_table_fulls_total{cpu="1",subsystem="ndisc_cache"} 263 +# HELP node_lnstat_unresolved_discards_total linux network cache stats +# TYPE node_lnstat_unresolved_discards_total counter +node_lnstat_unresolved_discards_total{cpu="0",subsystem="arp_cache"} 11 +node_lnstat_unresolved_discards_total{cpu="0",subsystem="ndisc_cache"} 250 +node_lnstat_unresolved_discards_total{cpu="1",subsystem="arp_cache"} 23 +node_lnstat_unresolved_discards_total{cpu="1",subsystem="ndisc_cache"} 262 # HELP node_load1 1m load average. # TYPE node_load1 gauge node_load1 0.21 @@ -1183,6 +1590,7 @@ node_md_blocks{device="md12"} 3.886394368e+09 node_md_blocks{device="md120"} 2.095104e+06 node_md_blocks{device="md126"} 1.855870976e+09 node_md_blocks{device="md127"} 3.12319552e+08 +node_md_blocks{device="md201"} 1.993728e+06 node_md_blocks{device="md219"} 7932 node_md_blocks{device="md3"} 5.853468288e+09 node_md_blocks{device="md4"} 4.883648e+06 @@ -1201,6 +1609,7 @@ node_md_blocks_synced{device="md12"} 3.886394368e+09 node_md_blocks_synced{device="md120"} 2.095104e+06 node_md_blocks_synced{device="md126"} 1.855870976e+09 node_md_blocks_synced{device="md127"} 3.12319552e+08 +node_md_blocks_synced{device="md201"} 114176 node_md_blocks_synced{device="md219"} 7932 node_md_blocks_synced{device="md3"} 5.853468288e+09 node_md_blocks_synced{device="md4"} 4.883648e+06 @@ -1208,6 +1617,14 @@ node_md_blocks_synced{device="md6"} 1.6775552e+07 node_md_blocks_synced{device="md7"} 7.813735424e+09 node_md_blocks_synced{device="md8"} 1.6775552e+07 node_md_blocks_synced{device="md9"} 0 +# HELP node_md_degraded Number of degraded disks on device. +# TYPE node_md_degraded gauge +node_md_degraded{device="md0"} 0 +node_md_degraded{device="md1"} 0 +node_md_degraded{device="md10"} 0 +node_md_degraded{device="md4"} 0 +node_md_degraded{device="md5"} 1 +node_md_degraded{device="md6"} 1 # HELP node_md_disks Number of active/failed/spare disks of device. # TYPE node_md_disks gauge node_md_disks{device="md0",state="active"} 2 @@ -1237,6 +1654,9 @@ node_md_disks{device="md126",state="spare"} 0 node_md_disks{device="md127",state="active"} 2 node_md_disks{device="md127",state="failed"} 0 node_md_disks{device="md127",state="spare"} 0 +node_md_disks{device="md201",state="active"} 2 +node_md_disks{device="md201",state="failed"} 0 +node_md_disks{device="md201",state="spare"} 0 node_md_disks{device="md219",state="active"} 0 node_md_disks{device="md219",state="failed"} 0 node_md_disks{device="md219",state="spare"} 3 @@ -1269,6 +1689,7 @@ node_md_disks_required{device="md12"} 2 node_md_disks_required{device="md120"} 2 node_md_disks_required{device="md126"} 2 node_md_disks_required{device="md127"} 2 +node_md_disks_required{device="md201"} 2 node_md_disks_required{device="md219"} 0 node_md_disks_required{device="md3"} 8 node_md_disks_required{device="md4"} 0 @@ -1276,69 +1697,98 @@ node_md_disks_required{device="md6"} 2 node_md_disks_required{device="md7"} 4 node_md_disks_required{device="md8"} 2 node_md_disks_required{device="md9"} 4 +# HELP node_md_raid_disks Number of raid disks on device. +# TYPE node_md_raid_disks gauge +node_md_raid_disks{device="md0"} 2 +node_md_raid_disks{device="md1"} 2 +node_md_raid_disks{device="md10"} 4 +node_md_raid_disks{device="md4"} 3 +node_md_raid_disks{device="md5"} 3 +node_md_raid_disks{device="md6"} 4 # HELP node_md_state Indicates the state of md-device. 
# TYPE node_md_state gauge node_md_state{device="md0",state="active"} 1 +node_md_state{device="md0",state="check"} 0 node_md_state{device="md0",state="inactive"} 0 node_md_state{device="md0",state="recovering"} 0 node_md_state{device="md0",state="resync"} 0 node_md_state{device="md00",state="active"} 1 +node_md_state{device="md00",state="check"} 0 node_md_state{device="md00",state="inactive"} 0 node_md_state{device="md00",state="recovering"} 0 node_md_state{device="md00",state="resync"} 0 node_md_state{device="md10",state="active"} 1 +node_md_state{device="md10",state="check"} 0 node_md_state{device="md10",state="inactive"} 0 node_md_state{device="md10",state="recovering"} 0 node_md_state{device="md10",state="resync"} 0 node_md_state{device="md101",state="active"} 1 +node_md_state{device="md101",state="check"} 0 node_md_state{device="md101",state="inactive"} 0 node_md_state{device="md101",state="recovering"} 0 node_md_state{device="md101",state="resync"} 0 node_md_state{device="md11",state="active"} 0 +node_md_state{device="md11",state="check"} 0 node_md_state{device="md11",state="inactive"} 0 node_md_state{device="md11",state="recovering"} 0 node_md_state{device="md11",state="resync"} 1 node_md_state{device="md12",state="active"} 1 +node_md_state{device="md12",state="check"} 0 node_md_state{device="md12",state="inactive"} 0 node_md_state{device="md12",state="recovering"} 0 node_md_state{device="md12",state="resync"} 0 node_md_state{device="md120",state="active"} 1 +node_md_state{device="md120",state="check"} 0 node_md_state{device="md120",state="inactive"} 0 node_md_state{device="md120",state="recovering"} 0 node_md_state{device="md120",state="resync"} 0 node_md_state{device="md126",state="active"} 1 +node_md_state{device="md126",state="check"} 0 node_md_state{device="md126",state="inactive"} 0 node_md_state{device="md126",state="recovering"} 0 node_md_state{device="md126",state="resync"} 0 node_md_state{device="md127",state="active"} 1 +node_md_state{device="md127",state="check"} 0 node_md_state{device="md127",state="inactive"} 0 node_md_state{device="md127",state="recovering"} 0 node_md_state{device="md127",state="resync"} 0 +node_md_state{device="md201",state="active"} 0 +node_md_state{device="md201",state="check"} 1 +node_md_state{device="md201",state="inactive"} 0 +node_md_state{device="md201",state="recovering"} 0 +node_md_state{device="md201",state="resync"} 0 node_md_state{device="md219",state="active"} 0 +node_md_state{device="md219",state="check"} 0 node_md_state{device="md219",state="inactive"} 1 node_md_state{device="md219",state="recovering"} 0 node_md_state{device="md219",state="resync"} 0 node_md_state{device="md3",state="active"} 1 +node_md_state{device="md3",state="check"} 0 node_md_state{device="md3",state="inactive"} 0 node_md_state{device="md3",state="recovering"} 0 node_md_state{device="md3",state="resync"} 0 node_md_state{device="md4",state="active"} 0 +node_md_state{device="md4",state="check"} 0 node_md_state{device="md4",state="inactive"} 1 node_md_state{device="md4",state="recovering"} 0 node_md_state{device="md4",state="resync"} 0 node_md_state{device="md6",state="active"} 0 +node_md_state{device="md6",state="check"} 0 node_md_state{device="md6",state="inactive"} 0 node_md_state{device="md6",state="recovering"} 1 node_md_state{device="md6",state="resync"} 0 node_md_state{device="md7",state="active"} 1 +node_md_state{device="md7",state="check"} 0 node_md_state{device="md7",state="inactive"} 0 node_md_state{device="md7",state="recovering"} 0 
node_md_state{device="md7",state="resync"} 0 node_md_state{device="md8",state="active"} 0 +node_md_state{device="md8",state="check"} 0 node_md_state{device="md8",state="inactive"} 0 node_md_state{device="md8",state="recovering"} 0 node_md_state{device="md8",state="resync"} 1 node_md_state{device="md9",state="active"} 0 +node_md_state{device="md9",state="check"} 0 node_md_state{device="md9",state="inactive"} 0 node_md_state{device="md9",state="recovering"} 0 node_md_state{device="md9",state="resync"} 1 @@ -1943,6 +2393,15 @@ node_netstat_TcpExt_SyncookiesRecv 0 # HELP node_netstat_TcpExt_SyncookiesSent Statistic TcpExtSyncookiesSent. # TYPE node_netstat_TcpExt_SyncookiesSent untyped node_netstat_TcpExt_SyncookiesSent 0 +# HELP node_netstat_TcpExt_TCPOFOQueue Statistic TcpExtTCPOFOQueue. +# TYPE node_netstat_TcpExt_TCPOFOQueue untyped +node_netstat_TcpExt_TCPOFOQueue 42 +# HELP node_netstat_TcpExt_TCPRcvQDrop Statistic TcpExtTCPRcvQDrop. +# TYPE node_netstat_TcpExt_TCPRcvQDrop untyped +node_netstat_TcpExt_TCPRcvQDrop 131 +# HELP node_netstat_TcpExt_TCPTimeouts Statistic TcpExtTCPTimeouts. +# TYPE node_netstat_TcpExt_TCPTimeouts untyped +node_netstat_TcpExt_TCPTimeouts 115 # HELP node_netstat_Tcp_ActiveOpens Statistic TcpActiveOpens. # TYPE node_netstat_Tcp_ActiveOpens untyped node_netstat_Tcp_ActiveOpens 3556 @@ -2009,254 +2468,127 @@ node_netstat_Udp_RcvbufErrors 9 # HELP node_netstat_Udp_SndbufErrors Statistic UdpSndbufErrors. # TYPE node_netstat_Udp_SndbufErrors untyped node_netstat_Udp_SndbufErrors 8 -# HELP node_network_address_assign_type address_assign_type value of /sys/class/net/. +# HELP node_network_address_assign_type Network device property: address_assign_type # TYPE node_network_address_assign_type gauge +node_network_address_assign_type{device="bond0"} 3 node_network_address_assign_type{device="eth0"} 3 -# HELP node_network_carrier carrier value of /sys/class/net/. +# HELP node_network_carrier Network device property: carrier # TYPE node_network_carrier gauge +node_network_carrier{device="bond0"} 1 node_network_carrier{device="eth0"} 1 -# HELP node_network_carrier_changes_total carrier_changes_total value of /sys/class/net/. +# HELP node_network_carrier_changes_total Network device property: carrier_changes_total # TYPE node_network_carrier_changes_total counter +node_network_carrier_changes_total{device="bond0"} 2 node_network_carrier_changes_total{device="eth0"} 2 -# HELP node_network_carrier_down_changes_total carrier_down_changes_total value of /sys/class/net/. +# HELP node_network_carrier_down_changes_total Network device property: carrier_down_changes_total # TYPE node_network_carrier_down_changes_total counter +node_network_carrier_down_changes_total{device="bond0"} 1 node_network_carrier_down_changes_total{device="eth0"} 1 -# HELP node_network_carrier_up_changes_total carrier_up_changes_total value of /sys/class/net/. +# HELP node_network_carrier_up_changes_total Network device property: carrier_up_changes_total # TYPE node_network_carrier_up_changes_total counter +node_network_carrier_up_changes_total{device="bond0"} 1 node_network_carrier_up_changes_total{device="eth0"} 1 -# HELP node_network_device_id device_id value of /sys/class/net/. +# HELP node_network_device_id Network device property: device_id # TYPE node_network_device_id gauge +node_network_device_id{device="bond0"} 32 node_network_device_id{device="eth0"} 32 -# HELP node_network_dormant dormant value of /sys/class/net/. 
+# HELP node_network_dormant Network device property: dormant # TYPE node_network_dormant gauge +node_network_dormant{device="bond0"} 1 node_network_dormant{device="eth0"} 1 -# HELP node_network_flags flags value of /sys/class/net/. +# HELP node_network_flags Network device property: flags # TYPE node_network_flags gauge +node_network_flags{device="bond0"} 4867 node_network_flags{device="eth0"} 4867 -# HELP node_network_iface_id iface_id value of /sys/class/net/. +# HELP node_network_iface_id Network device property: iface_id # TYPE node_network_iface_id gauge +node_network_iface_id{device="bond0"} 2 node_network_iface_id{device="eth0"} 2 -# HELP node_network_iface_link iface_link value of /sys/class/net/. +# HELP node_network_iface_link Network device property: iface_link # TYPE node_network_iface_link gauge +node_network_iface_link{device="bond0"} 2 node_network_iface_link{device="eth0"} 2 -# HELP node_network_iface_link_mode iface_link_mode value of /sys/class/net/. +# HELP node_network_iface_link_mode Network device property: iface_link_mode # TYPE node_network_iface_link_mode gauge +node_network_iface_link_mode{device="bond0"} 1 node_network_iface_link_mode{device="eth0"} 1 # HELP node_network_info Non-numeric data from /sys/class/net/, value is always 1. # TYPE node_network_info gauge -node_network_info{address="01:01:01:01:01:01",broadcast="ff:ff:ff:ff:ff:ff",device="eth0",duplex="full",ifalias="",operstate="up"} 1 -# HELP node_network_mtu_bytes mtu_bytes value of /sys/class/net/. +node_network_info{address="01:01:01:01:01:01",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="bond0",duplex="full",ifalias="",operstate="up"} 1 +node_network_info{address="01:01:01:01:01:01",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="eth0",duplex="full",ifalias="",operstate="up"} 1 +# HELP node_network_mtu_bytes Network device property: mtu_bytes # TYPE node_network_mtu_bytes gauge +node_network_mtu_bytes{device="bond0"} 1500 node_network_mtu_bytes{device="eth0"} 1500 -# HELP node_network_name_assign_type name_assign_type value of /sys/class/net/. +# HELP node_network_name_assign_type Network device property: name_assign_type # TYPE node_network_name_assign_type gauge +node_network_name_assign_type{device="bond0"} 2 node_network_name_assign_type{device="eth0"} 2 -# HELP node_network_net_dev_group net_dev_group value of /sys/class/net/. +# HELP node_network_net_dev_group Network device property: net_dev_group # TYPE node_network_net_dev_group gauge +node_network_net_dev_group{device="bond0"} 0 node_network_net_dev_group{device="eth0"} 0 -# HELP node_network_protocol_type protocol_type value of /sys/class/net/. +# HELP node_network_protocol_type Network device property: protocol_type # TYPE node_network_protocol_type gauge +node_network_protocol_type{device="bond0"} 1 node_network_protocol_type{device="eth0"} 1 # HELP node_network_receive_bytes_total Network device statistic receive_bytes. 
# TYPE node_network_receive_bytes_total counter -node_network_receive_bytes_total{device="docker0"} 6.4910168e+07 -node_network_receive_bytes_total{device="eth0"} 6.8210035552e+10 -node_network_receive_bytes_total{device="flannel.1"} 1.8144009813e+10 -node_network_receive_bytes_total{device="ibr10:30"} 0 -node_network_receive_bytes_total{device="lo"} 4.35303245e+08 -node_network_receive_bytes_total{device="lxcbr0"} 0 -node_network_receive_bytes_total{device="tun0"} 1888 -node_network_receive_bytes_total{device="veth4B09XN"} 648 -node_network_receive_bytes_total{device="wlan0"} 1.0437182923e+10 -node_network_receive_bytes_total{device="💩0"} 5.7750104e+07 # HELP node_network_receive_compressed_total Network device statistic receive_compressed. # TYPE node_network_receive_compressed_total counter -node_network_receive_compressed_total{device="docker0"} 0 -node_network_receive_compressed_total{device="eth0"} 0 -node_network_receive_compressed_total{device="flannel.1"} 0 -node_network_receive_compressed_total{device="ibr10:30"} 0 node_network_receive_compressed_total{device="lo"} 0 -node_network_receive_compressed_total{device="lxcbr0"} 0 -node_network_receive_compressed_total{device="tun0"} 0 -node_network_receive_compressed_total{device="veth4B09XN"} 0 -node_network_receive_compressed_total{device="wlan0"} 0 -node_network_receive_compressed_total{device="💩0"} 0 # HELP node_network_receive_drop_total Network device statistic receive_drop. # TYPE node_network_receive_drop_total counter -node_network_receive_drop_total{device="docker0"} 0 -node_network_receive_drop_total{device="eth0"} 0 -node_network_receive_drop_total{device="flannel.1"} 0 -node_network_receive_drop_total{device="ibr10:30"} 0 node_network_receive_drop_total{device="lo"} 0 -node_network_receive_drop_total{device="lxcbr0"} 0 -node_network_receive_drop_total{device="tun0"} 0 -node_network_receive_drop_total{device="veth4B09XN"} 0 -node_network_receive_drop_total{device="wlan0"} 0 -node_network_receive_drop_total{device="💩0"} 0 # HELP node_network_receive_errs_total Network device statistic receive_errs. # TYPE node_network_receive_errs_total counter -node_network_receive_errs_total{device="docker0"} 0 -node_network_receive_errs_total{device="eth0"} 0 -node_network_receive_errs_total{device="flannel.1"} 0 -node_network_receive_errs_total{device="ibr10:30"} 0 node_network_receive_errs_total{device="lo"} 0 -node_network_receive_errs_total{device="lxcbr0"} 0 -node_network_receive_errs_total{device="tun0"} 0 -node_network_receive_errs_total{device="veth4B09XN"} 0 -node_network_receive_errs_total{device="wlan0"} 0 -node_network_receive_errs_total{device="💩0"} 0 # HELP node_network_receive_fifo_total Network device statistic receive_fifo. # TYPE node_network_receive_fifo_total counter -node_network_receive_fifo_total{device="docker0"} 0 -node_network_receive_fifo_total{device="eth0"} 0 -node_network_receive_fifo_total{device="flannel.1"} 0 -node_network_receive_fifo_total{device="ibr10:30"} 0 node_network_receive_fifo_total{device="lo"} 0 -node_network_receive_fifo_total{device="lxcbr0"} 0 -node_network_receive_fifo_total{device="tun0"} 0 -node_network_receive_fifo_total{device="veth4B09XN"} 0 -node_network_receive_fifo_total{device="wlan0"} 0 -node_network_receive_fifo_total{device="💩0"} 0 # HELP node_network_receive_frame_total Network device statistic receive_frame. 
# TYPE node_network_receive_frame_total counter -node_network_receive_frame_total{device="docker0"} 0 -node_network_receive_frame_total{device="eth0"} 0 -node_network_receive_frame_total{device="flannel.1"} 0 -node_network_receive_frame_total{device="ibr10:30"} 0 node_network_receive_frame_total{device="lo"} 0 -node_network_receive_frame_total{device="lxcbr0"} 0 -node_network_receive_frame_total{device="tun0"} 0 -node_network_receive_frame_total{device="veth4B09XN"} 0 -node_network_receive_frame_total{device="wlan0"} 0 -node_network_receive_frame_total{device="💩0"} 0 # HELP node_network_receive_multicast_total Network device statistic receive_multicast. # TYPE node_network_receive_multicast_total counter -node_network_receive_multicast_total{device="docker0"} 0 -node_network_receive_multicast_total{device="eth0"} 0 -node_network_receive_multicast_total{device="flannel.1"} 0 -node_network_receive_multicast_total{device="ibr10:30"} 0 node_network_receive_multicast_total{device="lo"} 0 -node_network_receive_multicast_total{device="lxcbr0"} 0 -node_network_receive_multicast_total{device="tun0"} 0 -node_network_receive_multicast_total{device="veth4B09XN"} 0 -node_network_receive_multicast_total{device="wlan0"} 0 -node_network_receive_multicast_total{device="💩0"} 72 +# HELP node_network_receive_nohandler_total Network device statistic receive_nohandler. +# TYPE node_network_receive_nohandler_total counter +node_network_receive_nohandler_total{device="lo"} 0 # HELP node_network_receive_packets_total Network device statistic receive_packets. # TYPE node_network_receive_packets_total counter -node_network_receive_packets_total{device="docker0"} 1.065585e+06 -node_network_receive_packets_total{device="eth0"} 5.20993275e+08 -node_network_receive_packets_total{device="flannel.1"} 2.28499337e+08 -node_network_receive_packets_total{device="ibr10:30"} 0 -node_network_receive_packets_total{device="lo"} 1.832522e+06 -node_network_receive_packets_total{device="lxcbr0"} 0 -node_network_receive_packets_total{device="tun0"} 24 -node_network_receive_packets_total{device="veth4B09XN"} 8 -node_network_receive_packets_total{device="wlan0"} 1.3899359e+07 -node_network_receive_packets_total{device="💩0"} 105557 -# HELP node_network_speed_bytes speed_bytes value of /sys/class/net/. +# HELP node_network_speed_bytes Network device property: speed_bytes # TYPE node_network_speed_bytes gauge node_network_speed_bytes{device="eth0"} 1.25e+08 # HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. # TYPE node_network_transmit_bytes_total counter -node_network_transmit_bytes_total{device="docker0"} 2.681662018e+09 -node_network_transmit_bytes_total{device="eth0"} 9.315587528e+09 -node_network_transmit_bytes_total{device="flannel.1"} 2.0758990068e+10 -node_network_transmit_bytes_total{device="ibr10:30"} 0 -node_network_transmit_bytes_total{device="lo"} 4.35303245e+08 -node_network_transmit_bytes_total{device="lxcbr0"} 2.630299e+06 -node_network_transmit_bytes_total{device="tun0"} 67120 -node_network_transmit_bytes_total{device="veth4B09XN"} 1.943284e+06 -node_network_transmit_bytes_total{device="wlan0"} 2.85164936e+09 -node_network_transmit_bytes_total{device="💩0"} 4.04570255e+08 # HELP node_network_transmit_carrier_total Network device statistic transmit_carrier. 
# TYPE node_network_transmit_carrier_total counter -node_network_transmit_carrier_total{device="docker0"} 0 -node_network_transmit_carrier_total{device="eth0"} 0 -node_network_transmit_carrier_total{device="flannel.1"} 0 -node_network_transmit_carrier_total{device="ibr10:30"} 0 node_network_transmit_carrier_total{device="lo"} 0 -node_network_transmit_carrier_total{device="lxcbr0"} 0 -node_network_transmit_carrier_total{device="tun0"} 0 -node_network_transmit_carrier_total{device="veth4B09XN"} 0 -node_network_transmit_carrier_total{device="wlan0"} 0 -node_network_transmit_carrier_total{device="💩0"} 0 # HELP node_network_transmit_colls_total Network device statistic transmit_colls. # TYPE node_network_transmit_colls_total counter -node_network_transmit_colls_total{device="docker0"} 0 -node_network_transmit_colls_total{device="eth0"} 0 -node_network_transmit_colls_total{device="flannel.1"} 0 -node_network_transmit_colls_total{device="ibr10:30"} 0 node_network_transmit_colls_total{device="lo"} 0 -node_network_transmit_colls_total{device="lxcbr0"} 0 -node_network_transmit_colls_total{device="tun0"} 0 -node_network_transmit_colls_total{device="veth4B09XN"} 0 -node_network_transmit_colls_total{device="wlan0"} 0 -node_network_transmit_colls_total{device="💩0"} 0 # HELP node_network_transmit_compressed_total Network device statistic transmit_compressed. # TYPE node_network_transmit_compressed_total counter -node_network_transmit_compressed_total{device="docker0"} 0 -node_network_transmit_compressed_total{device="eth0"} 0 -node_network_transmit_compressed_total{device="flannel.1"} 0 -node_network_transmit_compressed_total{device="ibr10:30"} 0 node_network_transmit_compressed_total{device="lo"} 0 -node_network_transmit_compressed_total{device="lxcbr0"} 0 -node_network_transmit_compressed_total{device="tun0"} 0 -node_network_transmit_compressed_total{device="veth4B09XN"} 0 -node_network_transmit_compressed_total{device="wlan0"} 0 -node_network_transmit_compressed_total{device="💩0"} 0 # HELP node_network_transmit_drop_total Network device statistic transmit_drop. # TYPE node_network_transmit_drop_total counter -node_network_transmit_drop_total{device="docker0"} 0 -node_network_transmit_drop_total{device="eth0"} 0 -node_network_transmit_drop_total{device="flannel.1"} 64 -node_network_transmit_drop_total{device="ibr10:30"} 0 node_network_transmit_drop_total{device="lo"} 0 -node_network_transmit_drop_total{device="lxcbr0"} 0 -node_network_transmit_drop_total{device="tun0"} 0 -node_network_transmit_drop_total{device="veth4B09XN"} 0 -node_network_transmit_drop_total{device="wlan0"} 0 -node_network_transmit_drop_total{device="💩0"} 0 # HELP node_network_transmit_errs_total Network device statistic transmit_errs. # TYPE node_network_transmit_errs_total counter -node_network_transmit_errs_total{device="docker0"} 0 -node_network_transmit_errs_total{device="eth0"} 0 -node_network_transmit_errs_total{device="flannel.1"} 0 -node_network_transmit_errs_total{device="ibr10:30"} 0 node_network_transmit_errs_total{device="lo"} 0 -node_network_transmit_errs_total{device="lxcbr0"} 0 -node_network_transmit_errs_total{device="tun0"} 0 -node_network_transmit_errs_total{device="veth4B09XN"} 0 -node_network_transmit_errs_total{device="wlan0"} 0 -node_network_transmit_errs_total{device="💩0"} 0 # HELP node_network_transmit_fifo_total Network device statistic transmit_fifo. 
# TYPE node_network_transmit_fifo_total counter -node_network_transmit_fifo_total{device="docker0"} 0 -node_network_transmit_fifo_total{device="eth0"} 0 -node_network_transmit_fifo_total{device="flannel.1"} 0 -node_network_transmit_fifo_total{device="ibr10:30"} 0 node_network_transmit_fifo_total{device="lo"} 0 -node_network_transmit_fifo_total{device="lxcbr0"} 0 -node_network_transmit_fifo_total{device="tun0"} 0 -node_network_transmit_fifo_total{device="veth4B09XN"} 0 -node_network_transmit_fifo_total{device="wlan0"} 0 -node_network_transmit_fifo_total{device="💩0"} 0 # HELP node_network_transmit_packets_total Network device statistic transmit_packets. # TYPE node_network_transmit_packets_total counter -node_network_transmit_packets_total{device="docker0"} 1.929779e+06 -node_network_transmit_packets_total{device="eth0"} 4.3451486e+07 -node_network_transmit_packets_total{device="flannel.1"} 2.58369223e+08 -node_network_transmit_packets_total{device="ibr10:30"} 0 -node_network_transmit_packets_total{device="lo"} 1.832522e+06 -node_network_transmit_packets_total{device="lxcbr0"} 28339 -node_network_transmit_packets_total{device="tun0"} 934 -node_network_transmit_packets_total{device="veth4B09XN"} 10640 -node_network_transmit_packets_total{device="wlan0"} 1.17262e+07 -node_network_transmit_packets_total{device="💩0"} 304261 -# HELP node_network_transmit_queue_length transmit_queue_length value of /sys/class/net/. +# HELP node_network_transmit_queue_length Network device property: transmit_queue_length # TYPE node_network_transmit_queue_length gauge +node_network_transmit_queue_length{device="bond0"} 1000 node_network_transmit_queue_length{device="eth0"} 1000 # HELP node_network_up Value is 1 if operstate is 'up', 0 otherwise. # TYPE node_network_up gauge +node_network_up{device="bond0"} 1 node_network_up{device="eth0"} 1 # HELP node_nf_conntrack_entries Number of currently allocated flow entries for connection tracking. # TYPE node_nf_conntrack_entries gauge @@ -2264,6 +2596,30 @@ node_nf_conntrack_entries 123 # HELP node_nf_conntrack_entries_limit Maximum size of connection tracking table. # TYPE node_nf_conntrack_entries_limit gauge node_nf_conntrack_entries_limit 65536 +# HELP node_nf_conntrack_stat_drop Number of packets dropped due to conntrack failure. +# TYPE node_nf_conntrack_stat_drop gauge +node_nf_conntrack_stat_drop 0 +# HELP node_nf_conntrack_stat_early_drop Number of dropped conntrack entries to make room for new ones, if maximum table size was reached. +# TYPE node_nf_conntrack_stat_early_drop gauge +node_nf_conntrack_stat_early_drop 0 +# HELP node_nf_conntrack_stat_found Number of searched entries which were successful. +# TYPE node_nf_conntrack_stat_found gauge +node_nf_conntrack_stat_found 0 +# HELP node_nf_conntrack_stat_ignore Number of packets seen which are already connected to a conntrack entry. +# TYPE node_nf_conntrack_stat_ignore gauge +node_nf_conntrack_stat_ignore 89738 +# HELP node_nf_conntrack_stat_insert Number of entries inserted into the list. +# TYPE node_nf_conntrack_stat_insert gauge +node_nf_conntrack_stat_insert 0 +# HELP node_nf_conntrack_stat_insert_failed Number of entries for which list insertion was attempted but failed. +# TYPE node_nf_conntrack_stat_insert_failed gauge +node_nf_conntrack_stat_insert_failed 0 +# HELP node_nf_conntrack_stat_invalid Number of packets seen which can not be tracked. 
+# TYPE node_nf_conntrack_stat_invalid gauge +node_nf_conntrack_stat_invalid 53 +# HELP node_nf_conntrack_stat_search_restart Number of conntrack table lookups which had to be restarted due to hashtable resizes. +# TYPE node_nf_conntrack_stat_search_restart gauge +node_nf_conntrack_stat_search_restart 7 # HELP node_nfs_connections_total Total number of NFSd TCP connections. # TYPE node_nfs_connections_total counter node_nfs_connections_total 45 @@ -2478,13 +2834,16 @@ node_nfsd_requests_total{method="SecInfo",proto="4"} 0 node_nfsd_requests_total{method="SetAttr",proto="2"} 0 node_nfsd_requests_total{method="SetAttr",proto="3"} 0 node_nfsd_requests_total{method="SetAttr",proto="4"} 0 +node_nfsd_requests_total{method="SetClientID",proto="4"} 3 +node_nfsd_requests_total{method="SetClientIDConfirm",proto="4"} 3 node_nfsd_requests_total{method="SymLink",proto="2"} 0 node_nfsd_requests_total{method="SymLink",proto="3"} 0 -node_nfsd_requests_total{method="Verify",proto="4"} 3 +node_nfsd_requests_total{method="Verify",proto="4"} 0 +node_nfsd_requests_total{method="WdelegGetattr",proto="4"} 15 node_nfsd_requests_total{method="WrCache",proto="2"} 0 node_nfsd_requests_total{method="Write",proto="2"} 0 node_nfsd_requests_total{method="Write",proto="3"} 0 -node_nfsd_requests_total{method="Write",proto="4"} 3 +node_nfsd_requests_total{method="Write",proto="4"} 0 # HELP node_nfsd_rpc_errors_total Total number of NFSd RPC errors by error type. # TYPE node_nfsd_rpc_errors_total counter node_nfsd_rpc_errors_total{error="auth"} 2 @@ -2496,6 +2855,91 @@ node_nfsd_server_rpcs_total 18628 # HELP node_nfsd_server_threads Total number of NFSd kernel threads that are running. # TYPE node_nfsd_server_threads gauge node_nfsd_server_threads 8 +# HELP node_nvme_info Non-numeric data from /sys/class/nvme/, value is always 1. +# TYPE node_nvme_info gauge +node_nvme_info{device="nvme0",firmware_revision="1B2QEXP7",model="Samsung SSD 970 PRO 512GB",serial="S680HF8N190894I",state="live"} 1 +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. +# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. 
+# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_pcidevice_current_link_transfers_per_second Value of current link's transfers per second (T/s) +# TYPE node_pcidevice_current_link_transfers_per_second gauge +node_pcidevice_current_link_transfers_per_second{bus="00",device="02",function="1",segment="0000"} 8e+09 +node_pcidevice_current_link_transfers_per_second{bus="01",device="00",function="0",segment="0000"} 8e+09 +node_pcidevice_current_link_transfers_per_second{bus="45",device="00",function="0",segment="0000"} 5e+09 +# HELP node_pcidevice_current_link_width Value of current link's width (number of lanes) +# TYPE node_pcidevice_current_link_width gauge +node_pcidevice_current_link_width{bus="00",device="02",function="1",segment="0000"} 4 +node_pcidevice_current_link_width{bus="01",device="00",function="0",segment="0000"} 4 +node_pcidevice_current_link_width{bus="45",device="00",function="0",segment="0000"} 4 +# HELP node_pcidevice_d3cold_allowed Whether the PCIe device supports D3cold power state (0/1). +# TYPE node_pcidevice_d3cold_allowed gauge +node_pcidevice_d3cold_allowed{bus="00",device="02",function="1",segment="0000"} 1 +node_pcidevice_d3cold_allowed{bus="01",device="00",function="0",segment="0000"} 1 +node_pcidevice_d3cold_allowed{bus="45",device="00",function="0",segment="0000"} 1 +# HELP node_pcidevice_info Non-numeric data from /sys/bus/pci/devices/, value is always 1. +# TYPE node_pcidevice_info gauge +node_pcidevice_info{bus="00",class_id="0x060400",device="02",device_id="0x1634",function="1",parent_bus="*",parent_device="*",parent_function="*",parent_segment="*",revision="0x00",segment="0000",subsystem_device_id="0x5095",subsystem_vendor_id="0x17aa",vendor_id="0x1022"} 1 +node_pcidevice_info{bus="01",class_id="0x010802",device="00",device_id="0x540a",function="0",parent_bus="00",parent_device="02",parent_function="1",parent_segment="0000",revision="0x01",segment="0000",subsystem_device_id="0x5021",subsystem_vendor_id="0xc0a9",vendor_id="0xc0a9"} 1 +node_pcidevice_info{bus="45",class_id="0x020000",device="00",device_id="0x1521",function="0",parent_bus="40",parent_device="01",parent_function="3",parent_segment="0000",revision="0x01",segment="0000",subsystem_device_id="0x00a3",subsystem_vendor_id="0x8086",vendor_id="0x8086"} 1 +# HELP node_pcidevice_max_link_transfers_per_second Value of maximum link's transfers per second (T/s) +# TYPE node_pcidevice_max_link_transfers_per_second gauge +node_pcidevice_max_link_transfers_per_second{bus="00",device="02",function="1",segment="0000"} 8e+09 +node_pcidevice_max_link_transfers_per_second{bus="01",device="00",function="0",segment="0000"} 1.6e+10 +node_pcidevice_max_link_transfers_per_second{bus="45",device="00",function="0",segment="0000"} 5e+09 +# HELP node_pcidevice_max_link_width Value of maximum link's width (number of lanes) +# TYPE node_pcidevice_max_link_width gauge +node_pcidevice_max_link_width{bus="00",device="02",function="1",segment="0000"} 8 +node_pcidevice_max_link_width{bus="01",device="00",function="0",segment="0000"} 4 +node_pcidevice_max_link_width{bus="45",device="00",function="0",segment="0000"} 4 +# HELP node_pcidevice_numa_node NUMA node number for the PCI device. -1 indicates unknown or not available. +# TYPE node_pcidevice_numa_node gauge +node_pcidevice_numa_node{bus="45",device="00",function="0",segment="0000"} 0 +# HELP node_pcidevice_power_state PCIe device power state, one of: D0, D1, D2, D3hot, D3cold, unknown or error. 
+# TYPE node_pcidevice_power_state gauge +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D0"} 1 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D1"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D2"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D3cold"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D3hot"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="error"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="unknown"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D0"} 1 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D1"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D2"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D3cold"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D3hot"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="error"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="unknown"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D0"} 1 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D1"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D2"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D3cold"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D3hot"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="error"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="unknown"} 0 +# HELP node_pcidevice_sriov_drivers_autoprobe Whether SR-IOV drivers autoprobe is enabled for the device (0/1). +# TYPE node_pcidevice_sriov_drivers_autoprobe gauge +node_pcidevice_sriov_drivers_autoprobe{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_drivers_autoprobe{bus="01",device="00",function="0",segment="0000"} 1 +node_pcidevice_sriov_drivers_autoprobe{bus="45",device="00",function="0",segment="0000"} 1 +# HELP node_pcidevice_sriov_numvfs Number of Virtual Functions (VFs) currently enabled for SR-IOV. +# TYPE node_pcidevice_sriov_numvfs gauge +node_pcidevice_sriov_numvfs{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_numvfs{bus="01",device="00",function="0",segment="0000"} 4 +node_pcidevice_sriov_numvfs{bus="45",device="00",function="0",segment="0000"} 0 +# HELP node_pcidevice_sriov_totalvfs Total number of Virtual Functions (VFs) supported by the device. +# TYPE node_pcidevice_sriov_totalvfs gauge +node_pcidevice_sriov_totalvfs{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_totalvfs{bus="01",device="00",function="0",segment="0000"} 8 +node_pcidevice_sriov_totalvfs{bus="45",device="00",function="0",segment="0000"} 7 +# HELP node_pcidevice_sriov_vf_total_msix Total number of MSI-X vectors for Virtual Functions. 
+# TYPE node_pcidevice_sriov_vf_total_msix gauge +node_pcidevice_sriov_vf_total_msix{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_vf_total_msix{bus="01",device="00",function="0",segment="0000"} 16 +node_pcidevice_sriov_vf_total_msix{bus="45",device="00",function="0",segment="0000"} 0 # HELP node_power_supply_capacity capacity value of /sys/class/power_supply/. # TYPE node_power_supply_capacity gauge node_power_supply_capacity{power_supply="BAT0"} 81 @@ -2514,7 +2958,7 @@ node_power_supply_energy_watthour{power_supply="BAT0"} 36.58 # HELP node_power_supply_info info of /sys/class/power_supply/. # TYPE node_power_supply_info gauge node_power_supply_info{power_supply="AC",type="Mains"} 1 -node_power_supply_info{capacity_level="Normal",manufacturer="LGC",model_name="LNV-45N1",power_supply="BAT0",serial_number="38109",status="Discharging",technology="Li-ion",type="Battery"} 1 +node_power_supply_info{capacity_level="Normal",manufacturer="LGC",model_name="LNV-45N1��",power_supply="BAT0",serial_number="38109",status="Discharging",technology="Li-ion",type="Battery"} 1 # HELP node_power_supply_online online value of /sys/class/power_supply/. # TYPE node_power_supply_online gauge node_power_supply_online{power_supply="AC"} 0 @@ -2539,6 +2983,9 @@ node_pressure_io_stalled_seconds_total 159.229614 # HELP node_pressure_io_waiting_seconds_total Total time in seconds that processes have waited due to IO congestion # TYPE node_pressure_io_waiting_seconds_total counter node_pressure_io_waiting_seconds_total 159.886802 +# HELP node_pressure_irq_stalled_seconds_total Total time in seconds no process could make progress due to IRQ congestion +# TYPE node_pressure_irq_stalled_seconds_total counter +node_pressure_irq_stalled_seconds_total 0.008494 # HELP node_pressure_memory_stalled_seconds_total Total time in seconds no process could make progress due to memory congestion # TYPE node_pressure_memory_stalled_seconds_total counter node_pressure_memory_stalled_seconds_total 0 @@ -2553,23 +3000,32 @@ node_processes_max_processes 123 node_processes_max_threads 7801 # HELP node_processes_pids Number of PIDs # TYPE node_processes_pids gauge -node_processes_pids 1 +node_processes_pids 3 # HELP node_processes_state Number of processes in each state. # TYPE node_processes_state gauge -node_processes_state{state="S"} 1 +node_processes_state{state="I"} 1 +node_processes_state{state="S"} 2 # HELP node_processes_threads Allocated threads in system # TYPE node_processes_threads gauge -node_processes_threads 1 +node_processes_threads 3 # HELP node_procs_blocked Number of processes blocked waiting for I/O to complete. # TYPE node_procs_blocked gauge node_procs_blocked 0 # HELP node_procs_running Number of processes in runnable state. # TYPE node_procs_running gauge node_procs_running 2 +# HELP node_qdisc_backlog Number of bytes currently in queue to be sent. +# TYPE node_qdisc_backlog gauge +node_qdisc_backlog{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_backlog{device="wlan0",kind="fq"} 0 # HELP node_qdisc_bytes_total Number of bytes sent. # TYPE node_qdisc_bytes_total counter node_qdisc_bytes_total{device="eth0",kind="pfifo_fast"} 83 node_qdisc_bytes_total{device="wlan0",kind="fq"} 42 +# HELP node_qdisc_current_queue_length Number of packets currently in queue to be sent. 
+# TYPE node_qdisc_current_queue_length gauge +node_qdisc_current_queue_length{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_current_queue_length{device="wlan0",kind="fq"} 0 # HELP node_qdisc_drops_total Number of packets dropped. # TYPE node_qdisc_drops_total counter node_qdisc_drops_total{device="eth0",kind="pfifo_fast"} 0 @@ -2588,10 +3044,10 @@ node_qdisc_requeues_total{device="eth0",kind="pfifo_fast"} 2 node_qdisc_requeues_total{device="wlan0",kind="fq"} 1 # HELP node_rapl_core_joules_total Current RAPL core value in joules # TYPE node_rapl_core_joules_total counter -node_rapl_core_joules_total{index="0"} 118821.284256 +node_rapl_core_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0:0"} 118821.284256 # HELP node_rapl_package_joules_total Current RAPL package value in joules # TYPE node_rapl_package_joules_total counter -node_rapl_package_joules_total{index="0"} 240422.366267 +node_rapl_package_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0"} 240422.366267 # HELP node_schedstat_running_seconds_total Number of seconds CPU spent running a process. # TYPE node_schedstat_running_seconds_total counter node_schedstat_running_seconds_total{cpu="0"} 2.045936778163039e+06 @@ -2613,19 +3069,24 @@ node_scrape_collector_success{collector="bcache"} 1 node_scrape_collector_success{collector="bonding"} 1 node_scrape_collector_success{collector="btrfs"} 1 node_scrape_collector_success{collector="buddyinfo"} 1 +node_scrape_collector_success{collector="cgroups"} 1 node_scrape_collector_success{collector="conntrack"} 1 node_scrape_collector_success{collector="cpu"} 1 +node_scrape_collector_success{collector="cpu_vulnerabilities"} 1 node_scrape_collector_success{collector="cpufreq"} 1 node_scrape_collector_success{collector="diskstats"} 1 +node_scrape_collector_success{collector="dmi"} 1 node_scrape_collector_success{collector="drbd"} 1 node_scrape_collector_success{collector="edac"} 1 node_scrape_collector_success{collector="entropy"} 1 +node_scrape_collector_success{collector="fibrechannel"} 1 node_scrape_collector_success{collector="filefd"} 1 node_scrape_collector_success{collector="hwmon"} 1 node_scrape_collector_success{collector="infiniband"} 1 node_scrape_collector_success{collector="interrupts"} 1 node_scrape_collector_success{collector="ipvs"} 1 node_scrape_collector_success{collector="ksmd"} 1 +node_scrape_collector_success{collector="lnstat"} 1 node_scrape_collector_success{collector="loadavg"} 1 node_scrape_collector_success{collector="mdadm"} 1 node_scrape_collector_success{collector="meminfo"} 1 @@ -2636,22 +3097,63 @@ node_scrape_collector_success{collector="netdev"} 1 node_scrape_collector_success{collector="netstat"} 1 node_scrape_collector_success{collector="nfs"} 1 node_scrape_collector_success{collector="nfsd"} 1 +node_scrape_collector_success{collector="nvme"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="pcidevice"} 1 node_scrape_collector_success{collector="powersupplyclass"} 1 node_scrape_collector_success{collector="pressure"} 1 node_scrape_collector_success{collector="processes"} 1 node_scrape_collector_success{collector="qdisc"} 1 node_scrape_collector_success{collector="rapl"} 1 node_scrape_collector_success{collector="schedstat"} 1 +node_scrape_collector_success{collector="slabinfo"} 1 node_scrape_collector_success{collector="sockstat"} 1 +node_scrape_collector_success{collector="softirqs"} 1 node_scrape_collector_success{collector="softnet"} 1 
node_scrape_collector_success{collector="stat"} 1 +node_scrape_collector_success{collector="sysctl"} 1 +node_scrape_collector_success{collector="tapestats"} 1 node_scrape_collector_success{collector="textfile"} 1 node_scrape_collector_success{collector="thermal_zone"} 1 +node_scrape_collector_success{collector="time"} 1 node_scrape_collector_success{collector="udp_queues"} 1 node_scrape_collector_success{collector="vmstat"} 1 +node_scrape_collector_success{collector="watchdog"} 1 node_scrape_collector_success{collector="wifi"} 1 +node_scrape_collector_success{collector="xfrm"} 1 node_scrape_collector_success{collector="xfs"} 1 node_scrape_collector_success{collector="zfs"} 1 +node_scrape_collector_success{collector="zoneinfo"} 1 +# HELP node_slabinfo_active_objects The number of objects that are currently active (i.e., in use). +# TYPE node_slabinfo_active_objects gauge +node_slabinfo_active_objects{slab="dmaengine-unmap-128"} 1206 +node_slabinfo_active_objects{slab="kmalloc-8192"} 132 +node_slabinfo_active_objects{slab="kmem_cache"} 320 +node_slabinfo_active_objects{slab="tw_sock_TCP"} 704 +# HELP node_slabinfo_object_size_bytes The size of objects in this slab, in bytes. +# TYPE node_slabinfo_object_size_bytes gauge +node_slabinfo_object_size_bytes{slab="dmaengine-unmap-128"} 1088 +node_slabinfo_object_size_bytes{slab="kmalloc-8192"} 8192 +node_slabinfo_object_size_bytes{slab="kmem_cache"} 256 +node_slabinfo_object_size_bytes{slab="tw_sock_TCP"} 256 +# HELP node_slabinfo_objects The total number of allocated objects (i.e., objects that are both in use and not in use). +# TYPE node_slabinfo_objects gauge +node_slabinfo_objects{slab="dmaengine-unmap-128"} 1320 +node_slabinfo_objects{slab="kmalloc-8192"} 148 +node_slabinfo_objects{slab="kmem_cache"} 320 +node_slabinfo_objects{slab="tw_sock_TCP"} 864 +# HELP node_slabinfo_objects_per_slab The number of objects stored in each slab. +# TYPE node_slabinfo_objects_per_slab gauge +node_slabinfo_objects_per_slab{slab="dmaengine-unmap-128"} 30 +node_slabinfo_objects_per_slab{slab="kmalloc-8192"} 4 +node_slabinfo_objects_per_slab{slab="kmem_cache"} 32 +node_slabinfo_objects_per_slab{slab="tw_sock_TCP"} 32 +# HELP node_slabinfo_pages_per_slab The number of pages allocated for each slab. +# TYPE node_slabinfo_pages_per_slab gauge +node_slabinfo_pages_per_slab{slab="dmaengine-unmap-128"} 8 +node_slabinfo_pages_per_slab{slab="kmalloc-8192"} 8 +node_slabinfo_pages_per_slab{slab="kmem_cache"} 2 +node_slabinfo_pages_per_slab{slab="tw_sock_TCP"} 2 # HELP node_sockstat_FRAG6_inuse Number of FRAG6 sockets in state inuse. # TYPE node_sockstat_FRAG6_inuse gauge node_sockstat_FRAG6_inuse 0 @@ -2712,24 +3214,139 @@ node_sockstat_UDP_mem_bytes 0 # HELP node_sockstat_sockets_used Number of IPv4 sockets in use. # TYPE node_sockstat_sockets_used gauge node_sockstat_sockets_used 229 +# HELP node_softirqs_functions_total Softirq counts per CPU. 
+# TYPE node_softirqs_functions_total counter +node_softirqs_functions_total{cpu="0",type="BLOCK"} 23776 +node_softirqs_functions_total{cpu="0",type="HI"} 7 +node_softirqs_functions_total{cpu="0",type="HRTIMER"} 40 +node_softirqs_functions_total{cpu="0",type="IRQ_POLL"} 0 +node_softirqs_functions_total{cpu="0",type="NET_RX"} 43066 +node_softirqs_functions_total{cpu="0",type="NET_TX"} 2301 +node_softirqs_functions_total{cpu="0",type="RCU"} 155929 +node_softirqs_functions_total{cpu="0",type="SCHED"} 378895 +node_softirqs_functions_total{cpu="0",type="TASKLET"} 372 +node_softirqs_functions_total{cpu="0",type="TIMER"} 424191 +node_softirqs_functions_total{cpu="1",type="BLOCK"} 24115 +node_softirqs_functions_total{cpu="1",type="HI"} 1 +node_softirqs_functions_total{cpu="1",type="HRTIMER"} 346 +node_softirqs_functions_total{cpu="1",type="IRQ_POLL"} 0 +node_softirqs_functions_total{cpu="1",type="NET_RX"} 104508 +node_softirqs_functions_total{cpu="1",type="NET_TX"} 2430 +node_softirqs_functions_total{cpu="1",type="RCU"} 146631 +node_softirqs_functions_total{cpu="1",type="SCHED"} 152852 +node_softirqs_functions_total{cpu="1",type="TASKLET"} 1899 +node_softirqs_functions_total{cpu="1",type="TIMER"} 108342 +# HELP node_softirqs_total Number of softirq calls. +# TYPE node_softirqs_total counter +node_softirqs_total{vector="block"} 186066 +node_softirqs_total{vector="block_iopoll"} 0 +node_softirqs_total{vector="hi"} 250191 +node_softirqs_total{vector="hrtimer"} 12499 +node_softirqs_total{vector="net_rx"} 211099 +node_softirqs_total{vector="net_tx"} 1647 +node_softirqs_total{vector="rcu"} 508444 +node_softirqs_total{vector="sched"} 622196 +node_softirqs_total{vector="tasklet"} 1.783454e+06 +node_softirqs_total{vector="timer"} 1.481983e+06 +# HELP node_softnet_backlog_len Softnet backlog status +# TYPE node_softnet_backlog_len gauge +node_softnet_backlog_len{cpu="0"} 0 +node_softnet_backlog_len{cpu="1"} 0 +node_softnet_backlog_len{cpu="2"} 0 +node_softnet_backlog_len{cpu="3"} 0 +# HELP node_softnet_cpu_collision_total Number of collision occur while obtaining device lock while transmitting +# TYPE node_softnet_cpu_collision_total counter +node_softnet_cpu_collision_total{cpu="0"} 0 +node_softnet_cpu_collision_total{cpu="1"} 0 +node_softnet_cpu_collision_total{cpu="2"} 0 +node_softnet_cpu_collision_total{cpu="3"} 0 # HELP node_softnet_dropped_total Number of dropped packets # TYPE node_softnet_dropped_total counter node_softnet_dropped_total{cpu="0"} 0 node_softnet_dropped_total{cpu="1"} 41 node_softnet_dropped_total{cpu="2"} 0 node_softnet_dropped_total{cpu="3"} 0 +# HELP node_softnet_flow_limit_count_total Number of times flow limit has been reached +# TYPE node_softnet_flow_limit_count_total counter +node_softnet_flow_limit_count_total{cpu="0"} 0 +node_softnet_flow_limit_count_total{cpu="1"} 0 +node_softnet_flow_limit_count_total{cpu="2"} 0 +node_softnet_flow_limit_count_total{cpu="3"} 0 # HELP node_softnet_processed_total Number of processed packets # TYPE node_softnet_processed_total counter node_softnet_processed_total{cpu="0"} 299641 node_softnet_processed_total{cpu="1"} 916354 node_softnet_processed_total{cpu="2"} 5.577791e+06 node_softnet_processed_total{cpu="3"} 3.113785e+06 +# HELP node_softnet_received_rps_total Number of times cpu woken up received_rps +# TYPE node_softnet_received_rps_total counter +node_softnet_received_rps_total{cpu="0"} 0 +node_softnet_received_rps_total{cpu="1"} 0 +node_softnet_received_rps_total{cpu="2"} 0 +node_softnet_received_rps_total{cpu="3"} 0 # HELP 
node_softnet_times_squeezed_total Number of times processing packets ran out of quota # TYPE node_softnet_times_squeezed_total counter node_softnet_times_squeezed_total{cpu="0"} 1 node_softnet_times_squeezed_total{cpu="1"} 10 node_softnet_times_squeezed_total{cpu="2"} 85 node_softnet_times_squeezed_total{cpu="3"} 50 +# HELP node_sysctl_fs_file_nr sysctl fs.file-nr +# TYPE node_sysctl_fs_file_nr untyped +node_sysctl_fs_file_nr{index="0"} 1024 +node_sysctl_fs_file_nr{index="1"} 0 +node_sysctl_fs_file_nr{index="2"} 1.631329e+06 +# HELP node_sysctl_fs_file_nr_current sysctl fs.file-nr, field 1 +# TYPE node_sysctl_fs_file_nr_current untyped +node_sysctl_fs_file_nr_current 0 +# HELP node_sysctl_fs_file_nr_max sysctl fs.file-nr, field 2 +# TYPE node_sysctl_fs_file_nr_max untyped +node_sysctl_fs_file_nr_max 1.631329e+06 +# HELP node_sysctl_fs_file_nr_total sysctl fs.file-nr, field 0 +# TYPE node_sysctl_fs_file_nr_total untyped +node_sysctl_fs_file_nr_total 1024 +# HELP node_sysctl_info sysctl info +# TYPE node_sysctl_info gauge +node_sysctl_info{index="0",name="kernel.seccomp.actions_avail",value="kill_process"} 1 +node_sysctl_info{index="1",name="kernel.seccomp.actions_avail",value="kill_thread"} 1 +node_sysctl_info{index="2",name="kernel.seccomp.actions_avail",value="trap"} 1 +node_sysctl_info{index="3",name="kernel.seccomp.actions_avail",value="errno"} 1 +node_sysctl_info{index="4",name="kernel.seccomp.actions_avail",value="user_notif"} 1 +node_sysctl_info{index="5",name="kernel.seccomp.actions_avail",value="trace"} 1 +node_sysctl_info{index="6",name="kernel.seccomp.actions_avail",value="log"} 1 +node_sysctl_info{index="7",name="kernel.seccomp.actions_avail",value="allow"} 1 +# HELP node_sysctl_kernel_threads_max sysctl kernel.threads-max +# TYPE node_sysctl_kernel_threads_max untyped +node_sysctl_kernel_threads_max 7801 +# HELP node_tape_io_now The number of I/Os currently outstanding to this device. +# TYPE node_tape_io_now gauge +node_tape_io_now{device="st0"} 1 +# HELP node_tape_io_others_total The number of I/Os issued to the tape drive other than read or write commands. The time taken to complete these commands uses the following calculation io_time_seconds_total-read_time_seconds_total-write_time_seconds_total +# TYPE node_tape_io_others_total counter +node_tape_io_others_total{device="st0"} 1409 +# HELP node_tape_io_time_seconds_total The amount of time spent waiting for all I/O to complete (including read and write). This includes tape movement commands such as seeking between file or set marks and implicit tape movement such as when rewind on close tape devices are used. +# TYPE node_tape_io_time_seconds_total counter +node_tape_io_time_seconds_total{device="st0"} 9247.01108772 +# HELP node_tape_read_bytes_total The number of bytes read from the tape drive. +# TYPE node_tape_read_bytes_total counter +node_tape_read_bytes_total{device="st0"} 9.79383912e+08 +# HELP node_tape_read_time_seconds_total The amount of time spent waiting for read requests to complete. +# TYPE node_tape_read_time_seconds_total counter +node_tape_read_time_seconds_total{device="st0"} 33.788355744 +# HELP node_tape_reads_completed_total The number of read requests issued to the tape drive. +# TYPE node_tape_reads_completed_total counter +node_tape_reads_completed_total{device="st0"} 3741 +# HELP node_tape_residual_total The number of times during a read or write we found the residual amount to be non-zero. This should mean that a program is issuing a read larger thean the block size on tape. 
For write not all data made it to tape. +# TYPE node_tape_residual_total counter +node_tape_residual_total{device="st0"} 19 +# HELP node_tape_write_time_seconds_total The amount of time spent waiting for write requests to complete. +# TYPE node_tape_write_time_seconds_total counter +node_tape_write_time_seconds_total{device="st0"} 5233.597394395 +# HELP node_tape_writes_completed_total The number of write requests issued to the tape drive. +# TYPE node_tape_writes_completed_total counter +node_tape_writes_completed_total{device="st0"} 5.3772916e+07 +# HELP node_tape_written_bytes_total The number of bytes written to the tape drive. +# TYPE node_tape_written_bytes_total counter +node_tape_written_bytes_total{device="st0"} 1.496246784e+12 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise @@ -2738,6 +3355,18 @@ node_textfile_scrape_error 0 # HELP node_thermal_zone_temp Zone temperature in Celsius # TYPE node_thermal_zone_temp gauge node_thermal_zone_temp{type="cpu-thermal",zone="0"} 12.376 +# HELP node_time_clocksource_available_info Available clocksources read from '/sys/devices/system/clocksource'. +# TYPE node_time_clocksource_available_info gauge +node_time_clocksource_available_info{clocksource="acpi_pm",device="0"} 1 +node_time_clocksource_available_info{clocksource="hpet",device="0"} 1 +node_time_clocksource_available_info{clocksource="tsc",device="0"} 1 +# HELP node_time_clocksource_current_info Current clocksource read from '/sys/devices/system/clocksource'. +# TYPE node_time_clocksource_current_info gauge +node_time_clocksource_current_info{clocksource="tsc",device="0"} 1 +# HELP node_time_seconds System time in seconds since epoch (1970). +# TYPE node_time_seconds gauge +# HELP node_time_zone_offset_seconds System time zone offset in seconds. +# TYPE node_time_zone_offset_seconds gauge # HELP node_udp_queues Number of allocated memory in the kernel for UDP datagrams in bytes. # TYPE node_udp_queues gauge node_udp_queues{ip="v4",queue="rx"} 0 @@ -2763,6 +3392,31 @@ node_vmstat_pswpin 1476 # HELP node_vmstat_pswpout /proc/vmstat information field pswpout. 
# TYPE node_vmstat_pswpout untyped node_vmstat_pswpout 35045 +# HELP node_watchdog_access_cs0 Value of /sys/class/watchdog//access_cs0 +# TYPE node_watchdog_access_cs0 gauge +node_watchdog_access_cs0{name="watchdog0"} 0 +# HELP node_watchdog_bootstatus Value of /sys/class/watchdog//bootstatus +# TYPE node_watchdog_bootstatus gauge +node_watchdog_bootstatus{name="watchdog0"} 1 +# HELP node_watchdog_fw_version Value of /sys/class/watchdog//fw_version +# TYPE node_watchdog_fw_version gauge +node_watchdog_fw_version{name="watchdog0"} 2 +# HELP node_watchdog_info Info of /sys/class/watchdog/ +# TYPE node_watchdog_info gauge +node_watchdog_info{identity="",name="watchdog1",options="",pretimeout_governor="",state="",status=""} 1 +node_watchdog_info{identity="Software Watchdog",name="watchdog0",options="0x8380",pretimeout_governor="noop",state="active",status="0x8000"} 1 +# HELP node_watchdog_nowayout Value of /sys/class/watchdog//nowayout +# TYPE node_watchdog_nowayout gauge +node_watchdog_nowayout{name="watchdog0"} 0 +# HELP node_watchdog_pretimeout_seconds Value of /sys/class/watchdog//pretimeout +# TYPE node_watchdog_pretimeout_seconds gauge +node_watchdog_pretimeout_seconds{name="watchdog0"} 120 +# HELP node_watchdog_timeleft_seconds Value of /sys/class/watchdog//timeleft +# TYPE node_watchdog_timeleft_seconds gauge +node_watchdog_timeleft_seconds{name="watchdog0"} 300 +# HELP node_watchdog_timeout_seconds Value of /sys/class/watchdog//timeout +# TYPE node_watchdog_timeout_seconds gauge +node_watchdog_timeout_seconds{name="watchdog0"} 60 # HELP node_wifi_interface_frequency_hertz The current frequency a WiFi interface is operating at, in hertz. # TYPE node_wifi_interface_frequency_hertz gauge node_wifi_interface_frequency_hertz{device="wlan0"} 2.412e+09 @@ -2790,6 +3444,10 @@ node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="aa:bb:cc:d # TYPE node_wifi_station_receive_bytes_total counter node_wifi_station_receive_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 node_wifi_station_receive_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 +# HELP node_wifi_station_received_packets_total The total number of packets received by a station. +# TYPE node_wifi_station_received_packets_total counter +node_wifi_station_received_packets_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 +node_wifi_station_received_packets_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 # HELP node_wifi_station_signal_dbm The current WiFi signal strength, in decibel-milliwatts (dBm). # TYPE node_wifi_station_signal_dbm gauge node_wifi_station_signal_dbm{device="wlan0",mac_address="01:02:03:04:05:06"} -26 @@ -2810,6 +3468,94 @@ node_wifi_station_transmit_failed_total{device="wlan0",mac_address="aa:bb:cc:dd: # TYPE node_wifi_station_transmit_retries_total counter node_wifi_station_transmit_retries_total{device="wlan0",mac_address="01:02:03:04:05:06"} 20 node_wifi_station_transmit_retries_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 10 +# HELP node_wifi_station_transmitted_packets_total The total number of packets transmitted by a station. 
+# TYPE node_wifi_station_transmitted_packets_total counter +node_wifi_station_transmitted_packets_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 +node_wifi_station_transmitted_packets_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 +# HELP node_xfrm_acquire_error_packets_total State hasn’t been fully acquired before use +# TYPE node_xfrm_acquire_error_packets_total counter +node_xfrm_acquire_error_packets_total 24532 +# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed +# TYPE node_xfrm_fwd_hdr_error_packets_total counter +node_xfrm_fwd_hdr_error_packets_total 6654 +# HELP node_xfrm_in_buffer_error_packets_total No buffer is left +# TYPE node_xfrm_in_buffer_error_packets_total counter +node_xfrm_in_buffer_error_packets_total 2 +# HELP node_xfrm_in_error_packets_total All errors not matched by other +# TYPE node_xfrm_in_error_packets_total counter +node_xfrm_in_error_packets_total 1 +# HELP node_xfrm_in_hdr_error_packets_total Header error +# TYPE node_xfrm_in_hdr_error_packets_total counter +node_xfrm_in_hdr_error_packets_total 4 +# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found +# TYPE node_xfrm_in_no_pols_packets_total counter +node_xfrm_in_no_pols_packets_total 65432 +# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong +# TYPE node_xfrm_in_no_states_packets_total counter +node_xfrm_in_no_states_packets_total 3 +# HELP node_xfrm_in_pol_block_packets_total Policy discards +# TYPE node_xfrm_in_pol_block_packets_total counter +node_xfrm_in_pol_block_packets_total 100 +# HELP node_xfrm_in_pol_error_packets_total Policy error +# TYPE node_xfrm_in_pol_error_packets_total counter +node_xfrm_in_pol_error_packets_total 10000 +# HELP node_xfrm_in_state_expired_packets_total State is expired +# TYPE node_xfrm_in_state_expired_packets_total counter +node_xfrm_in_state_expired_packets_total 7 +# HELP node_xfrm_in_state_invalid_packets_total State is invalid +# TYPE node_xfrm_in_state_invalid_packets_total counter +node_xfrm_in_state_invalid_packets_total 55555 +# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch +# TYPE node_xfrm_in_state_mismatch_packets_total counter +node_xfrm_in_state_mismatch_packets_total 23451 +# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_in_state_mode_error_packets_total counter +node_xfrm_in_state_mode_error_packets_total 100 +# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong +# TYPE node_xfrm_in_state_proto_error_packets_total counter +node_xfrm_in_state_proto_error_packets_total 40 +# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window +# TYPE node_xfrm_in_state_seq_error_packets_total counter +node_xfrm_in_state_seq_error_packets_total 6000 +# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. 
Inbound SAs are correct but SP rule is wrong +# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter +node_xfrm_in_tmpl_mismatch_packets_total 51 +# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error +# TYPE node_xfrm_out_bundle_check_error_packets_total counter +node_xfrm_out_bundle_check_error_packets_total 555 +# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error +# TYPE node_xfrm_out_bundle_gen_error_packets_total counter +node_xfrm_out_bundle_gen_error_packets_total 43321 +# HELP node_xfrm_out_error_packets_total All errors which is not matched others +# TYPE node_xfrm_out_error_packets_total counter +node_xfrm_out_error_packets_total 1e+06 +# HELP node_xfrm_out_no_states_packets_total No state is found +# TYPE node_xfrm_out_no_states_packets_total counter +node_xfrm_out_no_states_packets_total 869 +# HELP node_xfrm_out_pol_block_packets_total Policy discards +# TYPE node_xfrm_out_pol_block_packets_total counter +node_xfrm_out_pol_block_packets_total 43456 +# HELP node_xfrm_out_pol_dead_packets_total Policy is dead +# TYPE node_xfrm_out_pol_dead_packets_total counter +node_xfrm_out_pol_dead_packets_total 7656 +# HELP node_xfrm_out_pol_error_packets_total Policy error +# TYPE node_xfrm_out_pol_error_packets_total counter +node_xfrm_out_pol_error_packets_total 1454 +# HELP node_xfrm_out_state_expired_packets_total State is expired +# TYPE node_xfrm_out_state_expired_packets_total counter +node_xfrm_out_state_expired_packets_total 565 +# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired +# TYPE node_xfrm_out_state_invalid_packets_total counter +node_xfrm_out_state_invalid_packets_total 28765 +# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error +# TYPE node_xfrm_out_state_mode_error_packets_total counter +node_xfrm_out_state_mode_error_packets_total 8 +# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error +# TYPE node_xfrm_out_state_proto_error_packets_total counter +node_xfrm_out_state_proto_error_packets_total 4542 +# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow +# TYPE node_xfrm_out_state_seq_error_packets_total counter +node_xfrm_out_state_seq_error_packets_total 543 # HELP node_xfs_allocation_btree_compares_total Number of allocation B-tree compares for a filesystem. # TYPE node_xfs_allocation_btree_compares_total counter node_xfs_allocation_btree_compares_total{device="sda1"} 0 @@ -2879,9 +3625,30 @@ node_xfs_extent_allocation_extents_allocated_total{device="sda1"} 1 # HELP node_xfs_extent_allocation_extents_freed_total Number of extents freed for a filesystem. # TYPE node_xfs_extent_allocation_extents_freed_total counter node_xfs_extent_allocation_extents_freed_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_attempts_total Number of times the OS looked for an XFS inode in the inode cache. +# TYPE node_xfs_inode_operation_attempts_total counter +node_xfs_inode_operation_attempts_total{device="sda1"} 5 +# HELP node_xfs_inode_operation_attribute_changes_total Number of times the OS explicitly changed the attributes of an XFS inode. +# TYPE node_xfs_inode_operation_attribute_changes_total counter +node_xfs_inode_operation_attribute_changes_total{device="sda1"} 1 +# HELP node_xfs_inode_operation_duplicates_total Number of times the OS tried to add a missing XFS inode to the inode cache, but found it had already been added by another process. 
+# TYPE node_xfs_inode_operation_duplicates_total counter +node_xfs_inode_operation_duplicates_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_found_total Number of times the OS looked for and found an XFS inode in the inode cache. +# TYPE node_xfs_inode_operation_found_total counter +node_xfs_inode_operation_found_total{device="sda1"} 1 +# HELP node_xfs_inode_operation_missed_total Number of times the OS looked for an XFS inode in the cache, but did not find it. +# TYPE node_xfs_inode_operation_missed_total counter +node_xfs_inode_operation_missed_total{device="sda1"} 4 +# HELP node_xfs_inode_operation_reclaims_total Number of times the OS reclaimed an XFS inode from the inode cache to free memory for another purpose. +# TYPE node_xfs_inode_operation_reclaims_total counter +node_xfs_inode_operation_reclaims_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_recycled_total Number of times the OS found an XFS inode in the cache, but could not use it as it was being recycled. +# TYPE node_xfs_inode_operation_recycled_total counter +node_xfs_inode_operation_recycled_total{device="sda1"} 0 # HELP node_xfs_read_calls_total Number of read(2) system calls made to files in a filesystem. # TYPE node_xfs_read_calls_total counter -node_xfs_read_calls_total{device="sda1"} 28 +node_xfs_read_calls_total{device="sda1"} 0 # HELP node_xfs_vnode_active_total Number of vnodes not on free lists for a filesystem. # TYPE node_xfs_vnode_active_total counter node_xfs_vnode_active_total{device="sda1"} 4 @@ -2905,7 +3672,7 @@ node_xfs_vnode_release_total{device="sda1"} 1 node_xfs_vnode_remove_total{device="sda1"} 1 # HELP node_xfs_write_calls_total Number of write(2) system calls made to files in a filesystem. # TYPE node_xfs_write_calls_total counter -node_xfs_write_calls_total{device="sda1"} 0 +node_xfs_write_calls_total{device="sda1"} 28 # HELP node_zfs_abd_linear_cnt kstat.zfs.misc.abdstats.linear_cnt # TYPE node_zfs_abd_linear_cnt untyped node_zfs_abd_linear_cnt 62 @@ -3155,6 +3922,9 @@ node_zfs_arc_l2_writes_lock_retry 0 # HELP node_zfs_arc_l2_writes_sent kstat.zfs.misc.arcstats.l2_writes_sent # TYPE node_zfs_arc_l2_writes_sent untyped node_zfs_arc_l2_writes_sent 0 +# HELP node_zfs_arc_memory_available_bytes kstat.zfs.misc.arcstats.memory_available_bytes +# TYPE node_zfs_arc_memory_available_bytes untyped +node_zfs_arc_memory_available_bytes -9.223372036854776e+17 # HELP node_zfs_arc_memory_direct_count kstat.zfs.misc.arcstats.memory_direct_count # TYPE node_zfs_arc_memory_direct_count untyped node_zfs_arc_memory_direct_count 542 @@ -3242,193 +4012,193 @@ node_zfs_arc_prefetch_metadata_misses 16071 # HELP node_zfs_arc_size kstat.zfs.misc.arcstats.size # TYPE node_zfs_arc_size untyped node_zfs_arc_size 1.603939792e+09 -# HELP node_zfs_dbuf_dbuf_cache_count kstat.zfs.misc.dbuf_stats.dbuf_cache_count +# HELP node_zfs_dbuf_dbuf_cache_count kstat.zfs.misc.dbufstats.dbuf_cache_count # TYPE node_zfs_dbuf_dbuf_cache_count untyped node_zfs_dbuf_dbuf_cache_count 27 -# HELP node_zfs_dbuf_dbuf_cache_hiwater_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_hiwater_bytes +# HELP node_zfs_dbuf_dbuf_cache_hiwater_bytes kstat.zfs.misc.dbufstats.dbuf_cache_hiwater_bytes # TYPE node_zfs_dbuf_dbuf_cache_hiwater_bytes untyped node_zfs_dbuf_dbuf_cache_hiwater_bytes 6.9117804e+07 -# HELP node_zfs_dbuf_dbuf_cache_level_0 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_0 +# HELP node_zfs_dbuf_dbuf_cache_level_0 kstat.zfs.misc.dbufstats.dbuf_cache_level_0 # TYPE node_zfs_dbuf_dbuf_cache_level_0 untyped 
node_zfs_dbuf_dbuf_cache_level_0 27 -# HELP node_zfs_dbuf_dbuf_cache_level_0_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_0_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_0_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_0_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_0_bytes untyped node_zfs_dbuf_dbuf_cache_level_0_bytes 302080 -# HELP node_zfs_dbuf_dbuf_cache_level_1 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_1 +# HELP node_zfs_dbuf_dbuf_cache_level_1 kstat.zfs.misc.dbufstats.dbuf_cache_level_1 # TYPE node_zfs_dbuf_dbuf_cache_level_1 untyped node_zfs_dbuf_dbuf_cache_level_1 0 -# HELP node_zfs_dbuf_dbuf_cache_level_10 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_10 +# HELP node_zfs_dbuf_dbuf_cache_level_10 kstat.zfs.misc.dbufstats.dbuf_cache_level_10 # TYPE node_zfs_dbuf_dbuf_cache_level_10 untyped node_zfs_dbuf_dbuf_cache_level_10 0 -# HELP node_zfs_dbuf_dbuf_cache_level_10_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_10_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_10_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_10_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_10_bytes untyped node_zfs_dbuf_dbuf_cache_level_10_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_11 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_11 +# HELP node_zfs_dbuf_dbuf_cache_level_11 kstat.zfs.misc.dbufstats.dbuf_cache_level_11 # TYPE node_zfs_dbuf_dbuf_cache_level_11 untyped node_zfs_dbuf_dbuf_cache_level_11 0 -# HELP node_zfs_dbuf_dbuf_cache_level_11_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_11_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_11_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_11_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_11_bytes untyped node_zfs_dbuf_dbuf_cache_level_11_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_1_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_1_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_1_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_1_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_1_bytes untyped node_zfs_dbuf_dbuf_cache_level_1_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_2 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_2 +# HELP node_zfs_dbuf_dbuf_cache_level_2 kstat.zfs.misc.dbufstats.dbuf_cache_level_2 # TYPE node_zfs_dbuf_dbuf_cache_level_2 untyped node_zfs_dbuf_dbuf_cache_level_2 0 -# HELP node_zfs_dbuf_dbuf_cache_level_2_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_2_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_2_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_2_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_2_bytes untyped node_zfs_dbuf_dbuf_cache_level_2_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_3 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_3 +# HELP node_zfs_dbuf_dbuf_cache_level_3 kstat.zfs.misc.dbufstats.dbuf_cache_level_3 # TYPE node_zfs_dbuf_dbuf_cache_level_3 untyped node_zfs_dbuf_dbuf_cache_level_3 0 -# HELP node_zfs_dbuf_dbuf_cache_level_3_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_3_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_3_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_3_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_3_bytes untyped node_zfs_dbuf_dbuf_cache_level_3_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_4 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_4 +# HELP node_zfs_dbuf_dbuf_cache_level_4 kstat.zfs.misc.dbufstats.dbuf_cache_level_4 # TYPE node_zfs_dbuf_dbuf_cache_level_4 untyped node_zfs_dbuf_dbuf_cache_level_4 0 -# HELP node_zfs_dbuf_dbuf_cache_level_4_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_4_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_4_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_4_bytes # TYPE 
node_zfs_dbuf_dbuf_cache_level_4_bytes untyped node_zfs_dbuf_dbuf_cache_level_4_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_5 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_5 +# HELP node_zfs_dbuf_dbuf_cache_level_5 kstat.zfs.misc.dbufstats.dbuf_cache_level_5 # TYPE node_zfs_dbuf_dbuf_cache_level_5 untyped node_zfs_dbuf_dbuf_cache_level_5 0 -# HELP node_zfs_dbuf_dbuf_cache_level_5_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_5_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_5_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_5_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_5_bytes untyped node_zfs_dbuf_dbuf_cache_level_5_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_6 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_6 +# HELP node_zfs_dbuf_dbuf_cache_level_6 kstat.zfs.misc.dbufstats.dbuf_cache_level_6 # TYPE node_zfs_dbuf_dbuf_cache_level_6 untyped node_zfs_dbuf_dbuf_cache_level_6 0 -# HELP node_zfs_dbuf_dbuf_cache_level_6_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_6_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_6_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_6_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_6_bytes untyped node_zfs_dbuf_dbuf_cache_level_6_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_7 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_7 +# HELP node_zfs_dbuf_dbuf_cache_level_7 kstat.zfs.misc.dbufstats.dbuf_cache_level_7 # TYPE node_zfs_dbuf_dbuf_cache_level_7 untyped node_zfs_dbuf_dbuf_cache_level_7 0 -# HELP node_zfs_dbuf_dbuf_cache_level_7_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_7_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_7_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_7_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_7_bytes untyped node_zfs_dbuf_dbuf_cache_level_7_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_8 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_8 +# HELP node_zfs_dbuf_dbuf_cache_level_8 kstat.zfs.misc.dbufstats.dbuf_cache_level_8 # TYPE node_zfs_dbuf_dbuf_cache_level_8 untyped node_zfs_dbuf_dbuf_cache_level_8 0 -# HELP node_zfs_dbuf_dbuf_cache_level_8_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_8_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_8_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_8_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_8_bytes untyped node_zfs_dbuf_dbuf_cache_level_8_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_level_9 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_9 +# HELP node_zfs_dbuf_dbuf_cache_level_9 kstat.zfs.misc.dbufstats.dbuf_cache_level_9 # TYPE node_zfs_dbuf_dbuf_cache_level_9 untyped node_zfs_dbuf_dbuf_cache_level_9 0 -# HELP node_zfs_dbuf_dbuf_cache_level_9_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_9_bytes +# HELP node_zfs_dbuf_dbuf_cache_level_9_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_9_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_9_bytes untyped node_zfs_dbuf_dbuf_cache_level_9_bytes 0 -# HELP node_zfs_dbuf_dbuf_cache_lowater_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_lowater_bytes +# HELP node_zfs_dbuf_dbuf_cache_lowater_bytes kstat.zfs.misc.dbufstats.dbuf_cache_lowater_bytes # TYPE node_zfs_dbuf_dbuf_cache_lowater_bytes untyped node_zfs_dbuf_dbuf_cache_lowater_bytes 5.6550932e+07 -# HELP node_zfs_dbuf_dbuf_cache_max_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_max_bytes +# HELP node_zfs_dbuf_dbuf_cache_max_bytes kstat.zfs.misc.dbufstats.dbuf_cache_max_bytes # TYPE node_zfs_dbuf_dbuf_cache_max_bytes untyped node_zfs_dbuf_dbuf_cache_max_bytes 6.2834368e+07 -# HELP node_zfs_dbuf_dbuf_cache_size kstat.zfs.misc.dbuf_stats.dbuf_cache_size +# HELP node_zfs_dbuf_dbuf_cache_size kstat.zfs.misc.dbufstats.dbuf_cache_size # TYPE 
node_zfs_dbuf_dbuf_cache_size untyped node_zfs_dbuf_dbuf_cache_size 302080 -# HELP node_zfs_dbuf_dbuf_cache_size_max kstat.zfs.misc.dbuf_stats.dbuf_cache_size_max +# HELP node_zfs_dbuf_dbuf_cache_size_max kstat.zfs.misc.dbufstats.dbuf_cache_size_max # TYPE node_zfs_dbuf_dbuf_cache_size_max untyped node_zfs_dbuf_dbuf_cache_size_max 394240 -# HELP node_zfs_dbuf_dbuf_cache_total_evicts kstat.zfs.misc.dbuf_stats.dbuf_cache_total_evicts +# HELP node_zfs_dbuf_dbuf_cache_total_evicts kstat.zfs.misc.dbufstats.dbuf_cache_total_evicts # TYPE node_zfs_dbuf_dbuf_cache_total_evicts untyped node_zfs_dbuf_dbuf_cache_total_evicts 0 -# HELP node_zfs_dbuf_hash_chain_max kstat.zfs.misc.dbuf_stats.hash_chain_max +# HELP node_zfs_dbuf_hash_chain_max kstat.zfs.misc.dbufstats.hash_chain_max # TYPE node_zfs_dbuf_hash_chain_max untyped node_zfs_dbuf_hash_chain_max 0 -# HELP node_zfs_dbuf_hash_chains kstat.zfs.misc.dbuf_stats.hash_chains +# HELP node_zfs_dbuf_hash_chains kstat.zfs.misc.dbufstats.hash_chains # TYPE node_zfs_dbuf_hash_chains untyped node_zfs_dbuf_hash_chains 0 -# HELP node_zfs_dbuf_hash_collisions kstat.zfs.misc.dbuf_stats.hash_collisions +# HELP node_zfs_dbuf_hash_collisions kstat.zfs.misc.dbufstats.hash_collisions # TYPE node_zfs_dbuf_hash_collisions untyped node_zfs_dbuf_hash_collisions 0 -# HELP node_zfs_dbuf_hash_dbuf_level_0 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_0 +# HELP node_zfs_dbuf_hash_dbuf_level_0 kstat.zfs.misc.dbufstats.hash_dbuf_level_0 # TYPE node_zfs_dbuf_hash_dbuf_level_0 untyped node_zfs_dbuf_hash_dbuf_level_0 37 -# HELP node_zfs_dbuf_hash_dbuf_level_0_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_0_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_0_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_0_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_0_bytes untyped node_zfs_dbuf_hash_dbuf_level_0_bytes 465920 -# HELP node_zfs_dbuf_hash_dbuf_level_1 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_1 +# HELP node_zfs_dbuf_hash_dbuf_level_1 kstat.zfs.misc.dbufstats.hash_dbuf_level_1 # TYPE node_zfs_dbuf_hash_dbuf_level_1 untyped node_zfs_dbuf_hash_dbuf_level_1 10 -# HELP node_zfs_dbuf_hash_dbuf_level_10 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_10 +# HELP node_zfs_dbuf_hash_dbuf_level_10 kstat.zfs.misc.dbufstats.hash_dbuf_level_10 # TYPE node_zfs_dbuf_hash_dbuf_level_10 untyped node_zfs_dbuf_hash_dbuf_level_10 0 -# HELP node_zfs_dbuf_hash_dbuf_level_10_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_10_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_10_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_10_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_10_bytes untyped node_zfs_dbuf_hash_dbuf_level_10_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_11 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_11 +# HELP node_zfs_dbuf_hash_dbuf_level_11 kstat.zfs.misc.dbufstats.hash_dbuf_level_11 # TYPE node_zfs_dbuf_hash_dbuf_level_11 untyped node_zfs_dbuf_hash_dbuf_level_11 0 -# HELP node_zfs_dbuf_hash_dbuf_level_11_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_11_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_11_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_11_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_11_bytes untyped node_zfs_dbuf_hash_dbuf_level_11_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_1_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_1_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_1_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_1_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_1_bytes untyped node_zfs_dbuf_hash_dbuf_level_1_bytes 1.31072e+06 -# HELP node_zfs_dbuf_hash_dbuf_level_2 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_2 
+# HELP node_zfs_dbuf_hash_dbuf_level_2 kstat.zfs.misc.dbufstats.hash_dbuf_level_2 # TYPE node_zfs_dbuf_hash_dbuf_level_2 untyped node_zfs_dbuf_hash_dbuf_level_2 2 -# HELP node_zfs_dbuf_hash_dbuf_level_2_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_2_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_2_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_2_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_2_bytes untyped node_zfs_dbuf_hash_dbuf_level_2_bytes 262144 -# HELP node_zfs_dbuf_hash_dbuf_level_3 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_3 +# HELP node_zfs_dbuf_hash_dbuf_level_3 kstat.zfs.misc.dbufstats.hash_dbuf_level_3 # TYPE node_zfs_dbuf_hash_dbuf_level_3 untyped node_zfs_dbuf_hash_dbuf_level_3 2 -# HELP node_zfs_dbuf_hash_dbuf_level_3_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_3_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_3_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_3_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_3_bytes untyped node_zfs_dbuf_hash_dbuf_level_3_bytes 262144 -# HELP node_zfs_dbuf_hash_dbuf_level_4 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_4 +# HELP node_zfs_dbuf_hash_dbuf_level_4 kstat.zfs.misc.dbufstats.hash_dbuf_level_4 # TYPE node_zfs_dbuf_hash_dbuf_level_4 untyped node_zfs_dbuf_hash_dbuf_level_4 2 -# HELP node_zfs_dbuf_hash_dbuf_level_4_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_4_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_4_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_4_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_4_bytes untyped node_zfs_dbuf_hash_dbuf_level_4_bytes 262144 -# HELP node_zfs_dbuf_hash_dbuf_level_5 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_5 +# HELP node_zfs_dbuf_hash_dbuf_level_5 kstat.zfs.misc.dbufstats.hash_dbuf_level_5 # TYPE node_zfs_dbuf_hash_dbuf_level_5 untyped node_zfs_dbuf_hash_dbuf_level_5 2 -# HELP node_zfs_dbuf_hash_dbuf_level_5_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_5_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_5_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_5_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_5_bytes untyped node_zfs_dbuf_hash_dbuf_level_5_bytes 262144 -# HELP node_zfs_dbuf_hash_dbuf_level_6 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_6 +# HELP node_zfs_dbuf_hash_dbuf_level_6 kstat.zfs.misc.dbufstats.hash_dbuf_level_6 # TYPE node_zfs_dbuf_hash_dbuf_level_6 untyped node_zfs_dbuf_hash_dbuf_level_6 0 -# HELP node_zfs_dbuf_hash_dbuf_level_6_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_6_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_6_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_6_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_6_bytes untyped node_zfs_dbuf_hash_dbuf_level_6_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_7 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_7 +# HELP node_zfs_dbuf_hash_dbuf_level_7 kstat.zfs.misc.dbufstats.hash_dbuf_level_7 # TYPE node_zfs_dbuf_hash_dbuf_level_7 untyped node_zfs_dbuf_hash_dbuf_level_7 0 -# HELP node_zfs_dbuf_hash_dbuf_level_7_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_7_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_7_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_7_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_7_bytes untyped node_zfs_dbuf_hash_dbuf_level_7_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_8 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_8 +# HELP node_zfs_dbuf_hash_dbuf_level_8 kstat.zfs.misc.dbufstats.hash_dbuf_level_8 # TYPE node_zfs_dbuf_hash_dbuf_level_8 untyped node_zfs_dbuf_hash_dbuf_level_8 0 -# HELP node_zfs_dbuf_hash_dbuf_level_8_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_8_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_8_bytes 
kstat.zfs.misc.dbufstats.hash_dbuf_level_8_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_8_bytes untyped node_zfs_dbuf_hash_dbuf_level_8_bytes 0 -# HELP node_zfs_dbuf_hash_dbuf_level_9 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_9 +# HELP node_zfs_dbuf_hash_dbuf_level_9 kstat.zfs.misc.dbufstats.hash_dbuf_level_9 # TYPE node_zfs_dbuf_hash_dbuf_level_9 untyped node_zfs_dbuf_hash_dbuf_level_9 0 -# HELP node_zfs_dbuf_hash_dbuf_level_9_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_9_bytes +# HELP node_zfs_dbuf_hash_dbuf_level_9_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_9_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_9_bytes untyped node_zfs_dbuf_hash_dbuf_level_9_bytes 0 -# HELP node_zfs_dbuf_hash_elements kstat.zfs.misc.dbuf_stats.hash_elements +# HELP node_zfs_dbuf_hash_elements kstat.zfs.misc.dbufstats.hash_elements # TYPE node_zfs_dbuf_hash_elements untyped node_zfs_dbuf_hash_elements 55 -# HELP node_zfs_dbuf_hash_elements_max kstat.zfs.misc.dbuf_stats.hash_elements_max +# HELP node_zfs_dbuf_hash_elements_max kstat.zfs.misc.dbufstats.hash_elements_max # TYPE node_zfs_dbuf_hash_elements_max untyped node_zfs_dbuf_hash_elements_max 55 -# HELP node_zfs_dbuf_hash_hits kstat.zfs.misc.dbuf_stats.hash_hits +# HELP node_zfs_dbuf_hash_hits kstat.zfs.misc.dbufstats.hash_hits # TYPE node_zfs_dbuf_hash_hits untyped node_zfs_dbuf_hash_hits 108807 -# HELP node_zfs_dbuf_hash_insert_race kstat.zfs.misc.dbuf_stats.hash_insert_race +# HELP node_zfs_dbuf_hash_insert_race kstat.zfs.misc.dbufstats.hash_insert_race # TYPE node_zfs_dbuf_hash_insert_race untyped node_zfs_dbuf_hash_insert_race 0 -# HELP node_zfs_dbuf_hash_misses kstat.zfs.misc.dbuf_stats.hash_misses +# HELP node_zfs_dbuf_hash_misses kstat.zfs.misc.dbufstats.hash_misses # TYPE node_zfs_dbuf_hash_misses untyped node_zfs_dbuf_hash_misses 1851 # HELP node_zfs_dmu_tx_dmu_tx_assigned kstat.zfs.misc.dmu_tx.dmu_tx_assigned @@ -3684,90 +4454,319 @@ node_zfs_zil_zil_itx_needcopy_count 0 # TYPE node_zfs_zpool_dataset_nread untyped node_zfs_zpool_dataset_nread{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nread{dataset="pool1/dataset1",zpool="pool1"} 28 +node_zfs_zpool_dataset_nread{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_nread{dataset="pool3/dataset with space",zpool="pool3"} 28 node_zfs_zpool_dataset_nread{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nread{dataset="poolz1/dataset1",zpool="poolz1"} 28 # HELP node_zfs_zpool_dataset_nunlinked kstat.zfs.misc.objset.nunlinked # TYPE node_zfs_zpool_dataset_nunlinked untyped node_zfs_zpool_dataset_nunlinked{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nunlinked{dataset="pool1/dataset1",zpool="pool1"} 3 +node_zfs_zpool_dataset_nunlinked{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_nunlinked{dataset="pool3/dataset with space",zpool="pool3"} 3 node_zfs_zpool_dataset_nunlinked{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nunlinked{dataset="poolz1/dataset1",zpool="poolz1"} 14 # HELP node_zfs_zpool_dataset_nunlinks kstat.zfs.misc.objset.nunlinks # TYPE node_zfs_zpool_dataset_nunlinks untyped node_zfs_zpool_dataset_nunlinks{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nunlinks{dataset="pool1/dataset1",zpool="pool1"} 3 +node_zfs_zpool_dataset_nunlinks{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_nunlinks{dataset="pool3/dataset with space",zpool="pool3"} 3 node_zfs_zpool_dataset_nunlinks{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nunlinks{dataset="poolz1/dataset1",zpool="poolz1"} 14 # HELP 
node_zfs_zpool_dataset_nwritten kstat.zfs.misc.objset.nwritten # TYPE node_zfs_zpool_dataset_nwritten untyped node_zfs_zpool_dataset_nwritten{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nwritten{dataset="pool1/dataset1",zpool="pool1"} 12302 +node_zfs_zpool_dataset_nwritten{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_nwritten{dataset="pool3/dataset with space",zpool="pool3"} 12302 node_zfs_zpool_dataset_nwritten{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nwritten{dataset="poolz1/dataset1",zpool="poolz1"} 32806 # HELP node_zfs_zpool_dataset_reads kstat.zfs.misc.objset.reads # TYPE node_zfs_zpool_dataset_reads untyped node_zfs_zpool_dataset_reads{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_reads{dataset="pool1/dataset1",zpool="pool1"} 2 +node_zfs_zpool_dataset_reads{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_reads{dataset="pool3/dataset with space",zpool="pool3"} 2 node_zfs_zpool_dataset_reads{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_reads{dataset="poolz1/dataset1",zpool="poolz1"} 2 # HELP node_zfs_zpool_dataset_writes kstat.zfs.misc.objset.writes # TYPE node_zfs_zpool_dataset_writes untyped node_zfs_zpool_dataset_writes{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_writes{dataset="pool1/dataset1",zpool="pool1"} 4 +node_zfs_zpool_dataset_writes{dataset="pool3",zpool="pool3"} 0 +node_zfs_zpool_dataset_writes{dataset="pool3/dataset with space",zpool="pool3"} 4 node_zfs_zpool_dataset_writes{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_writes{dataset="poolz1/dataset1",zpool="poolz1"} 10 # HELP node_zfs_zpool_nread kstat.zfs.misc.io.nread # TYPE node_zfs_zpool_nread untyped node_zfs_zpool_nread{zpool="pool1"} 1.88416e+06 +node_zfs_zpool_nread{zpool="pool3"} 1.88416e+06 node_zfs_zpool_nread{zpool="poolz1"} 2.82624e+06 # HELP node_zfs_zpool_nwritten kstat.zfs.misc.io.nwritten # TYPE node_zfs_zpool_nwritten untyped node_zfs_zpool_nwritten{zpool="pool1"} 3.206144e+06 +node_zfs_zpool_nwritten{zpool="pool3"} 3.206144e+06 node_zfs_zpool_nwritten{zpool="poolz1"} 2.680501248e+09 # HELP node_zfs_zpool_rcnt kstat.zfs.misc.io.rcnt # TYPE node_zfs_zpool_rcnt untyped node_zfs_zpool_rcnt{zpool="pool1"} 0 +node_zfs_zpool_rcnt{zpool="pool3"} 0 node_zfs_zpool_rcnt{zpool="poolz1"} 0 # HELP node_zfs_zpool_reads kstat.zfs.misc.io.reads # TYPE node_zfs_zpool_reads untyped node_zfs_zpool_reads{zpool="pool1"} 22 +node_zfs_zpool_reads{zpool="pool3"} 22 node_zfs_zpool_reads{zpool="poolz1"} 33 # HELP node_zfs_zpool_rlentime kstat.zfs.misc.io.rlentime # TYPE node_zfs_zpool_rlentime untyped node_zfs_zpool_rlentime{zpool="pool1"} 1.04112268e+08 +node_zfs_zpool_rlentime{zpool="pool3"} 1.04112268e+08 node_zfs_zpool_rlentime{zpool="poolz1"} 6.472105124093e+12 # HELP node_zfs_zpool_rtime kstat.zfs.misc.io.rtime # TYPE node_zfs_zpool_rtime untyped node_zfs_zpool_rtime{zpool="pool1"} 2.4168078e+07 +node_zfs_zpool_rtime{zpool="pool3"} 2.4168078e+07 node_zfs_zpool_rtime{zpool="poolz1"} 9.82909164e+09 # HELP node_zfs_zpool_rupdate kstat.zfs.misc.io.rupdate # TYPE node_zfs_zpool_rupdate untyped node_zfs_zpool_rupdate{zpool="pool1"} 7.921048984922e+13 +node_zfs_zpool_rupdate{zpool="pool3"} 7.921048984922e+13 node_zfs_zpool_rupdate{zpool="poolz1"} 1.10734831944501e+14 +# HELP node_zfs_zpool_state kstat.zfs.misc.state +# TYPE node_zfs_zpool_state gauge +node_zfs_zpool_state{state="degraded",zpool="pool1"} 0 +node_zfs_zpool_state{state="degraded",zpool="pool2"} 0 +node_zfs_zpool_state{state="degraded",zpool="pool3"} 0 
+node_zfs_zpool_state{state="degraded",zpool="poolz1"} 1 +node_zfs_zpool_state{state="faulted",zpool="pool1"} 0 +node_zfs_zpool_state{state="faulted",zpool="pool2"} 0 +node_zfs_zpool_state{state="faulted",zpool="pool3"} 0 +node_zfs_zpool_state{state="faulted",zpool="poolz1"} 0 +node_zfs_zpool_state{state="offline",zpool="pool1"} 0 +node_zfs_zpool_state{state="offline",zpool="pool2"} 0 +node_zfs_zpool_state{state="offline",zpool="pool3"} 0 +node_zfs_zpool_state{state="offline",zpool="poolz1"} 0 +node_zfs_zpool_state{state="online",zpool="pool1"} 1 +node_zfs_zpool_state{state="online",zpool="pool2"} 0 +node_zfs_zpool_state{state="online",zpool="pool3"} 1 +node_zfs_zpool_state{state="online",zpool="poolz1"} 0 +node_zfs_zpool_state{state="removed",zpool="pool1"} 0 +node_zfs_zpool_state{state="removed",zpool="pool2"} 0 +node_zfs_zpool_state{state="removed",zpool="pool3"} 0 +node_zfs_zpool_state{state="removed",zpool="poolz1"} 0 +node_zfs_zpool_state{state="suspended",zpool="pool1"} 0 +node_zfs_zpool_state{state="suspended",zpool="pool2"} 1 +node_zfs_zpool_state{state="suspended",zpool="pool3"} 0 +node_zfs_zpool_state{state="suspended",zpool="poolz1"} 0 +node_zfs_zpool_state{state="unavail",zpool="pool1"} 0 +node_zfs_zpool_state{state="unavail",zpool="pool2"} 0 +node_zfs_zpool_state{state="unavail",zpool="pool3"} 0 +node_zfs_zpool_state{state="unavail",zpool="poolz1"} 0 # HELP node_zfs_zpool_wcnt kstat.zfs.misc.io.wcnt # TYPE node_zfs_zpool_wcnt untyped node_zfs_zpool_wcnt{zpool="pool1"} 0 +node_zfs_zpool_wcnt{zpool="pool3"} 0 node_zfs_zpool_wcnt{zpool="poolz1"} 0 # HELP node_zfs_zpool_wlentime kstat.zfs.misc.io.wlentime # TYPE node_zfs_zpool_wlentime untyped node_zfs_zpool_wlentime{zpool="pool1"} 1.04112268e+08 +node_zfs_zpool_wlentime{zpool="pool3"} 1.04112268e+08 node_zfs_zpool_wlentime{zpool="poolz1"} 6.472105124093e+12 # HELP node_zfs_zpool_writes kstat.zfs.misc.io.writes # TYPE node_zfs_zpool_writes untyped node_zfs_zpool_writes{zpool="pool1"} 132 +node_zfs_zpool_writes{zpool="pool3"} 132 node_zfs_zpool_writes{zpool="poolz1"} 25294 # HELP node_zfs_zpool_wtime kstat.zfs.misc.io.wtime # TYPE node_zfs_zpool_wtime untyped node_zfs_zpool_wtime{zpool="pool1"} 7.155162e+06 +node_zfs_zpool_wtime{zpool="pool3"} 7.155162e+06 node_zfs_zpool_wtime{zpool="poolz1"} 9.673715628e+09 # HELP node_zfs_zpool_wupdate kstat.zfs.misc.io.wupdate # TYPE node_zfs_zpool_wupdate untyped node_zfs_zpool_wupdate{zpool="pool1"} 7.9210489694949e+13 +node_zfs_zpool_wupdate{zpool="pool3"} 7.9210489694949e+13 node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14 +# HELP node_zoneinfo_high_pages Zone watermark pages_high +# TYPE node_zoneinfo_high_pages gauge +node_zoneinfo_high_pages{node="0",zone="DMA"} 14 +node_zoneinfo_high_pages{node="0",zone="DMA32"} 2122 +node_zoneinfo_high_pages{node="0",zone="Device"} 0 +node_zoneinfo_high_pages{node="0",zone="Movable"} 0 +node_zoneinfo_high_pages{node="0",zone="Normal"} 31113 +# HELP node_zoneinfo_low_pages Zone watermark pages_low +# TYPE node_zoneinfo_low_pages gauge +node_zoneinfo_low_pages{node="0",zone="DMA"} 11 +node_zoneinfo_low_pages{node="0",zone="DMA32"} 1600 +node_zoneinfo_low_pages{node="0",zone="Device"} 0 +node_zoneinfo_low_pages{node="0",zone="Movable"} 0 +node_zoneinfo_low_pages{node="0",zone="Normal"} 23461 +# HELP node_zoneinfo_managed_pages Present pages managed by the buddy system +# TYPE node_zoneinfo_managed_pages gauge +node_zoneinfo_managed_pages{node="0",zone="DMA"} 3973 +node_zoneinfo_managed_pages{node="0",zone="DMA32"} 530339 
+node_zoneinfo_managed_pages{node="0",zone="Device"} 0 +node_zoneinfo_managed_pages{node="0",zone="Movable"} 0 +node_zoneinfo_managed_pages{node="0",zone="Normal"} 7.654794e+06 +# HELP node_zoneinfo_min_pages Zone watermark pages_min +# TYPE node_zoneinfo_min_pages gauge +node_zoneinfo_min_pages{node="0",zone="DMA"} 8 +node_zoneinfo_min_pages{node="0",zone="DMA32"} 1078 +node_zoneinfo_min_pages{node="0",zone="Device"} 0 +node_zoneinfo_min_pages{node="0",zone="Movable"} 0 +node_zoneinfo_min_pages{node="0",zone="Normal"} 15809 +# HELP node_zoneinfo_nr_active_anon_pages Number of anonymous pages recently more used +# TYPE node_zoneinfo_nr_active_anon_pages gauge +node_zoneinfo_nr_active_anon_pages{node="0",zone="DMA"} 1.175853e+06 +# HELP node_zoneinfo_nr_active_file_pages Number of active pages with file-backing +# TYPE node_zoneinfo_nr_active_file_pages gauge +node_zoneinfo_nr_active_file_pages{node="0",zone="DMA"} 688810 +# HELP node_zoneinfo_nr_anon_pages Number of anonymous pages currently used by the system +# TYPE node_zoneinfo_nr_anon_pages gauge +node_zoneinfo_nr_anon_pages{node="0",zone="DMA"} 1.156608e+06 +# HELP node_zoneinfo_nr_anon_transparent_hugepages Number of anonymous transparent huge pages currently used by the system +# TYPE node_zoneinfo_nr_anon_transparent_hugepages gauge +node_zoneinfo_nr_anon_transparent_hugepages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_dirtied_total Page dirtyings since bootup +# TYPE node_zoneinfo_nr_dirtied_total counter +node_zoneinfo_nr_dirtied_total{node="0",zone="DMA"} 1.189097e+06 +# HELP node_zoneinfo_nr_dirty_pages Number of dirty pages +# TYPE node_zoneinfo_nr_dirty_pages gauge +node_zoneinfo_nr_dirty_pages{node="0",zone="DMA"} 103 +# HELP node_zoneinfo_nr_file_pages Number of file pages +# TYPE node_zoneinfo_nr_file_pages gauge +node_zoneinfo_nr_file_pages{node="0",zone="DMA"} 1.740118e+06 +# HELP node_zoneinfo_nr_free_pages Total number of free pages in the zone +# TYPE node_zoneinfo_nr_free_pages gauge +node_zoneinfo_nr_free_pages{node="0",zone="DMA"} 2949 +node_zoneinfo_nr_free_pages{node="0",zone="DMA32"} 528427 +node_zoneinfo_nr_free_pages{node="0",zone="Normal"} 4.539739e+06 +# HELP node_zoneinfo_nr_inactive_anon_pages Number of anonymous pages recently less used +# TYPE node_zoneinfo_nr_inactive_anon_pages gauge +node_zoneinfo_nr_inactive_anon_pages{node="0",zone="DMA"} 95612 +# HELP node_zoneinfo_nr_inactive_file_pages Number of inactive pages with file-backing +# TYPE node_zoneinfo_nr_inactive_file_pages gauge +node_zoneinfo_nr_inactive_file_pages{node="0",zone="DMA"} 723339 +# HELP node_zoneinfo_nr_isolated_anon_pages Temporary isolated pages from anon lru +# TYPE node_zoneinfo_nr_isolated_anon_pages gauge +node_zoneinfo_nr_isolated_anon_pages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_isolated_file_pages Temporary isolated pages from file lru +# TYPE node_zoneinfo_nr_isolated_file_pages gauge +node_zoneinfo_nr_isolated_file_pages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_kernel_stacks Number of kernel stacks +# TYPE node_zoneinfo_nr_kernel_stacks gauge +node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA"} 0 +node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA32"} 0 +node_zoneinfo_nr_kernel_stacks{node="0",zone="Normal"} 18864 +# HELP node_zoneinfo_nr_mapped_pages Number of mapped pages +# TYPE node_zoneinfo_nr_mapped_pages gauge +node_zoneinfo_nr_mapped_pages{node="0",zone="DMA"} 423143 +# HELP node_zoneinfo_nr_shmem_pages Number of shmem pages (included tmpfs/GEM pages) +# TYPE node_zoneinfo_nr_shmem_pages gauge 
+node_zoneinfo_nr_shmem_pages{node="0",zone="DMA"} 330517 +# HELP node_zoneinfo_nr_slab_reclaimable_pages Number of reclaimable slab pages +# TYPE node_zoneinfo_nr_slab_reclaimable_pages gauge +node_zoneinfo_nr_slab_reclaimable_pages{node="0",zone="DMA"} 121763 +# HELP node_zoneinfo_nr_slab_unreclaimable_pages Number of unreclaimable slab pages +# TYPE node_zoneinfo_nr_slab_unreclaimable_pages gauge +node_zoneinfo_nr_slab_unreclaimable_pages{node="0",zone="DMA"} 56182 +# HELP node_zoneinfo_nr_unevictable_pages Number of unevictable pages +# TYPE node_zoneinfo_nr_unevictable_pages gauge +node_zoneinfo_nr_unevictable_pages{node="0",zone="DMA"} 213111 +# HELP node_zoneinfo_nr_writeback_pages Number of writeback pages +# TYPE node_zoneinfo_nr_writeback_pages gauge +node_zoneinfo_nr_writeback_pages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_written_total Page writings since bootup +# TYPE node_zoneinfo_nr_written_total counter +node_zoneinfo_nr_written_total{node="0",zone="DMA"} 1.181554e+06 +# HELP node_zoneinfo_numa_foreign_total Was intended here, hit elsewhere +# TYPE node_zoneinfo_numa_foreign_total counter +node_zoneinfo_numa_foreign_total{node="0",zone="DMA"} 0 +node_zoneinfo_numa_foreign_total{node="0",zone="DMA32"} 0 +node_zoneinfo_numa_foreign_total{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_numa_hit_total Allocated in intended node +# TYPE node_zoneinfo_numa_hit_total counter +node_zoneinfo_numa_hit_total{node="0",zone="DMA"} 1 +node_zoneinfo_numa_hit_total{node="0",zone="DMA32"} 13 +node_zoneinfo_numa_hit_total{node="0",zone="Normal"} 6.2836441e+07 +# HELP node_zoneinfo_numa_interleave_total Interleaver preferred this zone +# TYPE node_zoneinfo_numa_interleave_total counter +node_zoneinfo_numa_interleave_total{node="0",zone="DMA"} 1 +node_zoneinfo_numa_interleave_total{node="0",zone="DMA32"} 1 +node_zoneinfo_numa_interleave_total{node="0",zone="Normal"} 23174 +# HELP node_zoneinfo_numa_local_total Allocation from local node +# TYPE node_zoneinfo_numa_local_total counter +node_zoneinfo_numa_local_total{node="0",zone="DMA"} 1 +node_zoneinfo_numa_local_total{node="0",zone="DMA32"} 13 +node_zoneinfo_numa_local_total{node="0",zone="Normal"} 6.2836441e+07 +# HELP node_zoneinfo_numa_miss_total Allocated in non intended node +# TYPE node_zoneinfo_numa_miss_total counter +node_zoneinfo_numa_miss_total{node="0",zone="DMA"} 0 +node_zoneinfo_numa_miss_total{node="0",zone="DMA32"} 0 +node_zoneinfo_numa_miss_total{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_numa_other_total Allocation from other node +# TYPE node_zoneinfo_numa_other_total counter +node_zoneinfo_numa_other_total{node="0",zone="DMA"} 0 +node_zoneinfo_numa_other_total{node="0",zone="DMA32"} 0 +node_zoneinfo_numa_other_total{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_present_pages Physical pages existing within the zone +# TYPE node_zoneinfo_present_pages gauge +node_zoneinfo_present_pages{node="0",zone="DMA"} 3997 +node_zoneinfo_present_pages{node="0",zone="DMA32"} 546847 +node_zoneinfo_present_pages{node="0",zone="Device"} 0 +node_zoneinfo_present_pages{node="0",zone="Movable"} 0 +node_zoneinfo_present_pages{node="0",zone="Normal"} 7.806976e+06 +# HELP node_zoneinfo_protection_0 Protection array 0. 
field +# TYPE node_zoneinfo_protection_0 gauge +node_zoneinfo_protection_0{node="0",zone="DMA"} 0 +node_zoneinfo_protection_0{node="0",zone="DMA32"} 0 +node_zoneinfo_protection_0{node="0",zone="Device"} 0 +node_zoneinfo_protection_0{node="0",zone="Movable"} 0 +node_zoneinfo_protection_0{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_1 Protection array 1. field +# TYPE node_zoneinfo_protection_1 gauge +node_zoneinfo_protection_1{node="0",zone="DMA"} 2039 +node_zoneinfo_protection_1{node="0",zone="DMA32"} 0 +node_zoneinfo_protection_1{node="0",zone="Device"} 0 +node_zoneinfo_protection_1{node="0",zone="Movable"} 0 +node_zoneinfo_protection_1{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_2 Protection array 2. field +# TYPE node_zoneinfo_protection_2 gauge +node_zoneinfo_protection_2{node="0",zone="DMA"} 31932 +node_zoneinfo_protection_2{node="0",zone="DMA32"} 29893 +node_zoneinfo_protection_2{node="0",zone="Device"} 0 +node_zoneinfo_protection_2{node="0",zone="Movable"} 0 +node_zoneinfo_protection_2{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_3 Protection array 3. field +# TYPE node_zoneinfo_protection_3 gauge +node_zoneinfo_protection_3{node="0",zone="DMA"} 31932 +node_zoneinfo_protection_3{node="0",zone="DMA32"} 29893 +node_zoneinfo_protection_3{node="0",zone="Device"} 0 +node_zoneinfo_protection_3{node="0",zone="Movable"} 0 +node_zoneinfo_protection_3{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_4 Protection array 4. field +# TYPE node_zoneinfo_protection_4 gauge +node_zoneinfo_protection_4{node="0",zone="DMA"} 31932 +node_zoneinfo_protection_4{node="0",zone="DMA32"} 29893 +node_zoneinfo_protection_4{node="0",zone="Device"} 0 +node_zoneinfo_protection_4{node="0",zone="Movable"} 0 +node_zoneinfo_protection_4{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_spanned_pages Total pages spanned by the zone, including holes +# TYPE node_zoneinfo_spanned_pages gauge +node_zoneinfo_spanned_pages{node="0",zone="DMA"} 4095 +node_zoneinfo_spanned_pages{node="0",zone="DMA32"} 1.04448e+06 +node_zoneinfo_spanned_pages{node="0",zone="Device"} 0 +node_zoneinfo_spanned_pages{node="0",zone="Movable"} 0 +node_zoneinfo_spanned_pages{node="0",zone="Normal"} 7.806976e+06 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge +# HELP process_network_receive_bytes_total Number of bytes received by the process over the network. +# TYPE process_network_receive_bytes_total counter +# HELP process_network_transmit_bytes_total Number of bytes sent by the process over the network. +# TYPE process_network_transmit_bytes_total counter # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge # HELP process_resident_memory_bytes Resident memory size in bytes. 
diff --git a/collector/fixtures/ethtool/bond0/statistics b/collector/fixtures/ethtool/bond0/statistics new file mode 100644 index 0000000000..42e4a141d6 --- /dev/null +++ b/collector/fixtures/ethtool/bond0/statistics @@ -0,0 +1 @@ +ERROR: 1 \ No newline at end of file diff --git a/collector/fixtures/ethtool/eth0/driver b/collector/fixtures/ethtool/eth0/driver new file mode 100644 index 0000000000..7ec84a81ed --- /dev/null +++ b/collector/fixtures/ethtool/eth0/driver @@ -0,0 +1,11 @@ +# ethtool -i eth0 +driver: e1000e +version: 5.11.0-22-generic +firmware-version: 0.5-4 +expansion-rom-version: +bus-info: 0000:00:1f.6 +supports-statistics: yes +supports-test: yes +supports-eeprom-access: yes +supports-register-dump: yes +supports-priv-flags: yes diff --git a/collector/fixtures/ethtool/eth0/settings b/collector/fixtures/ethtool/eth0/settings new file mode 100644 index 0000000000..47989a816e --- /dev/null +++ b/collector/fixtures/ethtool/eth0/settings @@ -0,0 +1,27 @@ +# ethtool eth0 +Settings for eth0: + Supported ports: [ TP MII ] + Supported link modes: 10baseT/Half 10baseT/Full + 100baseT/Half 100baseT/Full + 1000baseT/Full 10000baseT/Full + Supported pause frame use: Symmetric + Supports auto-negotiation: Yes + Supported FEC modes: Not reported + Advertised link modes: 10baseT/Half 10baseT/Full + 100baseT/Half 100baseT/Full + 1000baseT/Full + Advertised pause frame use: Symmetric + Advertised auto-negotiation: Yes + Advertised FEC modes: Not reported + Speed: 1000Mb/s + Duplex: Full + Auto-negotiation: on + Port: Twisted Pair + PHYAD: 1 + Transceiver: internal + MDI-X: off (auto) +netlink error: Operation not permitted + Current message level: 0x00000007 (7) + drv probe link + Link detected: yes + diff --git a/collector/fixtures/ethtool/eth0/statistics b/collector/fixtures/ethtool/eth0/statistics new file mode 100644 index 0000000000..81e511e7ec --- /dev/null +++ b/collector/fixtures/ethtool/eth0/statistics @@ -0,0 +1,18 @@ +# ethtool -S eth0 +NIC statistics: + tx_packets: 961500 + rx_packets: 1260062 + tx_errors: 0 + rx_errors: 0 + port.rx_dropped: 12028 + rx_missed: 401 + align_errors: 0 + tx_single_collisions: 0 + tx_multi_collisions: 0 + rx_unicast: 1230297 + rx_broadcast: 5792 + rx_multicast: 23973 + tx_aborted: 0 + tx_underrun: 0 + duplicate metric: 1 + duplicate_metric: 2 diff --git a/collector/fixtures/pci.ids b/collector/fixtures/pci.ids new file mode 100644 index 0000000000..a25e65325d --- /dev/null +++ b/collector/fixtures/pci.ids @@ -0,0 +1,26 @@ +# Test PCI IDs file for node_exporter testing +# This file contains sample entries for testing PCI name resolution + +# Classes +C 06 Bridge device + 04 PCI bridge +C 01 Mass storage controller + 08 Non-Volatile memory controller + 02 NVM Express +C 02 Network controller + 00 Ethernet controller + +# Vendors +1022 Advanced Micro Devices, Inc. 
[AMD] + 1634 Renoir/Cezanne PCIe GPP Bridge + 17aa 5095 T540-5095 Unified Wire Ethernet Controller + +c0a9 Micron/Crucial Technology + 540a P2 [Nick P2] / P3 / P3 Plus NVMe PCIe SSD (DRAM-less) + c0a9 5021 PS5021-E21 PCIe4 NVMe Controller (DRAM-less) + +8086 Intel Corporation + 1521 I350 Gigabit Network Connection + 8086 00a3 Ethernet Network Adapter I350-T4 for OCP NIC 3.0 + +17aa Lenovo \ No newline at end of file diff --git a/collector/fixtures/pcidevice-names-output.txt b/collector/fixtures/pcidevice-names-output.txt new file mode 100644 index 0000000000..674404f4b8 --- /dev/null +++ b/collector/fixtures/pcidevice-names-output.txt @@ -0,0 +1,95 @@ +# Test output for PCI device collector with name resolution enabled +# This file demonstrates the --collector.pcidevice.names=true functionality + +# HELP node_pcidevice_current_link_transfers_per_second Value of current link's transfers per second (T/s) +# TYPE node_pcidevice_current_link_transfers_per_second gauge +node_pcidevice_current_link_transfers_per_second{bus="00",device="02",function="1",segment="0000"} 8e+09 +node_pcidevice_current_link_transfers_per_second{bus="01",device="00",function="0",segment="0000"} 8e+09 +node_pcidevice_current_link_transfers_per_second{bus="45",device="00",function="0",segment="0000"} 5e+09 + +# HELP node_pcidevice_current_link_width Value of current link's width (number of lanes) +# TYPE node_pcidevice_current_link_width gauge +node_pcidevice_current_link_width{bus="00",device="02",function="1",segment="0000"} 4 +node_pcidevice_current_link_width{bus="01",device="00",function="0",segment="0000"} 4 +node_pcidevice_current_link_width{bus="45",device="00",function="0",segment="0000"} 4 + +# HELP node_pcidevice_d3cold_allowed Whether the PCIe device supports D3cold power state (0/1). +# TYPE node_pcidevice_d3cold_allowed gauge +node_pcidevice_d3cold_allowed{bus="00",device="02",function="1",segment="0000"} 1 +node_pcidevice_d3cold_allowed{bus="01",device="00",function="0",segment="0000"} 1 +node_pcidevice_d3cold_allowed{bus="45",device="00",function="0",segment="0000"} 1 + +# HELP node_pcidevice_info Non-numeric data from /sys/bus/pci/devices/, value is always 1. +# TYPE node_pcidevice_info gauge +# Example 1: AMD PCIe Bridge with Lenovo subsystem +node_pcidevice_info{bus="00",class_id="0x060400",class_name="PCI bridge",device="02",device_id="0x1634",device_name="Renoir/Cezanne PCIe GPP Bridge",function="1",parent_bus="*",parent_device="*",parent_function="*",parent_segment="*",revision="0x00",segment="0000",subsystem_device_id="0x5095",subsystem_device_name="T540-5095 Unified Wire Ethernet Controller",subsystem_vendor_id="0x17aa",subsystem_vendor_name="Lenovo",vendor_id="0x1022",vendor_name="Advanced Micro Devices, Inc. 
[AMD]"} 1 + +# Example 2: Micron/Crucial NVMe Controller +node_pcidevice_info{bus="01",class_id="0x010802",class_name="NVM Express",device="00",device_id="0x540a",device_name="P2 [Nick P2] / P3 / P3 Plus NVMe PCIe SSD (DRAM-less)",function="0",parent_bus="00",parent_device="02",parent_function="1",parent_segment="0000",revision="0x01",segment="0000",subsystem_device_id="0x5021",subsystem_device_name="PS5021-E21 PCIe4 NVMe Controller (DRAM-less)",subsystem_vendor_id="0xc0a9",subsystem_vendor_name="Micron/Crucial Technology",vendor_id="0xc0a9",vendor_name="Micron/Crucial Technology"} 1 + +# Example 3: Intel Network Controller +node_pcidevice_info{bus="45",class_id="0x020000",class_name="Ethernet controller",device="00",device_id="0x1521",device_name="I350 Gigabit Network Connection",function="0",parent_bus="40",parent_device="01",parent_function="3",parent_segment="0000",revision="0x01",segment="0000",subsystem_device_id="0x00a3",subsystem_device_name="Ethernet Network Adapter I350-T4 for OCP NIC 3.0",subsystem_vendor_id="0x8086",subsystem_vendor_name="Intel Corporation",vendor_id="0x8086",vendor_name="Intel Corporation"} 1 + +# HELP node_pcidevice_numa_node NUMA node number for the PCI device. -1 indicates unknown or not available. +# TYPE node_pcidevice_numa_node gauge +node_pcidevice_numa_node{bus="45",device="00",function="0",segment="0000"} 0 + +# HELP node_pcidevice_max_link_transfers_per_second Value of maximum link's transfers per second (T/s) +# TYPE node_pcidevice_max_link_transfers_per_second gauge +node_pcidevice_max_link_transfers_per_second{bus="00",device="02",function="1",segment="0000"} 8e+09 +node_pcidevice_max_link_transfers_per_second{bus="01",device="00",function="0",segment="0000"} 1.6e+10 +node_pcidevice_max_link_transfers_per_second{bus="45",device="00",function="0",segment="0000"} 5e+09 + +# HELP node_pcidevice_max_link_width Value of maximum link's width (number of lanes) +# TYPE node_pcidevice_max_link_width gauge +node_pcidevice_max_link_width{bus="00",device="02",function="1",segment="0000"} 8 +node_pcidevice_max_link_width{bus="01",device="00",function="0",segment="0000"} 4 +node_pcidevice_max_link_width{bus="45",device="00",function="0",segment="0000"} 4 + +# HELP node_pcidevice_power_state PCIe device power state, one of: D0, D1, D2, D3hot, D3cold, unknown or error. 
+# TYPE node_pcidevice_power_state gauge +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D0"} 1 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D1"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D2"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D3cold"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="D3hot"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="error"} 0 +node_pcidevice_power_state{bus="00",device="02",function="1",segment="0000",state="unknown"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D0"} 1 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D1"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D2"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D3cold"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="D3hot"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="error"} 0 +node_pcidevice_power_state{bus="01",device="00",function="0",segment="0000",state="unknown"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D0"} 1 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D1"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D2"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D3cold"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="D3hot"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="error"} 0 +node_pcidevice_power_state{bus="45",device="00",function="0",segment="0000",state="unknown"} 0 + +# HELP node_pcidevice_sriov_drivers_autoprobe Whether SR-IOV drivers autoprobe is enabled for the device (0/1). +# TYPE node_pcidevice_sriov_drivers_autoprobe gauge +node_pcidevice_sriov_drivers_autoprobe{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_drivers_autoprobe{bus="01",device="00",function="0",segment="0000"} 1 +node_pcidevice_sriov_drivers_autoprobe{bus="45",device="00",function="0",segment="0000"} 1 + +# HELP node_pcidevice_sriov_numvfs Number of Virtual Functions (VFs) currently enabled for SR-IOV. +# TYPE node_pcidevice_sriov_numvfs gauge +node_pcidevice_sriov_numvfs{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_numvfs{bus="01",device="00",function="0",segment="0000"} 4 +node_pcidevice_sriov_numvfs{bus="45",device="00",function="0",segment="0000"} 0 + +# HELP node_pcidevice_sriov_totalvfs Total number of Virtual Functions (VFs) supported by the device. +# TYPE node_pcidevice_sriov_totalvfs gauge +node_pcidevice_sriov_totalvfs{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_totalvfs{bus="01",device="00",function="0",segment="0000"} 8 +node_pcidevice_sriov_totalvfs{bus="45",device="00",function="0",segment="0000"} 7 + +# HELP node_pcidevice_sriov_vf_total_msix Total number of MSI-X vectors for Virtual Functions. 
+# TYPE node_pcidevice_sriov_vf_total_msix gauge +node_pcidevice_sriov_vf_total_msix{bus="00",device="02",function="1",segment="0000"} 0 +node_pcidevice_sriov_vf_total_msix{bus="01",device="00",function="0",segment="0000"} 16 +node_pcidevice_sriov_vf_total_msix{bus="45",device="00",function="0",segment="0000"} 0 diff --git a/collector/fixtures/proc/1/mountinfo b/collector/fixtures/proc/1/mountinfo new file mode 100644 index 0000000000..2a7deda8c8 --- /dev/null +++ b/collector/fixtures/proc/1/mountinfo @@ -0,0 +1,32 @@ +24 29 0:22 / /sys rw,nosuid,nodev,noexec,relatime shared:7 - sysfs sysfs rw +25 29 0:23 / /proc rw,nosuid,nodev,noexec,relatime shared:13 - proc proc rw +26 29 0:5 / /dev rw,nosuid,relatime shared:2 - devtmpfs udev rw,size=7978892k,nr_inodes=1994723,mode=755 +27 26 0:24 / /dev/pts rw,nosuid,noexec,relatime shared:3 - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +28 29 0:25 / /run rw,nosuid,relatime shared:5 - tmpfs tmpfs rw,size=1617716k,mode=755 +29 1 259:2 / / rw,relatime shared:1 - ext4 /dev/dm-2 errors=remount-ro,data=ordered +30 24 0:6 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:8 - securityfs securityfs rw +31 26 0:26 / /dev/shm rw,nosuid,nodev shared:4 - tmpfs tmpfs rw,inode64 +32 28 0:27 / /run/lock rw,nosuid,nodev,noexec,relatime shared:6 - tmpfs tmpfs rw,size=5120k +33 24 0:28 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:9 - tmpfs tmpfs ro,mode=755 +34 31 0:24 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd +35 32 0:25 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:11 - pstore pstore rw +36 33 0:26 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuset +37 34 0:27 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,cpu,cpuacct +38 35 0:28 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,devices +39 36 0:29 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,freezer +40 37 0:30 / /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,net_cls,net_prio +41 38 0:31 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,blkio +42 39 0:32 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:20 - cgroup cgroup rw,perf_event +43 40 0:33 / /proc/sys/fs/binfmt_misc rw,relatime shared:21 - systemd-1 autofs rw,fd=22,pgrp=1,timeout=300,minproto=5,maxproto=5,direct +44 41 0:34 / /dev/mqueue rw,relatime shared:22 - mqueue mqueue rw +45 42 0:35 / /sys/kernel/debug rw,relatime shared:23 - debugfs debugfs rw +46 43 0:36 / /dev/hugepages rw,relatime shared:24 - hugetlbfs hugetlbfs rw +47 44 0:37 / /sys/fs/fuse/connections rw,relatime shared:25 - fusectl fusectl rw +48 45 260:3 / /boot rw,relatime shared:92 - ext2 /dev/sda3 rw +49 46 0:39 / /run/rpc_pipefs rw,relatime shared:27 - rpc_pipefs rpc_pipefs rw +265 37 0:41 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime shared:94 - binfmt_misc binfmt_misc rw +3002 28 0:79 / /run/user/1000 rw,nosuid,nodev,relatime shared:1225 - tmpfs tmpfs rw,size=1603436k,nr_inodes=400859,mode=700,uid=1000,gid=1000 +3147 3002 0:81 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:1290 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 +3148 3003 260:0 / 
/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\040bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk rw,relatime shared:31 - ext4 /dev/sda rw,data=ordered +3149 3004 260:0 / /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\011bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk rw,relatime shared:32 - ext4 /dev/sda rw,data=ordered +1128 67 253:0 /var/lib/containers/storage/overlay /var/lib/containers/storage/overlay rw,relatime - xfs /dev/mapper/rhel-root rw,seclabel,attr2,inode64,logbufs=8,logbsize=32k,noquota diff --git a/collector/fixtures/proc/1/mounts b/collector/fixtures/proc/1/mounts deleted file mode 100644 index 7452d495a6..0000000000 --- a/collector/fixtures/proc/1/mounts +++ /dev/null @@ -1,32 +0,0 @@ -rootfs / rootfs rw 0 0 -sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 -proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 -udev /dev devtmpfs rw,relatime,size=10240k,nr_inodes=1008585,mode=755 0 0 -devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 -tmpfs /run tmpfs rw,nosuid,relatime,size=1617716k,mode=755 0 0 -/dev/dm-2 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 -securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0 -tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0 -tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0 -tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0 -cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd 0 0 -pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0 -cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0 -cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0 -cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0 -cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0 -cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0 -cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0 -cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0 -systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=22,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 0 0 -mqueue /dev/mqueue mqueue rw,relatime 0 0 -debugfs /sys/kernel/debug debugfs rw,relatime 0 0 -hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0 -fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0 -/dev/sda3 /boot ext2 rw,relatime 0 0 -rpc_pipefs /run/rpc_pipefs rpc_pipefs rw,relatime 0 0 -binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,relatime 0 0 -tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=808860k,mode=700,uid=1000,gid=1000 0 0 -gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 -/dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\040bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0 -/dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\011bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0 diff --git a/collector/fixtures/proc/1/stat 
b/collector/fixtures/proc/1/stat new file mode 100644 index 0000000000..b6bc64d683 --- /dev/null +++ b/collector/fixtures/proc/1/stat @@ -0,0 +1 @@ +1 (systemd) S 0 1 1 0 -1 4194560 9061 9416027 94 2620 36 98 54406 13885 20 0 1 0 29 109604864 2507 18446744073709551615 1 1 0 0 0 0 671173123 4096 1260 0 0 0 17 0 0 0 19 0 0 0 0 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/11/stat b/collector/fixtures/proc/11/stat new file mode 100644 index 0000000000..1d91e2c19a --- /dev/null +++ b/collector/fixtures/proc/11/stat @@ -0,0 +1 @@ +11 (rcu_preempt) I 2 0 0 0 -1 2129984 0 0 0 0 0 346 0 0 -2 0 1 0 32 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 2 1 1 0 0 0 0 0 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/cgroups b/collector/fixtures/proc/cgroups new file mode 100644 index 0000000000..99382e312e --- /dev/null +++ b/collector/fixtures/proc/cgroups @@ -0,0 +1,13 @@ +#subsys_name hierarchy num_cgroups enabled +cpuset 5 47 1 +cpu 3 172 1 +cpuacct 3 172 1 +blkio 6 170 1 +memory 7 234 1 +devices 11 170 1 +freezer 9 47 1 +net_cls 2 47 1 +perf_event 8 47 1 +hugetlb 12 47 1 +pids 10 170 1 +rdma 4 1 1 \ No newline at end of file diff --git a/collector/fixtures/proc/diskstats b/collector/fixtures/proc/diskstats index 3a75de515b..0fb024e537 100644 --- a/collector/fixtures/proc/diskstats +++ b/collector/fixtures/proc/diskstats @@ -44,8 +44,8 @@ 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 - 8 0 sdc 126552 141 1657779 14 11822 1895 172905 1007 0 10730 17070 18851 0 125173784 11130 1555 1944 - 8 1 sdc1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 0 0 + 8 16 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 + 8 17 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 + 8 18 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 + 8 32 sdc 126552 141 1657779 14 11822 1895 172905 1007 0 10730 17070 18851 0 125173784 11130 1555 1944 + 8 33 sdc1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/interrupts_aarch64 b/collector/fixtures/proc/interrupts_aarch64 new file mode 100644 index 0000000000..88f2dea7e2 --- /dev/null +++ b/collector/fixtures/proc/interrupts_aarch64 @@ -0,0 +1,61 @@ + CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7 + 10: 3287008667 3310445093 3301386305 3273132897 3368262064 3641875466 3360412019 3225020442 GICv3 27 Level arch_timer + 14: 7815 0 0 4 0 0 0 0 GICv3 37 Level ttyS0 + 17: 0 0 0 0 0 0 0 0 GICv3 48 Edge ACPI:Ged + 18: 0 0 0 0 0 0 0 0 GICv3 49 Edge ACPI:Ged + 19: 0 0 0 0 0 0 0 0 GICv3 50 Edge ACPI:Ged + 20: 0 0 0 0 0 0 0 0 GICv3 51 Edge ACPI:Ged + 21: 0 0 0 0 0 0 0 0 GICv3 52 Edge ACPI:Ged + 22: 0 0 0 0 0 0 0 0 GICv3 53 Edge ACPI:Ged + 23: 0 0 0 0 0 0 0 0 GICv3 54 Edge ACPI:Ged + 24: 0 0 0 0 0 0 0 0 GICv3 55 Edge ACPI:Ged + 25: 0 0 0 0 0 0 0 0 GICv3 56 Edge ACPI:Ged + 26: 0 0 0 0 0 0 0 0 GICv3 57 Edge ACPI:Ged + 27: 0 0 0 0 0 0 0 0 GICv3 58 Edge ACPI:Ged + 28: 0 0 0 0 0 0 0 0 GICv3 59 Edge ACPI:Ged + 29: 0 0 0 0 0 0 0 0 GICv3 60 Edge ACPI:Ged + 30: 0 0 0 0 0 0 0 0 GICv3 61 Edge ACPI:Ged + 31: 0 0 0 0 0 0 0 0 GICv3 62 Edge ACPI:Ged + 32: 0 0 0 0 0 0 0 0 GICv3 63 Edge ACPI:Ged 
+ 33: 0 0 0 0 0 0 0 0 GICv3 64 Edge ACPI:Ged + 34: 0 0 0 0 0 0 0 0 GICv3 65 Edge ACPI:Ged + 35: 0 0 0 0 0 0 0 0 GICv3 66 Edge ACPI:Ged + 36: 0 0 0 0 0 0 0 0 GICv3 67 Edge ACPI:Ged + 37: 0 0 0 0 0 0 0 0 GICv3 68 Edge ACPI:Ged + 38: 0 0 0 0 0 0 0 0 GICv3 69 Edge ACPI:Ged + 39: 0 0 0 0 0 0 0 0 GICv3 70 Edge ACPI:Ged + 40: 0 0 0 0 0 0 0 0 GICv3 71 Edge ACPI:Ged + 41: 0 0 0 0 0 0 0 0 GICv3 72 Edge ACPI:Ged + 42: 0 0 0 0 0 0 0 0 GICv3 73 Edge ACPI:Ged + 43: 0 0 0 0 0 0 0 0 GICv3 74 Edge ACPI:Ged + 44: 0 0 0 0 0 0 0 0 GICv3 75 Edge ACPI:Ged + 45: 0 0 0 0 0 0 0 0 GICv3 76 Edge ACPI:Ged + 46: 0 0 0 0 0 0 0 0 GICv3 77 Edge ACPI:Ged + 47: 0 0 0 0 0 0 0 0 GICv3 78 Edge ACPI:Ged + 48: 0 0 0 0 0 0 0 0 GICv3 79 Edge ACPI:Ged + 49: 0 0 0 0 0 0 0 0 GICv3 23 Level arm-pmu + 50: 0 0 0 0 0 0 0 0 ARMH0061:00 3 Edge ACPI:Event + 51: 13 0 0 20 4 0 0 0 ITS-MSI 65536 Edge nvme0q0 + 52: 0 9 0 0 0 5 20 0 ITS-MSI 507904 Edge nvme1q0 + 53: 129969327 0 0 0 0 0 0 0 ITS-MSI 65537 Edge nvme0q1 + 54: 0 0 0 0 126913956 0 0 0 ITS-MSI 65538 Edge nvme0q2 + 55: 0 199619844 0 0 0 0 0 0 ITS-MSI 507905 Edge nvme1q1 + 56: 0 0 0 0 0 198494086 0 0 ITS-MSI 507906 Edge nvme1q2 + 57: 0 0 51 0 0 32479308 0 0 ITS-MSI 81920 Edge ena-mgmnt@pci:0000:00:05.0 + 58: 0 0 1195697946 437 0 0 0 0 ITS-MSI 81921 Edge eth0-Tx-Rx-0 + 59: 0 0 0 2709937608 1619 0 0 0 ITS-MSI 81922 Edge eth0-Tx-Rx-1 + 60: 0 1457922109 0 0 0 71 0 0 ITS-MSI 81923 Edge eth0-Tx-Rx-2 + 61: 2052879736 0 0 0 0 0 124 0 ITS-MSI 81924 Edge eth0-Tx-Rx-3 + 62: 0 0 0 0 0 0 2268695629 1530 ITS-MSI 81925 Edge eth0-Tx-Rx-4 + 63: 50 0 0 0 0 0 0 1997799253 ITS-MSI 81926 Edge eth0-Tx-Rx-5 + 64: 0 48 0 0 1238622585 0 0 0 ITS-MSI 81927 Edge eth0-Tx-Rx-6 + 65: 0 0 47 0 0 0 0 1574978449 ITS-MSI 81928 Edge eth0-Tx-Rx-7 +IPI0:2768808080 2844211768 2878602432 2730576120 2723524623 3349096412 2717389879 2154252810 Rescheduling interrupts +IPI1: 357815098 213258177 153713187 132890624 124746406 123498004 122386326 120728639 Function call interrupts +IPI2: 0 0 0 0 0 0 0 0 CPU stop interrupts +IPI3: 0 0 0 0 0 0 0 0 CPU stop (for crash dump) interrupts +IPI4: 0 0 0 0 0 0 0 0 Timer broadcast interrupts +IPI5: 0 0 0 0 0 0 0 0 IRQ work interrupts +IPI6: 0 0 0 0 0 0 0 0 CPU wake-up interrupts +Err: 0 + diff --git a/collector/fixtures/proc/mdstat b/collector/fixtures/proc/mdstat index a135435f06..a19bf5e3f0 100644 --- a/collector/fixtures/proc/mdstat +++ b/collector/fixtures/proc/mdstat @@ -20,6 +20,10 @@ md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) 195310144 blocks [2/2] [UU] [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec +md201 : active raid1 sda3[0] sdb3[1] + 1993728 blocks super 1.2 [2/2] [UU] + [=>...................] 
check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec + md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] bitmap: 0/30 pages [0KB], 65536KB chunk diff --git a/collector/fixtures/proc/net/arp b/collector/fixtures/proc/net/arp index 84c67f8c00..3b9e4e7661 100644 --- a/collector/fixtures/proc/net/arp +++ b/collector/fixtures/proc/net/arp @@ -5,3 +5,4 @@ IP address HW type Flags HW address Mask Device 192.168.1.4 0x1 0x2 dd:ee:ff:aa:bb:cc * eth1 192.168.1.5 0x1 0x2 ee:ff:aa:bb:cc:dd * eth1 192.168.1.6 0x1 0x2 ff:aa:bb:cc:dd:ee * eth1 +10.0.0.1 0x1 0x2 de:ad:be:ef:00:00 * nope diff --git a/collector/fixtures/proc/net/dev b/collector/fixtures/proc/net/dev deleted file mode 100644 index a3534c1309..0000000000 --- a/collector/fixtures/proc/net/dev +++ /dev/null @@ -1,12 +0,0 @@ -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - tun0: 1888 24 0 0 0 0 0 0 67120 934 0 0 0 0 0 0 -veth4B09XN: 648 8 0 0 0 0 0 0 1943284 10640 0 0 0 0 0 0 - lo: 435303245 1832522 0 0 0 0 0 0 435303245 1832522 0 0 0 0 0 0 - eth0:68210035552 520993275 0 0 0 0 0 0 9315587528 43451486 0 0 0 0 0 0 -lxcbr0: 0 0 0 0 0 0 0 0 2630299 28339 0 0 0 0 0 0 - wlan0: 10437182923 13899359 0 0 0 0 0 0 2851649360 11726200 0 0 0 0 0 0 -docker0: 64910168 1065585 0 0 0 0 0 0 2681662018 1929779 0 0 0 0 0 0 -ibr10:30: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -flannel.1: 18144009813 228499337 0 0 0 0 0 0 20758990068 258369223 0 64 0 0 0 0 - 💩0: 57750104 105557 0 0 0 0 0 72 404570255 304261 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/net/netstat b/collector/fixtures/proc/net/netstat index 811f623273..a504f800cc 100644 --- a/collector/fixtures/proc/net/netstat +++ b/collector/fixtures/proc/net/netstat @@ -1,4 +1,4 @@ -TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLoss TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPChallengeACK TCPSYNChallenge -TcpExt: 0 0 2 0 0 0 0 0 0 0 388812 0 0 0 0 6 102471 17 9 0 0 80568 0 168808 0 4471289 26 1433940 3744565 0 1 0 0 0 0 0 0 0 0 48 0 0 0 1 0 1 0 1 115 0 0 0 0 9 0 5 0 41 4 0 0 0 0 0 0 0 1 0 0 0 0 2 5 0 0 0 0 0 0 0 2 2 +TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab 
DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLoss TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPChallengeACK TCPSYNChallenge TCPOFOQueue TCPRcvQDrop +TcpExt: 0 0 2 0 0 0 0 0 0 0 388812 0 0 0 0 6 102471 17 9 0 0 80568 0 168808 0 4471289 26 1433940 3744565 0 1 0 0 0 0 0 0 0 0 48 0 0 0 1 0 1 0 1 115 0 0 0 0 9 0 5 0 41 4 0 0 0 0 0 0 0 1 0 0 0 0 2 5 0 0 0 0 0 0 0 2 2 42 131 IpExt: InNoRoutes InTruncatedPkts InMcastPkts OutMcastPkts InBcastPkts OutBcastPkts InOctets OutOctets InMcastOctets OutMcastOctets InBcastOctets OutBcastOctets IpExt: 0 0 0 0 0 0 6286396970 2786264347 0 0 0 0 diff --git a/collector/fixtures/proc/net/rpc/nfs b/collector/fixtures/proc/net/rpc/nfs index ba2efd3d3d..9f330abaa4 100644 --- a/collector/fixtures/proc/net/rpc/nfs +++ b/collector/fixtures/proc/net/rpc/nfs @@ -2,4 +2,4 @@ net 70 70 69 45 rpc 1218785755 374636 1218815394 proc2 18 16 57 74 52 71 73 45 86 0 52 83 61 17 53 50 23 70 82 proc3 22 0 1061909262 48906 4077635 117661341 5 29391916 2570425 2993289 590 0 0 7815 15 1130 0 3983 92385 13332 2 1 23729 -proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 84 15 53 86 54 66 56 97 36 49 32 85 81 11 58 32 67 13 28 35 90 1 26 0 +proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 84 15 53 86 54 66 56 97 36 49 32 85 81 11 58 32 67 13 28 35 1 90 26 0 diff --git a/collector/fixtures/proc/net/rpc/nfsd b/collector/fixtures/proc/net/rpc/nfsd index 754f19d9d5..6a092bfd30 100644 --- a/collector/fixtures/proc/net/rpc/nfsd +++ b/collector/fixtures/proc/net/rpc/nfsd @@ -9,3 +9,4 @@ proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 proc4 2 2 10853 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +wdeleg_getattr 15 diff --git a/collector/fixtures/proc/net/stat/arp_cache b/collector/fixtures/proc/net/stat/arp_cache new file mode 100644 index 0000000000..3176ca6134 --- /dev/null +++ b/collector/fixtures/proc/net/stat/arp_cache @@ -0,0 +1,3 @@ +entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls +00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c +00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018 diff --git a/collector/fixtures/proc/net/stat/ndisc_cache 
b/collector/fixtures/proc/net/stat/ndisc_cache new file mode 100644 index 0000000000..fd4c2f8f49 --- /dev/null +++ b/collector/fixtures/proc/net/stat/ndisc_cache @@ -0,0 +1,3 @@ +entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls +00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb +00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107 diff --git a/collector/fixtures/proc/net/stat/nf_conntrack b/collector/fixtures/proc/net/stat/nf_conntrack new file mode 100644 index 0000000000..c833c40e46 --- /dev/null +++ b/collector/fixtures/proc/net/stat/nf_conntrack @@ -0,0 +1,5 @@ +entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart +00000021 00000000 00000000 00000000 00000003 0000588a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +00000021 00000000 00000000 00000000 00000002 000056a4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000002 +00000021 00000000 00000000 00000000 00000001 000058d4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000001 +00000021 00000000 00000000 00000000 0000002f 00005688 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000004 diff --git a/collector/fixtures/proc/net/tcpstat b/collector/fixtures/proc/net/tcpstat deleted file mode 100644 index 352c00bbf3..0000000000 --- a/collector/fixtures/proc/net/tcpstat +++ /dev/null @@ -1,3 +0,0 @@ - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 00000000:0016 00000000:0000 0A 00000015:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 0F02000A:0016 0202000A:8B6B 01 00000015:00000001 02:000AC99B 00000000 0 0 3652 4 ffff88003d3ae040 21 4 31 47 46 diff --git a/collector/fixtures/proc/net/xfrm_stat b/collector/fixtures/proc/net/xfrm_stat new file mode 100644 index 0000000000..970c3e4ded --- /dev/null +++ b/collector/fixtures/proc/net/xfrm_stat @@ -0,0 +1,28 @@ +XfrmInError 1 +XfrmInBufferError 2 +XfrmInHdrError 4 +XfrmInNoStates 3 +XfrmInStateProtoError 40 +XfrmInStateModeError 100 +XfrmInStateSeqError 6000 +XfrmInStateExpired 7 +XfrmInStateMismatch 23451 +XfrmInStateInvalid 55555 +XfrmInTmplMismatch 51 +XfrmInNoPols 65432 +XfrmInPolBlock 100 +XfrmInPolError 10000 +XfrmOutError 1000000 +XfrmOutBundleGenError 43321 +XfrmOutBundleCheckError 555 +XfrmOutNoStates 869 +XfrmOutStateProtoError 4542 +XfrmOutStateModeError 8 +XfrmOutStateSeqError 543 +XfrmOutStateExpired 565 +XfrmOutPolBlock 43456 +XfrmOutPolDead 7656 +XfrmOutPolError 1454 +XfrmFwdHdrError 6654 +XfrmOutStateInvalid 28765 +XfrmAcquireError 24532 \ No newline at end of file diff --git a/collector/fixtures/proc/pressure/cpu b/collector/fixtures/proc/pressure/cpu index 14acc3a325..0aaced93da 100644 --- a/collector/fixtures/proc/pressure/cpu +++ b/collector/fixtures/proc/pressure/cpu @@ -1 +1,2 @@ some avg10=0.00 avg60=0.00 avg300=0.00 total=14036781 +full avg10=0.00 avg60=0.00 avg300=0.00 total=0 diff --git a/collector/fixtures/proc/pressure/irq b/collector/fixtures/proc/pressure/irq new file mode 100644 index 0000000000..76059c7572 --- /dev/null +++ b/collector/fixtures/proc/pressure/irq @@ 
-0,0 +1 @@ +full avg10=0.00 avg60=0.00 avg300=0.00 total=8494 \ No newline at end of file diff --git a/collector/fixtures/proc/slabinfo b/collector/fixtures/proc/slabinfo new file mode 100644 index 0000000000..8f2de4ba11 --- /dev/null +++ b/collector/fixtures/proc/slabinfo @@ -0,0 +1,6 @@ +slabinfo - version: 2.1 +# name : tunables : slabdata +tw_sock_TCP 704 864 256 32 2 : tunables 0 0 0 : slabdata 27 27 0 +dmaengine-unmap-128 1206 1320 1088 30 8 : tunables 0 0 0 : slabdata 44 44 0 +kmalloc-8192 132 148 8192 4 8 : tunables 0 0 0 : slabdata 37 37 0 +kmem_cache 320 320 256 32 2 : tunables 0 0 0 : slabdata 10 10 0 diff --git a/collector/fixtures/proc/softirqs b/collector/fixtures/proc/softirqs new file mode 100644 index 0000000000..a1dfef5ffe --- /dev/null +++ b/collector/fixtures/proc/softirqs @@ -0,0 +1,11 @@ + CPU0 CPU1 + HI: 7 1 + TIMER: 424191 108342 + NET_TX: 2301 2430 + NET_RX: 43066 104508 + BLOCK: 23776 24115 + IRQ_POLL: 0 0 + TASKLET: 372 1899 + SCHED: 378895 152852 + HRTIMER: 40 346 + RCU: 155929 146631 diff --git a/collector/fixtures/proc/spl/kstat/zfs/arcstats b/collector/fixtures/proc/spl/kstat/zfs/arcstats index 48a73a2c5a..1dbeed6f37 100644 --- a/collector/fixtures/proc/spl/kstat/zfs/arcstats +++ b/collector/fixtures/proc/spl/kstat/zfs/arcstats @@ -1,93 +1,94 @@ 6 1 0x01 91 4368 5266997922 97951858082072 name type data -hits 4 8772612 -misses 4 604635 +anon_evictable_data 4 0 +anon_evictable_metadata 4 0 +anon_size 4 1917440 +arc_loaned_bytes 4 0 +arc_meta_limit 4 6275982336 +arc_meta_max 4 449286096 +arc_meta_min 4 16777216 +arc_meta_used 4 308103632 +arc_need_free 4 0 +arc_no_grow 4 0 +arc_prune 4 0 +arc_sys_free 4 261496832 +arc_tempreserve 4 0 +c 4 1643208777 +c_max 4 8367976448 +c_min 4 33554432 +data_size 4 1295836160 +deleted 4 60403 demand_data_hits 4 7221032 demand_data_misses 4 73300 demand_metadata_hits 4 1464353 demand_metadata_misses 4 498170 -prefetch_data_hits 4 3615 -prefetch_data_misses 4 17094 -prefetch_metadata_hits 4 83612 -prefetch_metadata_misses 4 16071 -mru_hits 4 855535 -mru_ghost_hits 4 21100 -mfu_hits 4 7829854 -mfu_ghost_hits 4 821 -deleted 4 60403 -mutex_miss 4 2 -evict_skip 4 2265729 -evict_not_enough 4 680 +duplicate_buffers 4 0 +duplicate_buffers_size 4 0 +duplicate_reads 4 0 evict_l2_cached 4 0 evict_l2_eligible 4 8992514560 evict_l2_ineligible 4 992552448 evict_l2_skip 4 0 +evict_not_enough 4 680 +evict_skip 4 2265729 +hash_chain_max 4 3 +hash_chains 4 412 +hash_collisions 4 50564 hash_elements 4 42359 hash_elements_max 4 88245 -hash_collisions 4 50564 -hash_chains 4 412 -hash_chain_max 4 3 -p 4 516395305 -c 4 1643208777 -c_min 4 33554432 -c_max 4 8367976448 -size 4 1603939792 hdr_size 4 16361080 -data_size 4 1295836160 -metadata_size 4 175298560 -other_size 4 116443992 -anon_size 4 1917440 -anon_evictable_data 4 0 -anon_evictable_metadata 4 0 -mru_size 4 402593792 -mru_evictable_data 4 278091264 -mru_evictable_metadata 4 18606592 -mru_ghost_size 4 999728128 -mru_ghost_evictable_data 4 883765248 -mru_ghost_evictable_metadata 4 115962880 -mfu_size 4 1066623488 -mfu_evictable_data 4 1017613824 -mfu_evictable_metadata 4 9163776 -mfu_ghost_size 4 104936448 -mfu_ghost_evictable_data 4 96731136 -mfu_ghost_evictable_metadata 4 8205312 +hits 4 8772612 +l2_abort_lowmem 4 0 +l2_asize 4 0 +l2_cdata_free_on_write 4 0 +l2_cksum_bad 4 0 +l2_compress_failures 4 0 +l2_compress_successes 4 0 +l2_compress_zeros 4 0 +l2_evict_l1cached 4 0 +l2_evict_lock_retry 4 0 +l2_evict_reading 4 0 +l2_feeds 4 0 +l2_free_on_write 4 0 +l2_hdr_size 4 0 l2_hits 4 0 +l2_io_error 
4 0 l2_misses 4 0 -l2_feeds 4 0 -l2_rw_clash 4 0 l2_read_bytes 4 0 +l2_rw_clash 4 0 +l2_size 4 0 l2_write_bytes 4 0 -l2_writes_sent 4 0 l2_writes_done 4 0 l2_writes_error 4 0 l2_writes_lock_retry 4 0 -l2_evict_lock_retry 4 0 -l2_evict_reading 4 0 -l2_evict_l1cached 4 0 -l2_free_on_write 4 0 -l2_cdata_free_on_write 4 0 -l2_abort_lowmem 4 0 -l2_cksum_bad 4 0 -l2_io_error 4 0 -l2_size 4 0 -l2_asize 4 0 -l2_hdr_size 4 0 -l2_compress_successes 4 0 -l2_compress_zeros 4 0 -l2_compress_failures 4 0 -memory_throttle_count 4 0 -duplicate_buffers 4 0 -duplicate_buffers_size 4 0 -duplicate_reads 4 0 +l2_writes_sent 4 0 +memory_available_bytes 3 -922337203685477580 memory_direct_count 4 542 memory_indirect_count 4 3006 -arc_no_grow 4 0 -arc_tempreserve 4 0 -arc_loaned_bytes 4 0 -arc_prune 4 0 -arc_meta_used 4 308103632 -arc_meta_limit 4 6275982336 -arc_meta_max 4 449286096 -arc_meta_min 4 16777216 -arc_need_free 4 0 -arc_sys_free 4 261496832 +memory_throttle_count 4 0 +metadata_size 4 175298560 +mfu_evictable_data 4 1017613824 +mfu_evictable_metadata 4 9163776 +mfu_ghost_evictable_data 4 96731136 +mfu_ghost_evictable_metadata 4 8205312 +mfu_ghost_hits 4 821 +mfu_ghost_size 4 104936448 +mfu_hits 4 7829854 +mfu_size 4 1066623488 +misses 4 604635 +mru_evictable_data 4 278091264 +mru_evictable_metadata 4 18606592 +mru_ghost_evictable_data 4 883765248 +mru_ghost_evictable_metadata 4 115962880 +mru_ghost_hits 4 21100 +mru_ghost_size 4 999728128 +mru_hits 4 855535 +mru_size 4 402593792 +mutex_miss 4 2 +other_size 4 116443992 +p 4 516395305 +prefetch_data_hits 4 3615 +prefetch_data_misses 4 17094 +prefetch_metadata_hits 4 83612 +prefetch_metadata_misses 4 16071 +size 4 1603939792 diff --git a/collector/fixtures/proc/spl/kstat/zfs/dbuf_stats b/collector/fixtures/proc/spl/kstat/zfs/dbufstats similarity index 100% rename from collector/fixtures/proc/spl/kstat/zfs/dbuf_stats rename to collector/fixtures/proc/spl/kstat/zfs/dbufstats diff --git a/collector/fixtures/proc/spl/kstat/zfs/pool1/state b/collector/fixtures/proc/spl/kstat/zfs/pool1/state new file mode 100644 index 0000000000..1424865cf5 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/pool1/state @@ -0,0 +1 @@ +ONLINE diff --git a/collector/fixtures/proc/spl/kstat/zfs/pool2/state b/collector/fixtures/proc/spl/kstat/zfs/pool2/state new file mode 100644 index 0000000000..c3ddd0e661 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/pool2/state @@ -0,0 +1 @@ +SUSPENDED diff --git a/collector/fixtures/proc/spl/kstat/zfs/pool3/io b/collector/fixtures/proc/spl/kstat/zfs/pool3/io new file mode 100644 index 0000000000..ef2a58fe7f --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/pool3/io @@ -0,0 +1,3 @@ +12 3 0x00 1 80 79205351707403 395818011156865 +nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt +1884160 3206144 22 132 7155162 104112268 79210489694949 24168078 104112268 79210489849220 0 0 diff --git a/collector/fixtures/proc/spl/kstat/zfs/pool3/objset-1 b/collector/fixtures/proc/spl/kstat/zfs/pool3/objset-1 new file mode 100644 index 0000000000..9799d262a7 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/pool3/objset-1 @@ -0,0 +1,9 @@ +23 1 0x01 7 2160 221578688875 6665999035587 +name type data +dataset_name 7 pool3 +writes 4 0 +nwritten 4 0 +reads 4 0 +nread 4 0 +nunlinks 4 0 +nunlinked 4 0 diff --git a/collector/fixtures/proc/spl/kstat/zfs/pool3/objset-2 b/collector/fixtures/proc/spl/kstat/zfs/pool3/objset-2 new file mode 100644 index 0000000000..e395504b3a --- /dev/null +++ 
b/collector/fixtures/proc/spl/kstat/zfs/pool3/objset-2 @@ -0,0 +1,9 @@ +24 1 0x01 7 2160 221611904716 7145015038451 +name type data +dataset_name 7 pool3/dataset with space +writes 4 4 +nwritten 4 12302 +reads 4 2 +nread 4 28 +nunlinks 4 3 +nunlinked 4 3 diff --git a/collector/fixtures/proc/spl/kstat/zfs/pool3/state b/collector/fixtures/proc/spl/kstat/zfs/pool3/state new file mode 100644 index 0000000000..1424865cf5 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/pool3/state @@ -0,0 +1 @@ +ONLINE diff --git a/collector/fixtures/proc/spl/kstat/zfs/poolz1/state b/collector/fixtures/proc/spl/kstat/zfs/poolz1/state new file mode 100644 index 0000000000..be5b2ef03f --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/poolz1/state @@ -0,0 +1 @@ +DEGRADED diff --git a/collector/fixtures/proc/swaps b/collector/fixtures/proc/swaps new file mode 100644 index 0000000000..c0fbf7787b --- /dev/null +++ b/collector/fixtures/proc/swaps @@ -0,0 +1,2 @@ +Filename Type Size Used Priority +/dev/zram0 partition 8388604 76 100 diff --git a/collector/fixtures/proc/sys/kernel/random/poolsize b/collector/fixtures/proc/sys/kernel/random/poolsize new file mode 100644 index 0000000000..801c306ed3 --- /dev/null +++ b/collector/fixtures/proc/sys/kernel/random/poolsize @@ -0,0 +1 @@ +4096 diff --git a/collector/fixtures/proc/sys/kernel/seccomp/actions_avail b/collector/fixtures/proc/sys/kernel/seccomp/actions_avail new file mode 100644 index 0000000000..a608f9fd07 --- /dev/null +++ b/collector/fixtures/proc/sys/kernel/seccomp/actions_avail @@ -0,0 +1 @@ +kill_process kill_thread trap errno user_notif trace log allow diff --git a/collector/fixtures/proc/zoneinfo b/collector/fixtures/proc/zoneinfo new file mode 100644 index 0000000000..82ff3d6315 --- /dev/null +++ b/collector/fixtures/proc/zoneinfo @@ -0,0 +1,265 @@ +Node 0, zone DMA + per-node stats + nr_inactive_anon 95612 + nr_active_anon 1175853 + nr_inactive_file 723339 + nr_active_file 688810 + nr_unevictable 213111 + nr_slab_reclaimable 121763 + nr_slab_unreclaimable 56182 + nr_isolated_anon 0 + nr_isolated_file 0 + workingset_nodes 0 + workingset_refault 0 + workingset_activate 0 + workingset_restore 0 + workingset_nodereclaim 0 + nr_anon_pages 1156608 + nr_mapped 423143 + nr_file_pages 1740118 + nr_dirty 103 + nr_writeback 0 + nr_writeback_temp 0 + nr_shmem 330517 + nr_shmem_hugepages 0 + nr_shmem_pmdmapped 0 + nr_file_hugepages 0 + nr_file_pmdmapped 0 + nr_anon_transparent_hugepages 0 + nr_vmscan_write 0 + nr_vmscan_immediate_reclaim 0 + nr_dirtied 1189097 + nr_written 1181554 + nr_kernel_misc_reclaimable 0 + nr_foll_pin_acquired 3 + nr_foll_pin_released 3 + pages free 2949 + min 8 + low 11 + high 14 + spanned 4095 + present 3997 + managed 3973 + protection: (0, 2039, 31932, 31932, 31932) + nr_free_pages 2949 + nr_zone_inactive_anon 0 + nr_zone_active_anon 0 + nr_zone_inactive_file 0 + nr_zone_active_file 0 + nr_zone_unevictable 0 + nr_zone_write_pending 0 + nr_mlock 0 + nr_page_table_pages 0 + nr_kernel_stack 0 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 1 + numa_miss 0 + numa_foreign 0 + numa_interleave 1 + numa_local 1 + numa_other 0 + pagesets + cpu: 0 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 1 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 2 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 3 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 4 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 5 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + 
cpu: 6 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 7 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + node_unreclaimable: 0 + start_pfn: 1 +Node 0, zone DMA32 + pages free 528427 + min 1078 + low 1600 + high 2122 + spanned 1044480 + present 546847 + managed 530339 + protection: (0, 0, 29893, 29893, 29893) + nr_free_pages 528427 + nr_zone_inactive_anon 0 + nr_zone_active_anon 0 + nr_zone_inactive_file 0 + nr_zone_active_file 0 + nr_zone_unevictable 0 + nr_zone_write_pending 0 + nr_mlock 0 + nr_page_table_pages 0 + nr_kernel_stack 0 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 13 + numa_miss 0 + numa_foreign 0 + numa_interleave 1 + numa_local 13 + numa_other 0 + pagesets + cpu: 0 + count: 357 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 1 + count: 0 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 2 + count: 338 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 3 + count: 0 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 4 + count: 62 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 5 + count: 63 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 6 + count: 0 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 7 + count: 63 + high: 378 + batch: 63 + vm stats threshold: 48 + node_unreclaimable: 0 + start_pfn: 4096 +Node 0, zone Normal + pages free 4539739 + min 15809 + low 23461 + high 31113 + spanned 7806976 + present 7806976 + managed 7654794 + protection: (0, 0, 0, 0, 0) + nr_free_pages 4539739 + nr_zone_inactive_anon 95612 + nr_zone_active_anon 1175853 + nr_zone_inactive_file 723339 + nr_zone_active_file 688810 + nr_zone_unevictable 213111 + nr_zone_write_pending 103 + nr_mlock 12 + nr_page_table_pages 13921 + nr_kernel_stack 18864 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 62836441 + numa_miss 0 + numa_foreign 0 + numa_interleave 23174 + numa_local 62836441 + numa_other 0 + pagesets + cpu: 0 + count: 351 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 1 + count: 112 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 2 + count: 368 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 3 + count: 358 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 4 + count: 304 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 5 + count: 112 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 6 + count: 488 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 7 + count: 342 + high: 378 + batch: 63 + vm stats threshold: 72 + node_unreclaimable: 0 + start_pfn: 1048576 +Node 0, zone Movable + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Node 0, zone Device + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) \ No newline at end of file diff --git a/collector/fixtures/sys.ttar b/collector/fixtures/sys.ttar index 3e52c60957..345cbcd2e3 100644 --- a/collector/fixtures/sys.ttar +++ b/collector/fixtures/sys.ttar @@ -2,1815 +2,8091 @@ Directory: sys Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus +Directory: sys/block Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu +Directory: sys/block/md0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices +Directory: sys/block/md0/md Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/bus/cpu/devices/cpu0 -SymlinkTo: ../../../devices/system/cpu/cpu0 +Path: sys/block/md0/md/array_state +Lines: 1 +clean +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu1 -SymlinkTo: ../../../devices/system/cpu/cpu1 +Path: sys/block/md0/md/chunk_size +Lines: 1 +524288 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu2 -SymlinkTo: ../../../devices/system/cpu/cpu2 +Directory: sys/block/md0/md/dev-sdg +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu3 -SymlinkTo: ../../../devices/system/cpu/cpu3 +Path: sys/block/md0/md/dev-sdg/state +Lines: 1 +in_sync +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/node +Directory: sys/block/md0/md/dev-sdh Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/node/devices -Mode: 755 +Path: sys/block/md0/md/dev-sdh/state +Lines: 1 +in_sync +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/node/devices/node0 -SymlinkTo: ../../../devices/system/node/node0 +Path: sys/block/md0/md/level +Lines: 1 +raid0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/node/devices/node1 -SymlinkTo: ../../../devices/system/node/node1 +Path: sys/block/md0/md/metadata_version +Lines: 1 +1.2 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class -Mode: 755 +Path: sys/block/md0/md/raid_disks +Lines: 1 +2 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/hwmon +Directory: sys/block/md0/md/rd0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon0 -SymlinkTo: ../../devices/platform/coretemp.0/hwmon/hwmon0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon1 -SymlinkTo: ../../devices/platform/coretemp.1/hwmon/hwmon1 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon2 -SymlinkTo: ../../devices/platform/applesmc.768/hwmon/hwmon2 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon3 -SymlinkTo: ../../devices/platform/nct6775.656/hwmon/hwmon3 +Path: sys/block/md0/md/rd0/state +Lines: 1 +in_sync +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/hwmon/hwmon4 +Directory: sys/block/md0/md/rd1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp1_crit +Path: sys/block/md0/md/rd1/state Lines: 1 -100000 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp1_crit_alarm +Path: sys/block/md0/md/uuid Lines: 1 -0 +155f29ff-1716-4107-b362-52307ef86cac Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp1_input +Directory: sys/block/md1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/block/md1/md +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: sys/block/md1/md/array_state Lines: 1 -55000 +clean Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp1_label +Path: sys/block/md1/md/chunk_size Lines: 1 -foosensor +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp1_max +Path: sys/block/md1/md/degraded Lines: 1 -100000 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp2_crit +Directory: sys/block/md1/md/dev-sdi +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md1/md/dev-sdi/state Lines: 1 -100000 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp2_crit_alarm +Directory: sys/block/md1/md/dev-sdj +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md1/md/dev-sdj/state Lines: 1 -0 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp2_input +Path: sys/block/md1/md/level Lines: 1 -54000 +raid1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp2_label +Path: sys/block/md1/md/metadata_version Lines: 1 -foosensor +1.2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/hwmon/hwmon4/temp2_max +Path: sys/block/md1/md/raid_disks Lines: 1 -100000 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband +Directory: sys/block/md1/md/rd0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/i40iw0 +Path: sys/block/md1/md/rd0/state +Lines: 1 +in_sync +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/block/md1/md/rd1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/board_id +Path: sys/block/md1/md/rd1/state Lines: 1 -I40IW Board ID +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/fw_ver +Path: sys/block/md1/md/sync_action Lines: 1 -0.2 +idle Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/hca_type +Path: sys/block/md1/md/sync_completed Lines: 1 -I40IW +none Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/i40iw0/ports -Mode: 755 +Path: sys/block/md1/md/uuid +Lines: 1 +0fbf5f2c-add2-43c2-bd78-a4be3ab709ef +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/i40iw0/ports/1 +Directory: sys/block/md10 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/i40iw0/ports/1/counters +Directory: sys/block/md10/md Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/VL15_dropped +Path: sys/block/md10/md/array_state Lines: 1 -N/A (no PMA) +clean Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- -Path: sys/class/infiniband/i40iw0/ports/1/counters/excessive_buffer_overrun_errors +Path: sys/block/md10/md/chunk_size Lines: 1 -N/A (no PMA) +524288 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/link_downed +Path: sys/block/md10/md/degraded Lines: 1 -N/A (no PMA) +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/link_error_recovery -Lines: 1 -N/A (no PMA) -Mode: 644 +Directory: sys/block/md10/md/dev-sdu +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/local_link_integrity_errors +Path: sys/block/md10/md/dev-sdu/state Lines: 1 -N/A (no PMA) +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_constraint_errors -Lines: 1 -N/A (no PMA) -Mode: 644 +Directory: sys/block/md10/md/dev-sdv +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_data +Path: sys/block/md10/md/dev-sdv/state Lines: 1 -N/A (no PMA) +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_errors -Lines: 1 -N/A (no PMA) -Mode: 644 +Directory: sys/block/md10/md/dev-sdw +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_packets +Path: sys/block/md10/md/dev-sdw/state Lines: 1 -N/A (no PMA) +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_remote_physical_errors +Directory: sys/block/md10/md/dev-sdx +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md10/md/dev-sdx/state Lines: 1 -N/A (no PMA) +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_switch_relay_errors +Path: sys/block/md10/md/level Lines: 1 -N/A (no PMA) +raid10 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_constraint_errors +Path: sys/block/md10/md/metadata_version Lines: 1 -N/A (no PMA) +1.2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_data +Path: sys/block/md10/md/raid_disks Lines: 1 -N/A (no PMA) +4 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_discards +Directory: sys/block/md10/md/rd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md10/md/rd0/state Lines: 1 -N/A (no PMA) +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_packets +Directory: sys/block/md10/md/rd1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md10/md/rd1/state Lines: 1 -N/A (no PMA) +in_sync Mode: 644 # ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_wait +Directory: sys/block/md10/md/rd2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md10/md/rd2/state Lines: 1 -N/A (no PMA) +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/counters/symbol_error +Directory: sys/block/md10/md/rd3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md10/md/rd3/state Lines: 1 -N/A (no PMA) +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/phys_state +Path: sys/block/md10/md/sync_action Lines: 1 -5: LinkUp +idle Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/rate +Path: sys/block/md10/md/sync_completed Lines: 1 -10 Gb/sec (4X) +none Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/i40iw0/ports/1/state +Path: sys/block/md10/md/uuid Lines: 1 -4: ACTIVE +0c15f7e7-b159-4b1f-a5cd-a79b5c04b6f5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/mlx4_0 +Directory: sys/block/md4 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/board_id +Directory: sys/block/md4/md +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md4/md/array_state Lines: 1 -SM_1141000001000 +clean Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/fw_ver +Path: sys/block/md4/md/chunk_size Lines: 1 -2.31.5050 +524288 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/hca_type +Path: sys/block/md4/md/degraded Lines: 1 -MT4099 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/mlx4_0/ports +Directory: sys/block/md4/md/dev-sdk Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/mlx4_0/ports/1 -Mode: 755 +Path: sys/block/md4/md/dev-sdk/state +Lines: 1 +in_sync +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/mlx4_0/ports/1/counters +Directory: sys/block/md4/md/dev-sdl Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Path: sys/block/md4/md/dev-sdl/state Lines: 1 -0 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery +Directory: sys/block/md4/md/dev-sdm +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md4/md/dev-sdm/state Lines: 1 -0 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/multicast_rcv_packets +Path: sys/block/md4/md/level Lines: 1 -93 +raid4 Mode: 644 # ttar - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/multicast_xmit_packets +Path: sys/block/md4/md/metadata_version Lines: 1 -16 +1.2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Path: sys/block/md4/md/raid_disks Lines: 1 -0 +3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data +Directory: sys/block/md4/md/rd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md4/md/rd0/state Lines: 1 -4631917 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_discards +Directory: sys/block/md4/md/rd1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md4/md/rd1/state Lines: 1 -0 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Directory: sys/block/md4/md/rd2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md4/md/rd2/state Lines: 1 -0 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Path: sys/block/md4/md/sync_action Lines: 1 -6825908347 +idle Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Path: sys/block/md4/md/sync_completed Lines: 1 -0 +none Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data +Path: sys/block/md4/md/uuid Lines: 1 -3733440 +67f415d5-2c0c-4b69-8e0d-7e20ef553457 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Directory: sys/block/md5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/block/md5/md +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md5/md/array_state Lines: 1 -5 +clean Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Path: sys/block/md5/md/chunk_size Lines: 1 -6235865 +524288 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Path: sys/block/md5/md/degraded Lines: 1 -4294967295 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/unicast_rcv_packets +Directory: sys/block/md5/md/dev-sdaa +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md5/md/dev-sdaa/state Lines: 1 -61148 +spare Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters/unicast_xmit_packets +Directory: sys/block/md5/md/dev-sdn +Mode: 755 +# ttar - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md5/md/dev-sdn/state Lines: 1 -61239 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/mlx4_0/ports/1/counters_ext +Directory: sys/block/md5/md/dev-sdo Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_multicast_rcv_packets +Path: sys/block/md5/md/dev-sdo/state Lines: 1 -93 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_multicast_xmit_packets +Directory: sys/block/md5/md/dev-sdp +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md5/md/dev-sdp/state Lines: 1 -16 +faulty Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_rcv_data_64 +Path: sys/block/md5/md/level Lines: 1 -4631917 +raid5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_rcv_packets_64 +Path: sys/block/md5/md/metadata_version Lines: 1 -0 +1.2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_unicast_rcv_packets +Path: sys/block/md5/md/raid_disks Lines: 1 -61148 +3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_unicast_xmit_packets +Directory: sys/block/md5/md/rd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md5/md/rd0/state Lines: 1 -61239 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_xmit_data_64 +Directory: sys/block/md5/md/rd1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md5/md/rd1/state Lines: 1 -3733440 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_xmit_packets_64 +Directory: sys/block/md5/md/rd2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md5/md/rd2/state Lines: 1 -0 +faulty Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/phys_state +Path: sys/block/md5/md/sync_action Lines: 1 -5: LinkUp +idle Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/rate +Path: sys/block/md5/md/sync_completed Lines: 1 -40 Gb/sec (4X QDR) +none Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/1/state +Path: sys/block/md5/md/uuid Lines: 1 -4: ACTIVE +7615b98d-f2ba-4d99-bee8-6202d8e130b9 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/mlx4_0/ports/2 +Directory: sys/block/md6 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
sys/class/infiniband/mlx4_0/ports/2/counters +Directory: sys/block/md6/md Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Path: sys/block/md6/md/array_state Lines: 1 -0 +active Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery +Path: sys/block/md6/md/chunk_size Lines: 1 -0 +524288 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters/multicast_rcv_packets +Path: sys/block/md6/md/degraded Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters/multicast_xmit_packets -Lines: 1 -0 -Mode: 644 +Directory: sys/block/md6/md/dev-sdq +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data +Path: sys/block/md6/md/dev-sdq/state Lines: 1 -0 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data -Lines: 1 -0 -Mode: 644 +Directory: sys/block/md6/md/dev-sdr +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters/unicast_rcv_packets +Path: sys/block/md6/md/dev-sdr/state Lines: 1 -0 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters/unicast_xmit_packets +Directory: sys/block/md6/md/dev-sds +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md6/md/dev-sds/state Lines: 1 -0 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/infiniband/mlx4_0/ports/2/counters_ext +Directory: sys/block/md6/md/dev-sdt Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_multicast_rcv_packets +Path: sys/block/md6/md/dev-sdt/state Lines: 1 -93 +spare Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_multicast_xmit_packets +Path: sys/block/md6/md/level Lines: 1 -16 +raid6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_rcv_data_64 +Path: sys/block/md6/md/metadata_version Lines: 1 -4631917 +1.2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_rcv_packets_64 +Path: sys/block/md6/md/raid_disks Lines: 1 -0 +4 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_unicast_rcv_packets +Directory: sys/block/md6/md/rd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md6/md/rd0/state Lines: 1 -61148 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_unicast_xmit_packets +Directory: 
sys/block/md6/md/rd1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md6/md/rd1/state Lines: 1 -61239 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_xmit_data_64 +Directory: sys/block/md6/md/rd2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md6/md/rd2/state Lines: 1 -3733440 +in_sync Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_xmit_packets_64 +Directory: sys/block/md6/md/rd3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/block/md6/md/rd3/state Lines: 1 -0 +spare Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/phys_state +Path: sys/block/md6/md/sync_action Lines: 1 -5: LinkUp +recover Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/rate +Path: sys/block/md6/md/sync_completed Lines: 1 -40 Gb/sec (4X QDR) +1569888 / 2093056 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/infiniband/mlx4_0/ports/2/state +Path: sys/block/md6/md/uuid Lines: 1 -4: ACTIVE +5f529b25-6efd-46e4-99a2-31f6f597be6b Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net +Directory: sys/block/sda Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/bond0 +Directory: sys/block/sda/queue Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/addr_assign_type +Path: sys/block/sda/queue/add_random Lines: 1 -3 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/addr_len +Path: sys/block/sda/queue/chunk_sectors Lines: 1 -6 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/address +Path: sys/block/sda/queue/dax Lines: 1 -01:01:01:01:01:01 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/bond0/bonding -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/bonding/slaves -Lines: 0 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/broadcast +Path: sys/block/sda/queue/discard_granularity Lines: 1 -ff:ff:ff:ff:ff:ff +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/carrier +Path: sys/block/sda/queue/discard_max_bytes Lines: 1 -1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/carrier_changes +Path: sys/block/sda/queue/discard_max_hw_bytes Lines: 1 -2 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/carrier_down_count +Path: sys/block/sda/queue/discard_zeroes_data Lines: 1 -1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/carrier_up_count 
+Path: sys/block/sda/queue/fua Lines: 1 -1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/dev_id +Path: sys/block/sda/queue/hw_sector_size Lines: 1 -0x20 +512 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/dormant +Path: sys/block/sda/queue/io_poll Lines: 1 -1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/duplex +Path: sys/block/sda/queue/io_poll_delay Lines: 1 -full +-1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/flags +Path: sys/block/sda/queue/io_timeout Lines: 1 -0x1303 +30000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/ifalias -Lines: 0 -Mode: 644 +Directory: sys/block/sda/queue/iosched +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/ifindex +Path: sys/block/sda/queue/iosched/back_seek_max Lines: 1 -2 +16384 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/iflink +Path: sys/block/sda/queue/iosched/back_seek_penalty Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/link_mode +Path: sys/block/sda/queue/iosched/fifo_expire_async Lines: 1 -1 +250 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/mtu +Path: sys/block/sda/queue/iosched/fifo_expire_sync Lines: 1 -1500 +125 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/name_assign_type +Path: sys/block/sda/queue/iosched/low_latency Lines: 1 -2 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/netdev_group +Path: sys/block/sda/queue/iosched/max_budget Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/operstate +Path: sys/block/sda/queue/iosched/slice_idle Lines: 1 -up -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/phys_port_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/phys_port_name -Lines: 0 +8 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/phys_switch_id -Lines: 0 +Path: sys/block/sda/queue/iosched/slice_idle_us +Lines: 1 +8000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/speed +Path: sys/block/sda/queue/iosched/strict_guarantees Lines: 1 -1000 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/tx_queue_len +Path: sys/block/sda/queue/iosched/timeout_sync Lines: 1 -1000 +125 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bond0/type +Path: sys/block/sda/queue/iostats Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bonding_masters +Path: sys/block/sda/queue/logical_block_size Lines: 1 -bond0 dmz int +512 Mode: 644 # 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/addr_assign_type +Path: sys/block/sda/queue/max_discard_segments Lines: 1 -3 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/addr_len +Path: sys/block/sda/queue/max_hw_sectors_kb Lines: 1 -6 +32767 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/address +Path: sys/block/sda/queue/max_integrity_segments Lines: 1 -01:01:01:01:01:01 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz/bonding -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/bonding/slaves +Path: sys/block/sda/queue/max_sectors_kb Lines: 1 -eth0 eth4 +1280 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/broadcast +Path: sys/block/sda/queue/max_segment_size Lines: 1 -ff:ff:ff:ff:ff:ff +65536 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/carrier +Path: sys/block/sda/queue/max_segments Lines: 1 -1 +168 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/carrier_changes +Path: sys/block/sda/queue/minimum_io_size Lines: 1 -2 +512 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/carrier_down_count +Path: sys/block/sda/queue/nomerges Lines: 1 -1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/carrier_up_count +Path: sys/block/sda/queue/nr_requests Lines: 1 -1 +64 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/dev_id +Path: sys/block/sda/queue/nr_zones Lines: 1 -0x20 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/dormant +Path: sys/block/sda/queue/optimal_io_size Lines: 1 -1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/duplex +Path: sys/block/sda/queue/physical_block_size Lines: 1 -full +512 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/flags +Path: sys/block/sda/queue/read_ahead_kb Lines: 1 -0x1303 +128 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/ifalias -Lines: 0 +Path: sys/block/sda/queue/rotational +Lines: 1 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/ifindex +Path: sys/block/sda/queue/rq_affinity Lines: 1 -2 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/iflink +Path: sys/block/sda/queue/scheduler Lines: 1 -2 +mq-deadline kyber [bfq] none Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/link_mode +Path: sys/block/sda/queue/wbt_lat_usec Lines: 1 -1 +75000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/mtu +Path: 
sys/block/sda/queue/write_cache Lines: 1 -1500 +write back Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/name_assign_type +Path: sys/block/sda/queue/write_same_max_bytes Lines: 1 -2 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/netdev_group +Path: sys/block/sda/queue/write_zeroes_max_bytes Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/operstate +Path: sys/block/sda/queue/zoned Lines: 1 -up +none Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/phys_port_id -Lines: 0 -Mode: 644 +Directory: sys/bus +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/phys_port_name -Lines: 0 -Mode: 644 +Directory: sys/bus/cpu +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/phys_switch_id -Lines: 0 -Mode: 644 +Directory: sys/bus/cpu/devices +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz/slave_eth0 +Path: sys/bus/cpu/devices/cpu0 +SymlinkTo: ../../../devices/system/cpu/cpu0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/bus/cpu/devices/cpu1 +SymlinkTo: ../../../devices/system/cpu/cpu1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/bus/cpu/devices/cpu2 +SymlinkTo: ../../../devices/system/cpu/cpu2 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/bus/cpu/devices/cpu3 +SymlinkTo: ../../../devices/system/cpu/cpu3 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/bus/node Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz/slave_eth0/bonding_slave +Directory: sys/bus/node/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/slave_eth0/bonding_slave/mii_status -Lines: 1 -up -Mode: 644 +Path: sys/bus/node/devices/node0 +SymlinkTo: ../../../devices/system/node/node0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/slave_eth0/operstate -Lines: 1 -up -Mode: 644 +Path: sys/bus/node/devices/node1 +SymlinkTo: ../../../devices/system/node/node1 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz/slave_eth4 +Directory: sys/bus/pci Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz/slave_eth4/bonding_slave +Directory: sys/bus/pci/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/slave_eth4/bonding_slave/mii_status -Lines: 1 -up -Mode: 644 +Path: sys/bus/pci/devices/0000:00:02.1 +SymlinkTo: ../../../devices/pci0000:00/0000:00:02.1 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/slave_eth4/operstate -Lines: 1 -up -Mode: 644 +Path: sys/bus/pci/devices/0000:01:00.0 +SymlinkTo: ../../../devices/pci0000:00/0000:00:02.1/0000:01:00.0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/speed +Path: 
sys/bus/pci/devices/0000:45:00.0 +SymlinkTo: ../../../devices/pci0000:40/0000:40:01.3/0000:45:00.0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/dmi +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/dmi/id +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/bios_date Lines: 1 -1000 -Mode: 644 +04/12/2021 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/tx_queue_len +Path: sys/class/dmi/id/bios_release Lines: 1 -1000 -Mode: 644 +2.2 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/type +Path: sys/class/dmi/id/bios_vendor Lines: 1 -1 -Mode: 644 +Dell Inc. +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/eth0 -SymlinkTo: ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ +Path: sys/class/dmi/id/bios_version +Lines: 1 +2.2.4 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int -Mode: 755 +Path: sys/class/dmi/id/board_name +Lines: 1 +07PXPY +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/addr_assign_type +Path: sys/class/dmi/id/board_serial Lines: 1 -3 -Mode: 644 +.7N62AI2.GRTCL6944100GP. +Mode: 400 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/addr_len +Path: sys/class/dmi/id/board_vendor Lines: 1 -6 -Mode: 644 +Dell Inc. +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/address +Path: sys/class/dmi/id/board_version Lines: 1 -01:01:01:01:01:01 -Mode: 644 +A01 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int/bonding -Mode: 755 +Path: sys/class/dmi/id/chassis_asset_tag +Lines: 1 + +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/bonding/slaves +Path: sys/class/dmi/id/chassis_serial Lines: 1 -eth5 eth1 -Mode: 644 +7N62AI2 +Mode: 400 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/broadcast +Path: sys/class/dmi/id/chassis_type Lines: 1 -ff:ff:ff:ff:ff:ff -Mode: 644 +23 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/carrier +Path: sys/class/dmi/id/chassis_vendor Lines: 1 -1 -Mode: 644 +Dell Inc. 
+Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/carrier_changes +Path: sys/class/dmi/id/chassis_version Lines: 1 -2 -Mode: 644 + +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/carrier_down_count +Path: sys/class/dmi/id/modalias Lines: 1 -1 -Mode: 644 +dmi:bvnDellInc.:bvr2.2.4:bd04/12/2021:br2.2:svnDellInc.:pnPowerEdgeR6515:pvr:rvnDellInc.:rn07PXPY:rvrA01:cvnDellInc.:ct23:cvr: +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/carrier_up_count +Path: sys/class/dmi/id/product_family Lines: 1 -1 -Mode: 644 +PowerEdge +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/dev_id +Path: sys/class/dmi/id/product_name Lines: 1 -0x20 -Mode: 644 +PowerEdge R6515 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/dormant +Path: sys/class/dmi/id/product_serial Lines: 1 -1 -Mode: 644 +7N62AI2 +Mode: 400 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/duplex +Path: sys/class/dmi/id/product_sku Lines: 1 -full -Mode: 644 +SKU=NotProvided;ModelName=PowerEdge R6515 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/flags +Path: sys/class/dmi/id/product_uuid Lines: 1 -0x1303 -Mode: 644 +83340ca8-cb49-4474-8c29-d2088ca84dd9 +Mode: 400 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/ifalias -Lines: 0 -Mode: 644 +Path: sys/class/dmi/id/product_version +Lines: 1 +�[� +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/ifindex +Path: sys/class/dmi/id/sys_vendor Lines: 1 -2 -Mode: 644 +Dell Inc. 
+Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/iflink +Path: sys/class/dmi/id/uevent Lines: 1 -2 +MODALIAS=dmi:bvnDellInc.:bvr2.2.4:bd04/12/2021:br2.2:svnDellInc.:pnPowerEdgeR6515:pvr:rvnDellInc.:rn07PXPY:rvrA01:cvnDellInc.:ct23:cvr: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/link_mode +Directory: sys/class/fc_host +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/fc_host/host0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/fc_host/host0/dev_loss_tmo Lines: 1 -1 +30 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/mtu +Path: sys/class/fc_host/host0/fabric_name Lines: 1 -1500 +0x0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/name_assign_type +Path: sys/class/fc_host/host0/node_name Lines: 1 -2 +0x2000e0071bce95f2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/netdev_group +Path: sys/class/fc_host/host0/port_id Lines: 1 -0 +0x000002 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/operstate +Path: sys/class/fc_host/host0/port_name Lines: 1 -up +0x1000e0071bce95f2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/phys_port_id -Lines: 0 +Path: sys/class/fc_host/host0/port_state +Lines: 1 +Online Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/phys_port_name -Lines: 0 +Path: sys/class/fc_host/host0/port_type +Lines: 1 +Point-To-Point (direct nport connection) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/phys_switch_id -Lines: 0 +Path: sys/class/fc_host/host0/speed +Lines: 1 +16 Gbit Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int/slave_eth1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int/slave_eth1/bonding_slave +Directory: sys/class/fc_host/host0/statistics Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/slave_eth1/bonding_slave/mii_status +Path: sys/class/fc_host/host0/statistics/dumped_frames Lines: 1 -down +0xffffffffffffffff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/slave_eth1/operstate +Path: sys/class/fc_host/host0/statistics/error_frames Lines: 1 -down +0x0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int/slave_eth5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int/slave_eth5/bonding_slave -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/slave_eth5/bonding_slave/mii_status +Path: sys/class/fc_host/host0/statistics/fcp_packet_aborts Lines: 1 -up +0x13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/slave_eth5/operstate +Path: 
sys/class/fc_host/host0/statistics/invalid_crc_count Lines: 1 -up +0x2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/speed +Path: sys/class/fc_host/host0/statistics/invalid_tx_word_count Lines: 1 -1000 +0x8 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/tx_queue_len +Path: sys/class/fc_host/host0/statistics/link_failure_count Lines: 1 -1000 +0x9 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/type +Path: sys/class/fc_host/host0/statistics/loss_of_signal_count Lines: 1 -1 +0x11 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/power_supply/AC -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/online +Path: sys/class/fc_host/host0/statistics/loss_of_sync_count Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/power_supply/AC/power -Mode: 755 +0x10 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/async +Path: sys/class/fc_host/host0/statistics/nos_count Lines: 1 -disabled +0x12 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/autosuspend_delay_ms -Lines: 0 +Path: sys/class/fc_host/host0/statistics/rx_frames +Lines: 1 +0x3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/control +Path: sys/class/fc_host/host0/statistics/rx_words Lines: 1 -auto +0x4 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/runtime_active_kids +Path: sys/class/fc_host/host0/statistics/seconds_since_last_reset Lines: 1 -0 -Mode: 444 +0x7 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/runtime_active_time +Path: sys/class/fc_host/host0/statistics/tx_frames Lines: 1 -0 -Mode: 444 +0x5 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/runtime_enabled +Path: sys/class/fc_host/host0/statistics/tx_words Lines: 1 -disabled -Mode: 444 +0x6 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/runtime_status +Path: sys/class/fc_host/host0/supported_classes Lines: 1 -unsupported -Mode: 444 +Class 3 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/runtime_suspended_time +Path: sys/class/fc_host/host0/supported_speeds Lines: 1 -0 -Mode: 444 +4 Gbit, 8 Gbit, 16 Gbit +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/runtime_usage +Path: sys/class/fc_host/host0/symbolic_name Lines: 1 -0 -Mode: 444 +Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. 
OS:Linux +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup +Directory: sys/class/fc_host/host1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/fc_host/host1/speed Lines: 1 -enabled +8 Gbit Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup_abort_count -Lines: 1 -0 -Mode: 444 +Directory: sys/class/fc_host/host1/statistics +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup_active +Path: sys/class/fc_host/host1/statistics/dumped_frames Lines: 1 -0 -Mode: 444 +0x0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup_active_count +Path: sys/class/fc_host/host1/statistics/error_frames Lines: 1 -1 -Mode: 444 +0x13 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup_count +Path: sys/class/fc_host/host1/statistics/fcp_packet_aborts Lines: 1 -0 -Mode: 444 +0xffffffffffffffff +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup_expire_count +Path: sys/class/fc_host/host1/statistics/invalid_crc_count Lines: 1 -0 -Mode: 444 +0x20 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup_last_time_ms +Path: sys/class/fc_host/host1/statistics/invalid_tx_word_count Lines: 1 -7888 -Mode: 444 +0x80 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup_max_time_ms +Path: sys/class/fc_host/host1/statistics/link_failure_count Lines: 1 -2 -Mode: 444 +0x90 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup_prevent_sleep_time_ms +Path: sys/class/fc_host/host1/statistics/loss_of_signal_count Lines: 1 -0 -Mode: 444 +0x110 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/power/wakeup_total_time_ms +Path: sys/class/fc_host/host1/statistics/loss_of_sync_count Lines: 1 -2 -Mode: 444 +0x100 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/type +Path: sys/class/fc_host/host1/statistics/nos_count Lines: 1 -Mains -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 +0x120 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/power_supply/BAT0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/alarm +Path: sys/class/fc_host/host1/statistics/rx_frames Lines: 1 -2253000 +0x30 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/capacity +Path: sys/class/fc_host/host1/statistics/rx_words Lines: 1 -81 -Mode: 444 +0x40 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/capacity_level 
+Path: sys/class/fc_host/host1/statistics/seconds_since_last_reset Lines: 1 -Normal -Mode: 444 +0x70 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/charge_start_threshold +Path: sys/class/fc_host/host1/statistics/tx_frames Lines: 1 -95 +0x50 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/charge_stop_threshold +Path: sys/class/fc_host/host1/statistics/tx_words Lines: 1 -100 +0x60 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/cycle_count -Lines: 1 -0 -Mode: 444 +Directory: sys/class/hwmon +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/energy_full -Lines: 1 -45070000 -Mode: 444 +Path: sys/class/hwmon/hwmon0 +SymlinkTo: ../../devices/platform/coretemp.0/hwmon/hwmon0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/energy_full_design -Lines: 1 -47520000 -Mode: 444 +Path: sys/class/hwmon/hwmon1 +SymlinkTo: ../../devices/platform/coretemp.1/hwmon/hwmon1 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/energy_now -Lines: 1 -36580000 -Mode: 444 +Path: sys/class/hwmon/hwmon2 +SymlinkTo: ../../devices/platform/applesmc.768/hwmon/hwmon2 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/manufacturer -Lines: 1 -LGC -Mode: 444 +Path: sys/class/hwmon/hwmon3 +SymlinkTo: ../../devices/platform/nct6775.656/hwmon/hwmon3 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/model_name +Directory: sys/class/hwmon/hwmon4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/hwmon/hwmon4/freq1_input Lines: 1 -LNV-45N1 -Mode: 444 +214000000 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/power_supply/BAT0/power -Mode: 755 +Path: sys/class/hwmon/hwmon4/freq1_label +Lines: 1 +sclk +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power/async +Path: sys/class/hwmon/hwmon4/freq2_input Lines: 1 -disabled +300000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power/autosuspend_delay_ms -Lines: 0 +Path: sys/class/hwmon/hwmon4/freq2_label +Lines: 1 +mclk Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power/control +Path: sys/class/hwmon/hwmon4/temp1_crit Lines: 1 -auto +100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power/runtime_active_kids +Path: sys/class/hwmon/hwmon4/temp1_crit_alarm Lines: 1 0 -Mode: 444 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power/runtime_active_time +Path: sys/class/hwmon/hwmon4/temp1_input Lines: 1 -0 -Mode: 444 +55000 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power/runtime_enabled +Path: sys/class/hwmon/hwmon4/temp1_label Lines: 1 -disabled -Mode: 444 
+foosensor +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power/runtime_status +Path: sys/class/hwmon/hwmon4/temp1_max Lines: 1 -unsupported -Mode: 444 +100000 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power/runtime_suspended_time +Path: sys/class/hwmon/hwmon4/temp2_crit Lines: 1 -0 -Mode: 444 +100000 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power/runtime_usage +Path: sys/class/hwmon/hwmon4/temp2_crit_alarm Lines: 1 0 -Mode: 444 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/power_now +Path: sys/class/hwmon/hwmon4/temp2_input Lines: 1 -5002000 -Mode: 444 +54000 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/present +Path: sys/class/hwmon/hwmon4/temp2_label Lines: 1 -1 -Mode: 444 +foosensor +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/serial_number +Path: sys/class/hwmon/hwmon4/temp2_max Lines: 1 -38109 -Mode: 444 +100000 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/status -Lines: 1 -Discharging -Mode: 444 +Path: sys/class/hwmon/hwmon5 +SymlinkTo: ../../devices/platform/bogus.0/hwmon/hwmon5/ # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/technology -Lines: 1 -Li-ion -Mode: 444 +Directory: sys/class/infiniband +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/type -Lines: 1 -Battery -Mode: 444 +Directory: sys/class/infiniband/i40iw0 +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=11660000 -POWER_SUPPLY_POWER_NOW=5002000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=45070000 -POWER_SUPPLY_ENERGY_NOW=36580000 -POWER_SUPPLY_CAPACITY=81 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 +Path: sys/class/infiniband/i40iw0/board_id +Lines: 1 +I40IW Board ID Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/voltage_min_design +Path: sys/class/infiniband/i40iw0/fw_ver Lines: 1 -10800000 -Mode: 444 +0.2 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/power_supply/BAT0/voltage_now +Path: sys/class/infiniband/i40iw0/hca_type Lines: 1 -11660000 -Mode: 444 +I40IW +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/powercap +Directory: sys/class/infiniband/i40iw0/ports Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/powercap/intel-rapl +Directory: sys/class/infiniband/i40iw0/ports/1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - -Path: sys/class/powercap/intel-rapl/enabled +Directory: sys/class/infiniband/i40iw0/ports/1/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/VL15_dropped Lines: 1 -1 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl/uevent -Lines: 0 +Path: sys/class/infiniband/i40iw0/ports/1/counters/excessive_buffer_overrun_errors +Lines: 1 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/powercap/intel-rapl:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw +Path: sys/class/infiniband/i40iw0/ports/1/counters/link_downed Lines: 1 -95000000 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/constraint_0_name +Path: sys/class/infiniband/i40iw0/ports/1/counters/link_error_recovery Lines: 1 -long_term +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw +Path: sys/class/infiniband/i40iw0/ports/1/counters/local_link_integrity_errors Lines: 1 -4090000000 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/constraint_0_time_window_us +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_constraint_errors Lines: 1 -999424 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_data Lines: 1 -0 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/constraint_1_name +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_errors Lines: 1 -short_term +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_packets Lines: 1 -4090000000 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/constraint_1_time_window_us +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_remote_physical_errors Lines: 1 -2440 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/enabled +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_switch_relay_errors Lines: 1 -1 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/energy_uj +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_constraint_errors Lines: 1 -240422366267 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/max_energy_range_uj +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_data Lines: 1 -262143328850 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/name +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_discards Lines: 1 -package-0 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0/uevent -Lines: 0 +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_packets +Lines: 1 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/powercap/intel-rapl:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw -Lines: 0 +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_wait +Lines: 1 +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0:0/constraint_0_name +Path: sys/class/infiniband/i40iw0/ports/1/counters/symbol_error Lines: 1 -long_term +N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw +Path: sys/class/infiniband/i40iw0/ports/1/link_layer Lines: 1 -0 +InfiniBand +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/phys_state +Lines: 1 +5: LinkUp Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us +Path: sys/class/infiniband/i40iw0/ports/1/rate Lines: 1 -976 +10 Gb/sec (4X) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0:0/enabled +Path: sys/class/infiniband/i40iw0/ports/1/state Lines: 1 -0 +4: ACTIVE Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0:0/energy_uj +Directory: sys/class/infiniband/mlx4_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/board_id Lines: 1 -118821284256 +SM_1141000001000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0:0/max_energy_range_uj +Path: sys/class/infiniband/mlx4_0/fw_ver Lines: 1 -262143328850 +2.31.5050 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0:0/name +Path: sys/class/infiniband/mlx4_0/hca_type Lines: 1 -core +MT4099 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/powercap/intel-rapl:0:0/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/thermal -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/thermal/cooling_device0 -SymlinkTo: ../../devices/virtual/thermal/cooling_device0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/thermal/thermal_zone0 -SymlinkTo: ../../devices/virtual/thermal/thermal_zone0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00 
-Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:03.0 +Directory: sys/class/infiniband/mlx4_0/ports Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0 +Directory: sys/class/infiniband/mlx4_0/ports/1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net +Directory: sys/class/infiniband/mlx4_0/ports/1/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0 -Mode: 755 +Path: sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/addr_assign_type +Path: sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery Lines: 1 -3 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/addr_len +Path: sys/class/infiniband/mlx4_0/ports/1/counters/multicast_rcv_packets Lines: 1 -6 +93 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/address +Path: sys/class/infiniband/mlx4_0/ports/1/counters/multicast_xmit_packets Lines: 1 -01:01:01:01:01:01 +16 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/broadcast +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors Lines: 1 -ff:ff:ff:ff:ff:ff +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data Lines: 1 -1 +4631917 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_changes +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_discards Lines: 1 -2 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_down_count +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors Lines: 1 -1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_up_count +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets Lines: 1 -1 +6825908347 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/dev_id +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors Lines: 1 -0x20 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/dormant +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data Lines: 1 -1 +3733440 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/duplex +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards Lines: 1 -full +5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/flags +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets Lines: 1 -0x1303 +6235865 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ifalias -Lines: 0 +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Lines: 1 +4294967295 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ifindex +Path: sys/class/infiniband/mlx4_0/ports/1/counters/unicast_rcv_packets Lines: 1 -2 +61148 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/iflink +Path: sys/class/infiniband/mlx4_0/ports/1/counters/unicast_xmit_packets Lines: 1 -2 +61239 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/link_mode +Directory: sys/class/infiniband/mlx4_0/ports/1/counters_ext +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_multicast_rcv_packets Lines: 1 -1 +93 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/mtu +Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_multicast_xmit_packets Lines: 1 -1500 +16 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/name_assign_type +Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_rcv_data_64 Lines: 1 -2 +4631917 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/netdev_group +Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_rcv_packets_64 Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/operstate +Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_unicast_rcv_packets Lines: 1 -up +61148 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_port_id -Lines: 0 +Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_unicast_xmit_packets +Lines: 1 +61239 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_port_name -Lines: 0 +Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_xmit_data_64 +Lines: 1 +3733440 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_switch_id -Lines: 0 +Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_xmit_packets_64 +Lines: 1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/speed +Path: sys/class/infiniband/mlx4_0/ports/1/link_layer Lines: 1 -1000 +InfiniBand +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/phys_state +Lines: 1 +5: LinkUp Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/tx_queue_len +Path: sys/class/infiniband/mlx4_0/ports/1/rate Lines: 1 -1000 +40 Gb/sec (4X QDR) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/type +Path: sys/class/infiniband/mlx4_0/ports/1/state Lines: 1 -1 +4: ACTIVE Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0 +Directory: sys/class/infiniband/mlx4_0/ports/2 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4 +Directory: sys/class/infiniband/mlx4_0/ports/2/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 -Mode: 755 +Path: sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 -Mode: 755 +Path: sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 -Mode: 755 +Path: sys/class/infiniband/mlx4_0/ports/2/counters/multicast_rcv_packets +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block -Mode: 755 +Path: sys/class/infiniband/mlx4_0/ports/2/counters/multicast_xmit_packets +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb -Mode: 755 +Path: sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache -Mode: 755 +Path: sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Path: sys/class/infiniband/mlx4_0/ports/2/counters/unicast_rcv_packets Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Path: sys/class/infiniband/mlx4_0/ports/2/counters/unicast_xmit_packets +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/infiniband/mlx4_0/ports/2/counters_ext Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_multicast_rcv_packets Lines: 1 -0 +93 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_multicast_xmit_packets +Lines: 1 +16 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_rcv_data_64 +Lines: 1 +4631917 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_rcv_packets_64 Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_unicast_rcv_packets +Lines: 1 +61148 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_unicast_xmit_packets +Lines: 1 +61239 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_xmit_data_64 +Lines: 1 +3733440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_xmit_packets_64 Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Path: sys/class/infiniband/mlx4_0/ports/2/link_layer Lines: 1 -100 +InfiniBand +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/bond0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/bond0/bonding +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/bonding/slaves +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - +Path: sys/class/net/bond0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/dev_id +Lines: 1 +0x20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/phys_port_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/phys_port_name +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/phys_switch_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/speed +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bonding_masters +Lines: 1 +bond0 dmz int +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/dmz +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - +Path: sys/class/net/dmz/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/dmz/bonding +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/bonding/slaves +Lines: 1 +eth0 eth4 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/dev_id +Lines: 1 +0x20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/phys_port_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/phys_port_name +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/phys_switch_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Directory: sys/class/net/dmz/slave_eth0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/dmz/slave_eth0/bonding_slave +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/slave_eth0/bonding_slave/mii_status +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/slave_eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/dmz/slave_eth4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/dmz/slave_eth4/bonding_slave +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/slave_eth4/bonding_slave/mii_status +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/slave_eth4/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/eth0 +SymlinkTo: ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/int +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/int/bonding +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/bonding/slaves +Lines: 1 +eth5 eth1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/dev_id +Lines: 1 +0x20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/dormant +Lines: 1 +1 +Mode: 644 +# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/phys_port_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/phys_port_name +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/phys_switch_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/int/slave_eth1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/int/slave_eth1/bonding_slave +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/slave_eth1/bonding_slave/mii_status +Lines: 1 +down +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/slave_eth1/operstate +Lines: 1 +down +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/int/slave_eth5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/int/slave_eth5/bonding_slave +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/slave_eth5/bonding_slave/mii_status +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/slave_eth5/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/nvme +Mode: 755 +# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/nvme/nvme0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/nvme/nvme0/cntlid +Lines: 1 +1997 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/nvme/nvme0/firmware_rev +Lines: 1 +1B2QEXP7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/nvme/nvme0/model +Lines: 1 +Samsung SSD 970 PRO 512GB +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/nvme/nvme0/serial +Lines: 1 +S680HF8N190894I +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/nvme/nvme0/state +Lines: 1 +live +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/power_supply/AC +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/online +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/power_supply/AC/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/wakeup +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/wakeup_abort_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/wakeup_active +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/wakeup_active_count +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - +Path: sys/class/power_supply/AC/power/wakeup_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/wakeup_expire_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/wakeup_last_time_ms +Lines: 1 +7888 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/wakeup_max_time_ms +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/wakeup_prevent_sleep_time_ms +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/wakeup_total_time_ms +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/type +Lines: 1 +Mains +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/uevent +Lines: 2 +POWER_SUPPLY_NAME=AC +POWER_SUPPLY_ONLINE=0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/power_supply/BAT0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/alarm +Lines: 1 +2253000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/capacity +Lines: 1 +81 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/capacity_level +Lines: 1 +Normal +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/charge_start_threshold +Lines: 1 +95 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/charge_stop_threshold +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/cycle_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/energy_full +Lines: 1 +45070000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/energy_full_design +Lines: 1 +47520000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/energy_now +Lines: 1 +36580000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/manufacturer +Lines: 1 +LGC +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/model_name +Lines: 1 +LNV-45N1�� +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/power_supply/BAT0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power/autosuspend_delay_ms +Lines: 0 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/power_now +Lines: 1 +5002000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/present +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/serial_number +Lines: 1 +38109 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/status +Lines: 1 +Discharging +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/technology +Lines: 1 +Li-ion +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/type +Lines: 1 +Battery +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/uevent +Lines: 16 +POWER_SUPPLY_NAME=BAT0 +POWER_SUPPLY_STATUS=Discharging +POWER_SUPPLY_PRESENT=1 +POWER_SUPPLY_TECHNOLOGY=Li-ion +POWER_SUPPLY_CYCLE_COUNT=0 +POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 +POWER_SUPPLY_VOLTAGE_NOW=11660000 +POWER_SUPPLY_POWER_NOW=5002000 +POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 +POWER_SUPPLY_ENERGY_FULL=45070000 +POWER_SUPPLY_ENERGY_NOW=36580000 +POWER_SUPPLY_CAPACITY=81 +POWER_SUPPLY_CAPACITY_LEVEL=Normal +POWER_SUPPLY_MODEL_NAME=LNV-45N1 +POWER_SUPPLY_MANUFACTURER=LGC +POWER_SUPPLY_SERIAL_NUMBER=38109 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/voltage_min_design +Lines: 1 +10800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/voltage_now +Lines: 1 +11660000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: sys/class/powercap/intel-rapl/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw +Lines: 1 +95000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_0_name +Lines: 1 +long_term +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_0_time_window_us +Lines: 1 +999424 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_1_name +Lines: 1 +short_term +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_1_time_window_us +Lines: 1 +2440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/energy_uj +Lines: 1 +240422366267 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/name +Lines: 1 +package-0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/constraint_0_name +Lines: 1 +long_term +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us +Lines: 1 +976 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/enabled +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/energy_uj +Lines: 1 +118821284256 +Mode: 644 +# ttar - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/name +Lines: 1 +core +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/scsi_tape +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/nst0 +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/nst0a +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/nst0l +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/nst0m +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/st0 +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/st0a +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/st0l +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/st0m +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/thermal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/thermal/cooling_device0 +SymlinkTo: ../../devices/virtual/thermal/cooling_device0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/thermal/thermal_zone0 +SymlinkTo: ../../devices/virtual/thermal/thermal_zone0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/watchdog +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/watchdog/watchdog0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/access_cs0 +Lines: 1 +0EOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/bootstatus +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/class/watchdog/watchdog0/fw_version +Lines: 1 +2EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/identity +Lines: 1 +Software WatchdogEOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/nowayout +Lines: 1 +0EOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/options +Lines: 1 +0x8380EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/pretimeout +Lines: 1 +120EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/pretimeout_governor +Lines: 1 +noopEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/state +Lines: 1 +activeEOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/status +Lines: 1 +0x8000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/timeleft +Lines: 1 +300EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/watchdog/watchdog0/timeout +Lines: 1 +60EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/watchdog/watchdog1 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/in_flight 
+Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001/driver +SymlinkTo: ../../../../bus/pci_express/drivers/pcie_pme +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001/power/async +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001/subsystem +SymlinkTo: ../../../../bus/pci_express +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie001/uevent +Lines: 1 +DRIVER=pcie_pme +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie010 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie010/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie010/power/async +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie010/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie010/power/runtime_enabled +Lines: 1 
+disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie010/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie010/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie010/subsystem +SymlinkTo: ../../../../bus/pci_express +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:00:02.1:pcie010/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/aer_dev_correctable +Lines: 9 +RxErr 0 +BadTLP 0 +BadDLLP 0 +Rollover 0 +Timeout 0 +NonFatalErr 0 +CorrIntErr 0 +HeaderOF 0 +TOTAL_ERR_COR 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/aer_dev_fatal +Lines: 19 +Undefined 0 +DLP 0 +SDES 0 +TLP 0 +FCP 0 +CmpltTO 0 +CmpltAbrt 0 +UnxCmplt 0 +RxOF 0 +MalfTLP 0 +ECRC 0 +UnsupReq 0 +ACSViol 0 +UncorrIntErr 0 +BlockedTLP 0 +AtomicOpBlocked 0 +TLPBlockedErr 0 +PoisonTLPBlocked 0 +TOTAL_ERR_FATAL 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/aer_dev_nonfatal +Lines: 19 +Undefined 0 +DLP 0 +SDES 0 +TLP 0 +FCP 0 +CmpltTO 0 +CmpltAbrt 0 +UnxCmplt 0 +RxOF 0 +MalfTLP 0 +ECRC 0 +UnsupReq 0 +ACSViol 0 +UncorrIntErr 0 +BlockedTLP 0 +AtomicOpBlocked 0 +TLPBlockedErr 0 +PoisonTLPBlocked 0 +TOTAL_ERR_NONFATAL 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/ari_enabled +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/broken_parity_status +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/class +Lines: 1 +0x010802 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/config +Lines: 2 +©À +TNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTE€ýNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTE©À!PNULLBYTENULLBYTENULLBYTENULLBYTE€NULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEÿNULLBYTENULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/consistent_dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/current_link_speed +Lines: 1 +8.0 GT/s PCIe +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/current_link_width +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/d3cold_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/device +Lines: 1 +0x540a +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/driver +SymlinkTo: ../../../../bus/pci/drivers/nvme +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/driver_override +Lines: 1 +(null) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/firmware_node +SymlinkTo: ../../../LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:16/device:17 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/iommu +SymlinkTo: ../../0000:00:00.2/iommu/ivhd0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/iommu_group +SymlinkTo: ../../../../kernel/iommu_groups/11 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/irq +Lines: 1 +80 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/link +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/local_cpulist +Lines: 1 +0-15 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/local_cpus +Lines: 1 +ffff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/max_link_speed +Lines: 1 +16.0 GT/s PCIe +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/max_link_width +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/modalias +Lines: 1 +pci:v0000C0A9d0000540Asv0000C0A9sd00005021bc01sc08i02 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_bus +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs/81 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs/82 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs/83 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs/84 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs/85 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs/86 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs/87 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs/88 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/msi_irqs/89 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/numa_node +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/address +Lines: 1 +0000:01:00.0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/cntlid +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/cntrltype +Lines: 1 +io +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/dctype +Lines: 1 +none +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/dev +Lines: 1 +240:0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/device +SymlinkTo: ../../../0000:01:00.0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/firmware_rev +Lines: 1 +P9CR30A +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/device +SymlinkTo: ../../nvme0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/name +Lines: 1 +nvme +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/subsystem +SymlinkTo: ../../../../../../../class/hwmon +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp1_alarm +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp1_crit +Lines: 1 +94850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp1_input +Lines: 1 +43850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp1_label +Lines: 1 +Composite +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp1_max +Lines: 1 +84850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp1_min +Lines: 1 +-150 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp2_input +Lines: 1 +43850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp2_label +Lines: 1 +Sensor 1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp2_max +Lines: 1 +65261850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp2_min +Lines: 1 +-273150 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp3_input +Lines: 1 +45850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp3_label +Lines: 1 +Sensor 2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp3_max +Lines: 1 +65261850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp3_min +Lines: 1 +-273150 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp9_input +Lines: 1 +43850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp9_label +Lines: 1 +Sensor 8 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp9_max +Lines: 1 +65261850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/temp9_min +Lines: 1 +-273150 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/hwmon3/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/kato +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/model +Lines: 1 +CT2000P3SSD8 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/dev +Lines: 1 +239:0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/device +SymlinkTo: ../../nvme0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power +Mode: 755 +# ttar - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/subsystem +SymlinkTo: ../../../../../../../class/nvme-generic +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/ng0n1/uevent +Lines: 3 +MAJOR=239 +MINOR=0 +DEVNAME=ng0n1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/numa_node +Lines: 1 +-1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/alignment_offset +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/bdi +SymlinkTo: ../../../../../../virtual/bdi/259:0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/capability +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/csi +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/dev +Lines: 1 +259:0 +Mode: 444 +# ttar - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/device +SymlinkTo: ../../nvme0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/discard_alignment +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/diskseq +Lines: 1 +9 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/events +Lines: 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/events_async +Lines: 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/events_poll_msecs +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/ext_range +Lines: 1 +256 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/hidden +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/holders +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/inflight +Lines: 1 + 0 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/integrity +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/integrity/device_is_integrity_capable +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/integrity/format +Lines: 1 +none +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/integrity/protection_interval_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/integrity/read_verify +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/integrity/tag_size +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/integrity/write_generate +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/metadata_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/0/cpu0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/0/cpu1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/0/cpu_list +Lines: 1 +0, 1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/0/nr_reserved_tags +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/0/nr_tags +Lines: 1 +1023 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/1/cpu2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/1/cpu3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/1/cpu_list +Lines: 1 +2, 3 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/1/nr_reserved_tags +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/1/nr_tags +Lines: 1 +1023 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/2/cpu4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/2/cpu5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/2/cpu_list +Lines: 1 +4, 5 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/2/nr_reserved_tags +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/2/nr_tags +Lines: 1 +1023 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/3/cpu6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/3/cpu7 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/3/cpu_list +Lines: 1 +6, 7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/3/nr_reserved_tags +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/3/nr_tags +Lines: 1 +1023 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/4/cpu8 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/4/cpu9 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/4/cpu_list +Lines: 1 +8, 9 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/4/nr_reserved_tags +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/4/nr_tags +Lines: 1 +1023 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/5/cpu10 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/5/cpu11 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/5/cpu_list +Lines: 1 +10, 11 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/5/nr_reserved_tags +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/5/nr_tags +Lines: 1 +1023 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/6/cpu12 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/6/cpu13 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/6/cpu_list +Lines: 1 +12, 13 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/6/nr_reserved_tags +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/6/nr_tags +Lines: 1 +1023 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/7 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/7/cpu14 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/7/cpu15 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/7/cpu_list +Lines: 1 +14, 15 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/7/nr_reserved_tags +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/mq/7/nr_tags +Lines: 1 +1023 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nsid +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nuse +Lines: 1 +3907029168 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/alignment_offset +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/dev +Lines: 1 +259:1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/discard_alignment +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/holders +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/inflight +Lines: 1 + 0 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/partition +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/ro +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/size +Lines: 1 +2201600 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/start +Lines: 1 +2048 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/stat +Lines: 1 + 575 1730 14410 185 2 0 2 0 0 17 186 1 0 2184688 1 0 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/subsystem +SymlinkTo: ../../../../../../../../class/block +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/trace +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/trace/act_mask +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/trace/enable +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/trace/end_lba +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/trace/pid +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/trace/start_lba +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p1/uevent +Lines: 6 +MAJOR=259 +MINOR=1 +DEVNAME=nvme0n1p1 +DEVTYPE=partition +DISKSEQ=9 +PARTN=1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/alignment_offset +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/dev +Lines: 1 +259:2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/discard_alignment +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/holders +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/inflight +Lines: 1 + 0 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/partition +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/ro +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/size +Lines: 1 +4194304 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/start +Lines: 1 +2203648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/stat +Lines: 1 + 144 19 8954 32 21 14 248 20 0 128 132 45 0 3566944 79 0 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/subsystem +SymlinkTo: ../../../../../../../../class/block +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/trace +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/trace/act_mask +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/trace/enable +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/trace/end_lba +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/trace/pid +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/trace/start_lba +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p2/uevent +Lines: 6 +MAJOR=259 +MINOR=2 +DEVNAME=nvme0n1p2 +DEVTYPE=partition +DISKSEQ=9 +PARTN=2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/alignment_offset +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/dev +Lines: 1 +259:3 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/discard_alignment +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/holders +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/holders/dm-0 +SymlinkTo: ../../../../../../../../virtual/block/dm-0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/inflight +Lines: 1 + 0 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/partition +Lines: 1 +3 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/ro +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/size +Lines: 1 +3900628992 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/start +Lines: 1 +6397952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/stat +Lines: 1 + 60223 15025 4174111 24812 83879 46834 2302280 32384 0 24667 57196 0 0 0 0 0 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/subsystem +SymlinkTo: ../../../../../../../../class/block +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/trace +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/trace/act_mask +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/trace/enable +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/trace/end_lba +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/trace/pid +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/trace/start_lba +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/nvme0n1p3/uevent +Lines: 6 +MAJOR=259 +MINOR=3 +DEVNAME=nvme0n1p3 +DEVTYPE=partition +DISKSEQ=9 +PARTN=3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/partscan +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/passthru_err_log_enabled 
+Lines: 1 +off +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/add_random +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/atomic_write_boundary_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/atomic_write_max_bytes +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/atomic_write_unit_max_bytes +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/atomic_write_unit_min_bytes +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/chunk_sectors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/dax +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/discard_granularity +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/discard_max_bytes +Lines: 1 +2199023255040 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/discard_max_hw_bytes +Lines: 1 +2199023255040 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/discard_zeroes_data +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/dma_alignment +Lines: 1 +3 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/fua +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/hw_sector_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/io_poll +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/io_poll_delay +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/io_timeout +Lines: 1 +30000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/iostats +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/logical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/max_discard_segments +Lines: 1 +256 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/max_hw_sectors_kb +Lines: 1 +128 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/max_integrity_segments +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/max_sectors_kb +Lines: 1 +128 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/max_segment_size +Lines: 1 +4294967295 
+Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/max_segments +Lines: 1 +33 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/minimum_io_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/nomerges +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/nr_requests +Lines: 1 +1023 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/nr_zones +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/optimal_io_size +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/physical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/read_ahead_kb +Lines: 1 +128 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/rotational +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/rq_affinity +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/scheduler +Lines: 1 +[none] mq-deadline +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/stable_writes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/virt_boundary_mask +Lines: 1 +4095 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/wbt_lat_usec +Lines: 1 +2000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/write_cache +Lines: 1 +write back +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/write_same_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/write_zeroes_max_bytes +Lines: 1 +131072 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/zone_append_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/zone_write_granularity +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/queue/zoned +Lines: 1 +none +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/range +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/removable +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/ro +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/size +Lines: 1 +3907029168 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/slaves +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/stat +Lines: 1 + 61050 16774 4202091 25036 83902 46848 2302530 32404 0 20551 64225 46 0 5751632 80 3461 6703 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/subsystem +SymlinkTo: ../../../../../../../class/block +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/trace +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/trace/act_mask +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/trace/enable +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/trace/end_lba +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/trace/pid +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/trace/start_lba +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/uevent +Lines: 5 +MAJOR=259 +MINOR=0 +DEVNAME=nvme0n1 +DEVTYPE=disk +DISKSEQ=9 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/nvme0n1/wwid +Lines: 1 
+nvme.c0a9-323332384536454444384137-435432303030503353534438-00000001 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/passthru_err_log_enabled +Lines: 1 +off +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/pm_qos_latency_tolerance_us +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/queue_count +Lines: 1 +9 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/serial +Lines: 1 +2328E6EDD8A7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/sqsize +Lines: 1 +1023 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/state +Lines: 1 +live +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/subsysnqn +Lines: 1 +nqn.2014.08.org.nvmexpress:c0a9c0a92328E6EDD8A7 CT2000P3SSD8 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/subsystem 
+SymlinkTo: ../../../../../../class/nvme +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/transport +Lines: 1 +pcie +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/nvme/nvme0/uevent +Lines: 4 +MAJOR=240 +MINOR=0 +DEVNAME=nvme0 +NVME_TRTYPE=pcie +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/pools +Lines: 3 +poolinfo - 0.1 +prp list 256 0 64 256 4 +prp list page 0 0 4096 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/async +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/control +Lines: 1 +on +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/runtime_active_time +Lines: 1 +3838519 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/runtime_enabled +Lines: 1 +forbidden +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/runtime_status +Lines: 1 +active +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/runtime_usage +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/wakeup +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/wakeup_abort_count +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/wakeup_active +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/wakeup_active_count +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/wakeup_count +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/wakeup_expire_count +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/wakeup_last_time_ms +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/wakeup_max_time_ms +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power/wakeup_total_time_ms +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/power_state +Lines: 1 +D0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/reset_method +Lines: 1 +flr bus +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/resource +Lines: 13 +0x00000000fd800000 0x00000000fd803fff 0x0000000000140204 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/revision +Lines: 1 +0x01 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/sriov_drivers_autoprobe +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/sriov_numvfs +Lines: 1 +4 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/sriov_totalvfs +Lines: 1 +8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/sriov_vf_total_msix +Lines: 1 +16 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/subsystem +SymlinkTo: ../../../../bus/pci +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/subsystem_device +Lines: 1 +0x5021 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/subsystem_vendor +Lines: 1 +0xc0a9 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/uevent +Lines: 6 +DRIVER=nvme +PCI_CLASS=10802 +PCI_ID=C0A9:540A 
+PCI_SUBSYS_ID=C0A9:5021 +PCI_SLOT_NAME=0000:01:00.0 +MODALIAS=pci:v0000C0A9d0000540Asv0000C0A9sd00005021bc01sc08i02 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/0000:01:00.0/vendor +Lines: 1 +0xc0a9 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/ari_enabled +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/broken_parity_status +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/class +Lines: 1 +0x060400 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/config +Lines: 1 +"4NULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEñNULLBYTENULLBYTE€ý€ýñÿNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEPNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEÿNULLBYTENULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/consistent_dma_mask_bits +Lines: 1 +32 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/current_link_speed +Lines: 1 +8.0 GT/s PCIe +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/current_link_width +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/d3cold_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/device +Lines: 1 +0x1634 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/dma_mask_bits +Lines: 1 +32 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/driver +SymlinkTo: ../../../bus/pci/drivers/pcieport +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/driver_override +Lines: 1 +(null) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/firmware_node +SymlinkTo: ../../LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:16 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/iommu +SymlinkTo: ../0000:00:00.2/iommu/ivhd0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/iommu_group +SymlinkTo: ../../../kernel/iommu_groups/2 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/irq +Lines: 1 +39 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
sys/devices/pci0000:00/0000:00:02.1/link +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/local_cpulist +Lines: 1 +0-15 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/local_cpus +Lines: 1 +ffff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/max_link_speed +Lines: 1 +8.0 GT/s PCIe +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/max_link_width +Lines: 1 +8 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/modalias +Lines: 1 +pci:v00001022d00001634sv000017AAsd00005095bc06sc04i00 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/msi_bus +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/msi_irqs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/msi_irqs/39 +Lines: 1 +msi +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/numa_node +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/pci_bus +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/cpuaffinity +Lines: 1 +ffff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/cpulistaffinity +Lines: 1 +0-15 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/device +SymlinkTo: ../../../0000:00:02.1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/subsystem +SymlinkTo: ../../../../../class/pci_bus +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/pci_bus/0000:01/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:02.1/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/async +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/autosuspend_delay_ms +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/runtime_active_kids +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/runtime_active_time +Lines: 1 +3838515 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/runtime_enabled +Lines: 1 +enabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/runtime_status +Lines: 1 +active +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/wakeup +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/wakeup_abort_count +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/wakeup_active +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/wakeup_active_count +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/wakeup_count 
+Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/wakeup_expire_count +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/wakeup_last_time_ms +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/wakeup_max_time_ms +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power/wakeup_total_time_ms +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/power_state +Lines: 1 +D0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/reset_method +Lines: 1 +pm +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/resource +Lines: 17 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x00000000fd800000 0x00000000fd8fffff 0x0000000000000200 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/revision +Lines: 1 +0x00 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/secondary_bus_number +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/sriov_drivers_autoprobe +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/sriov_numvfs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/sriov_totalvfs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/sriov_vf_total_msix +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/subordinate_bus_number +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/subsystem +SymlinkTo: ../../../bus/pci +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/subsystem_device +Lines: 1 +0x5095 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/subsystem_vendor +Lines: 1 +0x17aa +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/uevent +Lines: 6 +DRIVER=pcieport +PCI_CLASS=60400 +PCI_ID=1022:1634 +PCI_SUBSYS_ID=17AA:5095 +PCI_SLOT_NAME=0000:00:02.1 +MODALIAS=pci:v00001022d00001634sv000017AAsd00005095bc06sc04i00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:02.1/vendor +Lines: 1 +0x1022 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:03.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/dev_id +Lines: 1 +0x20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/flags +Lines: 1 +0x1303 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_port_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_port_name +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_switch_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Lines: 1 +13 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Lines: 1 
+0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Lines: 1 +13 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Lines: 1 +13 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Lines: 1 +13 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/writeback_rate_debug +Lines: 7 +rate: 1.1M/sec +dirty: 20.4G +target: 20.4G +proportional: 427.5k +integral: 790.0k +change: 321.5k/sec +next io: 17ms +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/aer_dev_correctable +Lines: 9 +RxErr 0 +BadTLP 0 +BadDLLP 0 +Rollover 0 +Timeout 0 +NonFatalErr 0 +CorrIntErr 0 +HeaderOF 0 +TOTAL_ERR_COR 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/aer_dev_fatal +Lines: 19 +Undefined 0 +DLP 0 +SDES 0 +TLP 0 +FCP 0 +CmpltTO 0 +CmpltAbrt 0 +UnxCmplt 0 +RxOF 0 +MalfTLP 0 +ECRC 0 +UnsupReq 0 +ACSViol 0 +UncorrIntErr 0 +BlockedTLP 0 +AtomicOpBlocked 0 +TLPBlockedErr 0 +PoisonTLPBlocked 0 +TOTAL_ERR_FATAL 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/aer_dev_nonfatal +Lines: 19 +Undefined 0 +DLP 0 +SDES 0 +TLP 0 +FCP 0 +CmpltTO 0 +CmpltAbrt 0 +UnxCmplt 0 +RxOF 0 +MalfTLP 0 +ECRC 0 +UnsupReq 0 +ACSViol 0 +UncorrIntErr 0 +BlockedTLP 0 +AtomicOpBlocked 0 +TLPBlockedErr 0 +PoisonTLPBlocked 0 +TOTAL_ERR_NONFATAL 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ari_enabled +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/broken_parity_status +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/class +Lines: 1 +0x020000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/consistent_dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/current_link_speed +Lines: 1 +5.0 GT/s PCIe +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/current_link_width +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/d3cold_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/device +Lines: 1 +0x1521 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/driver +SymlinkTo: ../../../../bus/pci/drivers/igb +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/driver_override +Lines: 1 +(null) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/firmware_node +SymlinkTo: ../../../LNXSYSTM:00/LNXSYBUS:00/PNP0A08:01/device:18/device:19 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/device +SymlinkTo: ../../../0000:45:00.0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/name +Lines: 1 +i350bb +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/subsystem +SymlinkTo: ../../../../../../class/hwmon +# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/temp1_crit +Lines: 1 +110000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/temp1_input +Lines: 1 +50000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/temp1_label +Lines: 1 +loc1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/temp1_max +Lines: 1 +120000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/hwmon/hwmon0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/modalias +Lines: 1 +i2c:i350bb +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/name +Lines: 1 +i350bb +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power/async +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/subsystem +SymlinkTo: ../../../../../../bus/i2c +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/3-007c/uevent +Lines: 1 +MODALIAS=i2c:i350bb +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/dev +Lines: 1 +89:3 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/device +SymlinkTo: ../../../i2c-3 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/name +Lines: 1 +igb BB +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/subsystem +SymlinkTo: ../../../../../../../class/i2c-dev +# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/i2c-dev/i2c-3/uevent +Lines: 3 +MAJOR=89 +MINOR=3 +DEVNAME=i2c-3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/name +Lines: 1 +igb BB +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/power/async +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/power/runtime_enabled +Lines: 1 +enabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/power/runtime_status +Lines: 1 +suspended +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/subsystem +SymlinkTo: ../../../../../bus/i2c +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/i2c-3/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/iommu +SymlinkTo: ../../0000:40:00.2/iommu/ivhd1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/iommu_group +SymlinkTo: ../../../../kernel/iommu_groups/25 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/irq +Lines: 1 +58 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/link +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/local_cpulist +Lines: 1 +0-63,128-191 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/local_cpus +Lines: 1 +00000000,00000000,ffffffff,ffffffff,00000000,00000000,ffffffff,ffffffff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/max_link_speed +Lines: 1 +5.0 GT/s PCIe +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/max_link_width +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/modalias +Lines: 1 +pci:v00008086d00001521sv00008086sd000000A3bc02sc00i00 +Mode: 444 +# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_bus +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs/147 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs/148 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs/149 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs/150 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs/151 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs/152 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs/153 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs/154 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/msi_irqs/155 +Lines: 1 +msix +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/addr_assign_type +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/addr_len +Lines: 1 +6 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/address +Lines: 1 +68:05:ca:f0:cb:12 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/backup_port +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/bpdu_guard +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/bridge +SymlinkTo: ../../../../../../virtual/net/vmbr0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/broadcast_flood +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/change_ack +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/config_pending +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/designated_bridge +Lines: 1 +8000.6805caf0cb12 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/designated_cost +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/designated_port +Lines: 1 +32769 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/designated_root +Lines: 1 +8000.6805caf0cb12 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/forward_delay_timer +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/group_fwd_mask +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/hairpin_mode +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/hold_timer +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/isolated +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/learning +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/message_age_timer +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/multicast_fast_leave +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/multicast_flood +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/multicast_router +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/multicast_to_unicast +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/neigh_suppress +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/path_cost +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/port_id +Lines: 1 +0x8001 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/port_no +Lines: 1 +0x1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/priority +Lines: 1 +32 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/proxyarp +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/proxyarp_wifi +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/root_block +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/state +Lines: 1 +3 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/brport/unicast_flood +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/carrier_changes +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/carrier_down_count +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/carrier_up_count +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/dev_id +Lines: 1 +0x0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/dev_port +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/device +SymlinkTo: ../../../0000:45:00.0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/dormant +Lines: 1 +0 +Mode: 444 
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/duplex +Lines: 1 +full +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/gro_flush_timeout +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/ifindex +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/iflink +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/link_mode +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/master +SymlinkTo: ../../../../../virtual/net/vmbr0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/name_assign_type +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/napi_defer_hard_irqs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/operstate +Lines: 1 +up +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/phys_port_id +Lines: 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/phys_port_name +Lines: 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/phys_switch_id +Lines: 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/proto_down +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-0/rps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-0/rps_flow_cnt +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-1/rps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-1/rps_flow_cnt +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-2/rps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-2/rps_flow_cnt +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-3/rps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-3/rps_flow_cnt +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-4/rps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-4/rps_flow_cnt +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-5/rps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-5/rps_flow_cnt +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-6/rps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-6/rps_flow_cnt +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-7 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-7/rps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/rx-7/rps_flow_cnt +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/byte_queue_limits +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/byte_queue_limits/hold_time +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/byte_queue_limits/inflight +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/byte_queue_limits/limit +Lines: 1 +67893 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/byte_queue_limits/limit_max +Lines: 1 +1879048192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/byte_queue_limits/limit_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/byte_queue_limits/stall_cnt +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/byte_queue_limits/stall_max +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/byte_queue_limits/stall_thrs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/traffic_class +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/tx_maxrate +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/tx_timeout +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/xps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-0/xps_rxqs +Lines: 1 +00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/byte_queue_limits +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/byte_queue_limits/hold_time +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/byte_queue_limits/inflight +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/byte_queue_limits/limit +Lines: 1 +137370 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/byte_queue_limits/limit_max +Lines: 1 +1879048192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/byte_queue_limits/limit_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/byte_queue_limits/stall_cnt +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/byte_queue_limits/stall_max +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/byte_queue_limits/stall_thrs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/traffic_class +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/tx_maxrate +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/tx_timeout +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/xps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-1/xps_rxqs +Lines: 1 +00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/byte_queue_limits +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/byte_queue_limits/hold_time +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/byte_queue_limits/inflight +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/byte_queue_limits/limit +Lines: 1 +137370 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/byte_queue_limits/limit_max +Lines: 1 +1879048192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/byte_queue_limits/limit_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/byte_queue_limits/stall_cnt +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/byte_queue_limits/stall_max +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/byte_queue_limits/stall_thrs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/traffic_class +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/tx_maxrate +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/tx_timeout +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/xps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-2/xps_rxqs +Lines: 1 +00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/byte_queue_limits +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/byte_queue_limits/hold_time +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/byte_queue_limits/inflight +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/byte_queue_limits/limit +Lines: 1 +137370 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/byte_queue_limits/limit_max +Lines: 1 +1879048192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/byte_queue_limits/limit_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/byte_queue_limits/stall_cnt +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/byte_queue_limits/stall_max +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/byte_queue_limits/stall_thrs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/traffic_class +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/tx_maxrate +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/tx_timeout +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/xps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-3/xps_rxqs +Lines: 1 +00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/byte_queue_limits +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/byte_queue_limits/hold_time +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/byte_queue_limits/inflight +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/byte_queue_limits/limit +Lines: 1 +68879 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/byte_queue_limits/limit_max +Lines: 1 +1879048192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/byte_queue_limits/limit_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/byte_queue_limits/stall_cnt +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/byte_queue_limits/stall_max +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/byte_queue_limits/stall_thrs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/traffic_class +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/tx_maxrate +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/tx_timeout +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/xps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-4/xps_rxqs +Lines: 1 +00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/byte_queue_limits +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/byte_queue_limits/hold_time +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/byte_queue_limits/inflight +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/byte_queue_limits/limit +Lines: 1 +137370 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/byte_queue_limits/limit_max +Lines: 1 +1879048192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/byte_queue_limits/limit_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/byte_queue_limits/stall_cnt +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/byte_queue_limits/stall_max +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/byte_queue_limits/stall_thrs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/traffic_class +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/tx_maxrate +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/tx_timeout +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/xps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-5/xps_rxqs +Lines: 1 +00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/byte_queue_limits +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/byte_queue_limits/hold_time +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/byte_queue_limits/inflight +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/byte_queue_limits/limit +Lines: 1 +54464 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/byte_queue_limits/limit_max +Lines: 1 +1879048192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/byte_queue_limits/limit_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/byte_queue_limits/stall_cnt +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/byte_queue_limits/stall_max +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/byte_queue_limits/stall_thrs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/traffic_class +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/tx_maxrate +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/tx_timeout +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/xps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-6/xps_rxqs +Lines: 1 +00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/byte_queue_limits +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/byte_queue_limits/hold_time +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/byte_queue_limits/inflight +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/byte_queue_limits/limit +Lines: 1 +71700 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/byte_queue_limits/limit_max +Lines: 1 +1879048192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/byte_queue_limits/limit_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/byte_queue_limits/stall_cnt +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/byte_queue_limits/stall_max +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/byte_queue_limits/stall_thrs +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/traffic_class +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/tx_maxrate +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/tx_timeout +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/xps_cpus +Lines: 1 +00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/queues/tx-7/xps_rxqs +Lines: 1 +00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/speed +Lines: 1 +1000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/collisions +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/multicast +Lines: 1 +656633 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_bytes +Lines: 1 +10013625365 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_compressed +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_crc_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_dropped +Lines: 1 +29220 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_fifo_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_frame_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_length_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_missed_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_nohandler +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_over_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/rx_packets +Lines: 1 +46422718 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_aborted_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_bytes +Lines: 1 +10275718925 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_carrier_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_compressed +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_dropped +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_fifo_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_heartbeat_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_packets +Lines: 1 +47308115 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/statistics/tx_window_errors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/subsystem +SymlinkTo: ../../../../../../class/net +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/testing +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/threaded +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/type +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/uevent +Lines: 2 +INTERFACE=ens10f0 +IFINDEX=2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/net/ens10f0/upper_vmbr0 +SymlinkTo: ../../../../../virtual/net/vmbr0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/numa_node +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/async +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/control +Lines: 1 +on +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/runtime_active_time +Lines: 1 +862796974 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/runtime_enabled +Lines: 1 +forbidden +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/runtime_status +Lines: 1 +active +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/runtime_usage +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/wakeup +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/wakeup_abort_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/wakeup_active +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/wakeup_active_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/wakeup_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/wakeup_expire_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/wakeup_last_time_ms +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/wakeup_max_time_ms +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power/wakeup_total_time_ms +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/power_state +Lines: 1 +D0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/clock_name +Lines: 1 +6805caf0cb12 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/dev +Lines: 1 +246:0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/device +SymlinkTo: ../../../0000:45:00.0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/fifo +Lines: 1 +NULLBYTEEOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/max_adjustment +Lines: 1 +62499999 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/max_vclocks +Lines: 1 +20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/n_alarms +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/n_external_timestamps +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/n_periodic_outputs +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/n_programmable_pins +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/n_vclocks +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/pins +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/pins/SDP0 +Lines: 1 +0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/pins/SDP1 +Lines: 1 +0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/pins/SDP2 +Lines: 1 +0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/pins/SDP3 +Lines: 1 +0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power/autosuspend_delay_ms +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power/control Lines: 1 -289 +auto Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power/runtime_active_kids Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power/runtime_active_time Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power/runtime_enabled Lines: 1 -0 -Mode: 644 +disabled +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute -Mode: 755 +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power/runtime_status +Lines: 1 +unsupported +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power/runtime_suspended_time Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/power/runtime_usage Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/pps_available Lines: 1 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/subsystem +SymlinkTo: ../../../../../../class/ptp +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/ptp/ptp0/uevent +Lines: 3 +MAJOR=246 +MINOR=0 +DEVNAME=ptp0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/reset_method Lines: 1 -0 +flr bus Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/resource +Lines: 13 +0x0000000097100000 0x00000000971fffff 0x0000000000040200 +0x0000000000000000 0x0000000000000000 
0x0000000000000000 +0x0000000000003060 0x000000000000307f 0x0000000000040101 +0x000000009720c000 0x000000009720ffff 0x0000000000040200 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000097280000 0x00000000972fffff 0x0000000000046200 +0x000000f0a03e0000 0x000000f0a03fffff 0x000000000014220c +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x000000f0a03c0000 0x000000f0a03dffff 0x000000000014220c +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/resource0 +Lines: 0 +Mode: 600 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/resource2 +Lines: 0 +Mode: 600 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/resource3 +Lines: 0 +Mode: 600 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/revision Lines: 1 -0 -Mode: 644 +0x01 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/rom +Lines: 0 +Mode: 600 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/sriov_drivers_autoprobe Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/sriov_numvfs Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/sriov_offset Lines: 1 -0 -Mode: 644 +128 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour -Mode: 755 +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/sriov_stride +Lines: 1 +4 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/sriov_totalvfs Lines: 1 -0 -Mode: 644 +7 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/sriov_vf_device Lines: 1 -0 -Mode: 644 +1520 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Path: 
sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/sriov_vf_total_msix Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/subsystem +SymlinkTo: ../../../../bus/pci +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/subsystem_device Lines: 1 -0 -Mode: 644 +0x00a3 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/subsystem_vendor Lines: 1 -0 +0x8086 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/uevent +Lines: 6 +DRIVER=igb +PCI_CLASS=20000 +PCI_ID=8086:1521 +PCI_SUBSYS_ID=8086:00A3 +PCI_SLOT_NAME=0000:45:00.0 +MODALIAS=pci:v00008086d00001521sv00008086sd000000A3bc02sc00i00 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/vendor Lines: 1 -0 -Mode: 644 +0x8086 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/vpd +Lines: 2 +‚:NULLBYTEIntel (r) Ethernet Network Adapter I350-T4 for OCP NIC 3.0dNULLBYTEV1:Intel (r) Ethernet Network Adapter I350-T4 for OCP NIC 3.0PN +K53978-004SN 6805CAF0CB12V24521RV‚xEOF +Mode: 600 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/active_count Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/active_time_ms Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total -Mode: 755 +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/device +SymlinkTo: ../../../0000:45:00.0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/event_count Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/expire_count Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/last_change_ms Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/max_time_ms Lines: 1 -100 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/name Lines: 1 -546 -Mode: 644 +0000:45:00.0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/prevent_suspend_time_ms Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/subsystem +SymlinkTo: ../../../../../../class/wakeup # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/total_time_ms Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/writeback_rate_debug -Lines: 7 -rate: 1.1M/sec -dirty: 20.4G -target: 20.4G -proportional: 427.5k -integral: 790.0k -change: 321.5k/sec -next io: 17ms +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/uevent +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 -Mode: 755 +Path: sys/devices/pci0000:40/0000:40:01.3/0000:45:00.0/wakeup/wakeup87/wakeup_count +Lines: 1 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 -Mode: 755 +Path: sys/devices/pci0000:40/0000:40:01.3/class +Lines: 1 +0x060400 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block -Mode: 755 +Path: sys/devices/pci0000:40/0000:40:01.3/d3cold_allowed +Lines: 1 +1 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc -Mode: 755 +Path: sys/devices/pci0000:40/0000:40:01.3/device +Lines: 1 +0x1483 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache -Mode: 755 +Path: sys/devices/pci0000:40/0000:40:01.3/power_state +Lines: 1 +D0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Path: sys/devices/pci0000:40/0000:40:01.3/revision Lines: 1 -0 -Mode: 644 +0x00 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Path: sys/devices/pci0000:40/0000:40:01.3/subsystem_device Lines: 1 -512 -Mode: 644 +0x1453 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 +Path: sys/devices/pci0000:40/0000:40:01.3/subsystem_vendor +Lines: 1 +0x1022 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Path: sys/devices/pci0000:40/0000:40:01.3/vendor Lines: 1 -0 -Mode: 644 +0x1022 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform Mode: 755 @@ -1825,7 +8101,7 @@ Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan1_label Lines: 1 -Left side +Left side Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan1_manual @@ -1859,7 +8135,7 @@ Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan2_label Lines: 1 -Right side +Right side Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan2_manual @@ -1900,6 +8176,45 @@ Lines: 1 applesmc Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/bogus.0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/bogus.0/hwmon +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/bogus.0/hwmon/hwmon5 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_input +Lines: 1 +55000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_label +Lines: 1 +Physical id 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/bogus.0/hwmon/hwmon5/name +Lines: 1 +bogus +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/coretemp.0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2441,6 +8756,22 @@ Mode: 644 Directory: sys/devices/system Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/clocksource +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/clocksource/clocksource0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/clocksource/clocksource0/available_clocksource +Lines: 1 +tsc hpet acpi_pm +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/clocksource/clocksource0/current_clocksource +Lines: 1 +tsc +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2752,6 +9083,49 @@ Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/isolated +Lines: 1 +1,3-5,9 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/offline +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/online +Lines: 1 +0-3 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/vulnerabilities +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/vulnerabilities/itlb_multihit +Lines: 1 +Not affected +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/vulnerabilities/mds +Lines: 1 +Vulnerable +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/vulnerabilities/retbleed +Lines: 1 +Mitigation: untrained return thunk; SMT enabled with STIBP protection +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/vulnerabilities/spectre_v1 +Lines: 1 +Mitigation: usercopy/swapgs barriers and __user pointer sanitization +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/vulnerabilities/spectre_v2 +Lines: 1 +Mitigation: Retpolines, IBPB: conditional, STIBP: always-on, RSB filling, PBRSB-eIBRS: Not 
affected +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/edac Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -3107,7 +9481,7 @@ Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads Lines: 1 -0 +13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute @@ -3150,7 +9524,7 @@ Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads Lines: 1 -0 +13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour @@ -3193,7 +9567,7 @@ Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads Lines: 1 -0 +13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total @@ -3236,7 +9610,7 @@ Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads Lines: 1 -0 +13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth @@ -3466,6 +9840,14 @@ Lines: 1 4096 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/commit_stats +Lines: 4 +commits 258051 +last_commit_ms 1000 +max_commit_ms 51462 +total_commit_ms 47836090EOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/collector/fixtures/textfile/different_metric_types.out b/collector/fixtures/textfile/different_metric_types.out index 83211e9186..c01c197cce 100644 --- a/collector/fixtures/textfile/different_metric_types.out +++ b/collector/fixtures/textfile/different_metric_types.out @@ -26,7 +26,7 @@ events_total{foo="bar"} 10 events_total{foo="baz"} 20 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge -node_textfile_mtime_seconds{file="metrics.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/different_metric_types/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/glob_extra_dimension.out b/collector/fixtures/textfile/glob_extra_dimension.out new file mode 100644 index 0000000000..bbf7f45413 --- /dev/null +++ b/collector/fixtures/textfile/glob_extra_dimension.out @@ -0,0 +1,49 @@ +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
+# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/histogram_extra_dimension/metrics.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/summary_extra_dimension/metrics.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute. +# TYPE prometheus_rule_evaluation_duration_seconds summary +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.9"} 0.001765451 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.99"} 0.018672076 +prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="alerting"} 214.85081044700146 +prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="alerting"} 185209 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.5"} 4.3132e-05 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.9"} 8.9295e-05 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.99"} 0.000193657 +prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="recording"} 185091.01317759082 +prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="recording"} 1.0020195e+08 +prometheus_rule_evaluation_duration_seconds{handler="foo",rule_type="alerting",quantile="0.5"} 0.000571464 +prometheus_rule_evaluation_duration_seconds_sum{handler="foo",rule_type="alerting"} 0 +prometheus_rule_evaluation_duration_seconds_count{handler="foo",rule_type="alerting"} 0 +# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_range histogram +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="409600"} 1.412839e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1.6384e+06"} 1.69185e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum{foo="bar"} 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count{foo="bar"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="409600"} 1.412839e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1.6384e+06"} 1.69185e+06 
+prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum{foo="baz"} 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count{foo="baz"} 1.691853e+06 diff --git a/collector/fixtures/textfile/histogram.out b/collector/fixtures/textfile/histogram.out index f7977d4503..f649e19a2f 100644 --- a/collector/fixtures/textfile/histogram.out +++ b/collector/fixtures/textfile/histogram.out @@ -1,6 +1,6 @@ # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge -node_textfile_mtime_seconds{file="metrics.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/histogram/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/histogram_extra_dimension.out b/collector/fixtures/textfile/histogram_extra_dimension.out index 6125e8c10f..2f6aa854ad 100644 --- a/collector/fixtures/textfile/histogram_extra_dimension.out +++ b/collector/fixtures/textfile/histogram_extra_dimension.out @@ -1,6 +1,6 @@ # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge -node_textfile_mtime_seconds{file="metrics.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/histogram_extra_dimension/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/inconsistent_metrics.out b/collector/fixtures/textfile/inconsistent_metrics.out index 987a5a5a37..45ad4535e6 100644 --- a/collector/fixtures/textfile/inconsistent_metrics.out +++ b/collector/fixtures/textfile/inconsistent_metrics.out @@ -23,7 +23,7 @@ http_requests_total{baz="",code="503",foo="",handler="query_range",method="get"} http_requests_total{baz="bar",code="200",foo="",handler="",method="get"} 93 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge -node_textfile_mtime_seconds{file="metrics.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/inconsistent_metrics/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/metrics_merge_different_help.out b/collector/fixtures/textfile/metrics_merge_different_help.out new file mode 100644 index 0000000000..b9385a752a --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_different_help.out @@ -0,0 +1,11 @@ +# HELP events_total A nice help message. +# TYPE events_total counter +events_total{file="a",foo="bar"} 10 +events_total{file="a",foo="baz"} 20 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
+# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help/a.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help/b.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 1 diff --git a/collector/fixtures/textfile/metrics_merge_different_help/a.prom b/collector/fixtures/textfile/metrics_merge_different_help/a.prom new file mode 100644 index 0000000000..9188bbdeec --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_different_help/a.prom @@ -0,0 +1,5 @@ +# HELP events_total A nice help message. +# TYPE events_total counter +events_total{foo="bar",file="a"} 10 +events_total{foo="baz",file="a"} 20 + diff --git a/collector/fixtures/textfile/metrics_merge_different_help/b.prom b/collector/fixtures/textfile/metrics_merge_different_help/b.prom new file mode 100644 index 0000000000..259d43d0a5 --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_different_help/b.prom @@ -0,0 +1,5 @@ +# HELP events_total A different help message. +# TYPE events_total counter +events_total{foo="bar",file="b"} 30 +events_total{foo="baz",file="b"} 40 + diff --git a/collector/fixtures/textfile/metrics_merge_empty_help.out b/collector/fixtures/textfile/metrics_merge_empty_help.out new file mode 100644 index 0000000000..2abe2cea9f --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_empty_help.out @@ -0,0 +1,13 @@ +# HELP events_total Metric read from fixtures/textfile/metrics_merge_empty_help/a.prom, fixtures/textfile/metrics_merge_empty_help/b.prom +# TYPE events_total counter +events_total{file="a",foo="bar"} 10 +events_total{file="a",foo="baz"} 20 +events_total{file="b",foo="bar"} 30 +events_total{file="b",foo="baz"} 40 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
+# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_empty_help/a.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_empty_help/b.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/metrics_merge_empty_help/a.prom b/collector/fixtures/textfile/metrics_merge_empty_help/a.prom new file mode 100644 index 0000000000..3036448d0c --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_empty_help/a.prom @@ -0,0 +1,5 @@ +# HELP events_total +# TYPE events_total counter +events_total{foo="bar",file="a"} 10 +events_total{foo="baz",file="a"} 20 + diff --git a/collector/fixtures/textfile/metrics_merge_empty_help/b.prom b/collector/fixtures/textfile/metrics_merge_empty_help/b.prom new file mode 100644 index 0000000000..efe9505a9a --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_empty_help/b.prom @@ -0,0 +1,5 @@ +# HELP events_total +# TYPE events_total counter +events_total{foo="bar",file="b"} 30 +events_total{foo="baz",file="b"} 40 + diff --git a/collector/fixtures/textfile/metrics_merge_no_help.out b/collector/fixtures/textfile/metrics_merge_no_help.out new file mode 100644 index 0000000000..268e3c4806 --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_no_help.out @@ -0,0 +1,13 @@ +# HELP events_total Metric read from fixtures/textfile/metrics_merge_no_help/a.prom, fixtures/textfile/metrics_merge_no_help/b.prom +# TYPE events_total counter +events_total{file="a",foo="bar"} 10 +events_total{file="a",foo="baz"} 20 +events_total{file="b",foo="bar"} 30 +events_total{file="b",foo="baz"} 40 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_no_help/a.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_no_help/b.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/metrics_merge_no_help/a.prom b/collector/fixtures/textfile/metrics_merge_no_help/a.prom new file mode 100644 index 0000000000..e11c618bf0 --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_no_help/a.prom @@ -0,0 +1,4 @@ +# TYPE events_total counter +events_total{foo="bar",file="a"} 10 +events_total{foo="baz",file="a"} 20 + diff --git a/collector/fixtures/textfile/metrics_merge_no_help/b.prom b/collector/fixtures/textfile/metrics_merge_no_help/b.prom new file mode 100644 index 0000000000..984fbf46a4 --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_no_help/b.prom @@ -0,0 +1,4 @@ +# TYPE events_total counter +events_total{foo="bar",file="b"} 30 +events_total{foo="baz",file="b"} 40 + diff --git a/collector/fixtures/textfile/metrics_merge_same_help.out b/collector/fixtures/textfile/metrics_merge_same_help.out new file mode 100644 index 0000000000..5fddae3d77 --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_same_help.out @@ -0,0 +1,13 @@ +# HELP events_total The same help. 
+# TYPE events_total counter +events_total{file="a",foo="bar"} 10 +events_total{file="a",foo="baz"} 20 +events_total{file="b",foo="bar"} 30 +events_total{file="b",foo="baz"} 40 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_same_help/a.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_same_help/b.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/metrics_merge_same_help/a.prom b/collector/fixtures/textfile/metrics_merge_same_help/a.prom new file mode 100644 index 0000000000..a40cd6e627 --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_same_help/a.prom @@ -0,0 +1,5 @@ +# HELP events_total The same help. +# TYPE events_total counter +events_total{foo="bar",file="a"} 10 +events_total{foo="baz",file="a"} 20 + diff --git a/collector/fixtures/textfile/metrics_merge_same_help/b.prom b/collector/fixtures/textfile/metrics_merge_same_help/b.prom new file mode 100644 index 0000000000..77728727b4 --- /dev/null +++ b/collector/fixtures/textfile/metrics_merge_same_help/b.prom @@ -0,0 +1,5 @@ +# HELP events_total The same help. +# TYPE events_total counter +events_total{foo="bar",file="b"} 30 +events_total{foo="baz",file="b"} 40 + diff --git a/collector/fixtures/textfile/summary.out b/collector/fixtures/textfile/summary.out index 0e1ac6a200..c83dba97f3 100644 --- a/collector/fixtures/textfile/summary.out +++ b/collector/fixtures/textfile/summary.out @@ -22,7 +22,7 @@ event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307 event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge -node_textfile_mtime_seconds{file="metrics.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/summary/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/summary_extra_dimension.out b/collector/fixtures/textfile/summary_extra_dimension.out index 032c03399d..d49e8a1d2f 100644 --- a/collector/fixtures/textfile/summary_extra_dimension.out +++ b/collector/fixtures/textfile/summary_extra_dimension.out @@ -1,6 +1,6 @@ # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge -node_textfile_mtime_seconds{file="metrics.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/summary_extra_dimension/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/two_metric_files.out b/collector/fixtures/textfile/two_metric_files.out index d8bb7b9308..fbff74dd5f 100644 --- a/collector/fixtures/textfile/two_metric_files.out +++ b/collector/fixtures/textfile/two_metric_files.out @@ -1,7 +1,7 @@ # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
# TYPE node_textfile_mtime_seconds gauge -node_textfile_mtime_seconds{file="metrics1.prom"} 1 -node_textfile_mtime_seconds{file="metrics2.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/two_metric_files/metrics1.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/two_metric_files/metrics2.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 diff --git a/collector/fixtures/udev.ttar b/collector/fixtures/udev.ttar new file mode 100644 index 0000000000..f4db1d901c --- /dev/null +++ b/collector/fixtures/udev.ttar @@ -0,0 +1,529 @@ +# Archive created by ttar -C collector/fixtures -c -f collector/fixtures/udev.ttar udev +Directory: udev +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: udev/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b11:0 +Lines: 38 +S:disk/by-id/usb-AMI_Virtual_CDROM0_AAAABBBBCCCC1-0:0 +S:disk/by-path/pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0 +S:cdrom +L:-100 +I:83543243 +E:ID_CDROM=1 +E:SYSTEMD_MOUNT_DEVICE_BOUND=1 +E:ID_VENDOR=AMI +E:ID_VENDOR_ENC=AMI\x20\x20\x20\x20\x20 +E:ID_VENDOR_ID=c096 +E:ID_MODEL=Virtual_CDROM0 +E:ID_MODEL_ENC=Virtual\x20CDROM0\x20\x20 +E:ID_MODEL_ID=ee31 +E:ID_REVISION=1.00 +E:ID_SERIAL=AMI_Virtual_CDROM0_AAAABBBBCCCC1-0:0 +E:ID_SERIAL_SHORT=AAAABBBBCCCC1 +E:ID_TYPE=cd/dvd +E:ID_INSTANCE=0:0 +E:ID_BUS=usb +E:ID_USB_INTERFACES=:905639: +E:ID_USB_INTERFACE_NUM=00 +E:ID_USB_DRIVER=usb-storage +E:ID_PATH=pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0 +E:ID_PATH_TAG=pci-0000_00_14_0-usb-0_1_1_1_0-scsi-0_0_0_0 +E:SCSI_TPGS=0 +E:SCSI_TYPE=cd/dvd +E:SCSI_VENDOR=AMI +E:SCSI_VENDOR_ENC=AMI\x20\x20\x20\x20\x20 +E:SCSI_MODEL=Virtual_CDROM0 +E:SCSI_MODEL_ENC=Virtual\x20CDROM0\x20\x20 +E:SCSI_REVISION=1.00 +E:ID_SCSI=1 +E:ID_SCSI_INQUIRY=1 +E:ID_FS_TYPE= +E:ID_FOR_SEAT=block-pci-0000_00_14_0-usb-0_1_1_1_0-scsi-0_0_0_0 +G:uaccess +G:systemd +G:seat +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b179:0 +Lines: 13 +S:disk/by-path/platform-df2969f3.mmc +S:disk/by-id/mmc-SC64G_0x83e36d93 +W:1 +I:7679747 +E:ID_NAME=SC64G +E:ID_SERIAL=0x83e36d93 +E:ID_PATH=platform-df2969f3.mmc +E:ID_PATH_TAG=platform-df2969f3_mmc +E:ID_PART_TABLE_UUID=1954c9df +E:ID_PART_TABLE_TYPE=dos +E:ID_DRIVE_FLASH_SD=1 +E:ID_DRIVE_MEDIA_FLASH_SD=1 +G:systemd +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b179:1 +Lines: 30 +S:disk/by-id/mmc-SC64G_0x83e36d93-part1 +S:disk/by-path/platform-df2969f3.mmc-part1 +S:disk/by-label/boot +S:disk/by-uuid/6284-658D +S:disk/by-partuuid/1954c9df-01 +W:12 +I:8463403 +E:ID_NAME=SC64G +E:ID_SERIAL=0x83e36d93 +E:ID_PATH=platform-df2969f3.mmc +E:ID_PATH_TAG=platform-df2969f3_mmc +E:ID_PART_TABLE_UUID=1954c9df +E:ID_PART_TABLE_TYPE=dos +E:ID_DRIVE_FLASH_SD=1 +E:ID_DRIVE_MEDIA_FLASH_SD=1 +E:ID_FS_LABEL=boot +E:ID_FS_LABEL_ENC=boot +E:ID_FS_UUID=6284-658D +E:ID_FS_UUID_ENC=6284-658D +E:ID_FS_VERSION=FAT32 +E:ID_FS_TYPE=vfat +E:ID_FS_USAGE=filesystem +E:ID_PART_ENTRY_SCHEME=dos +E:ID_PART_ENTRY_UUID=1954c9df-01 +E:ID_PART_ENTRY_TYPE=0xc +E:ID_PART_ENTRY_NUMBER=1 +E:ID_PART_ENTRY_OFFSET=8192 +E:ID_PART_ENTRY_SIZE=524288 +E:ID_PART_ENTRY_DISK=179:0 +G:systemd +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b179:2 +Lines: 30 
+S:disk/by-id/mmc-SC64G_0x83e36d93-part2 +S:disk/by-path/platform-df2969f3.mmc-part2 +S:disk/by-label/rootfs +S:disk/by-uuid/83324ce8-a6f3-4e35-ad64-dbb3d6b87a32 +S:disk/by-partuuid/1954c9df-02 +W:2 +I:7676649 +E:ID_NAME=SC64G +E:ID_SERIAL=0x83e36d93 +E:ID_PATH=platform-df2969f3.mmc +E:ID_PATH_TAG=platform-df2969f3_mmc +E:ID_PART_TABLE_UUID=1954c9df +E:ID_PART_TABLE_TYPE=dos +E:ID_DRIVE_FLASH_SD=1 +E:ID_DRIVE_MEDIA_FLASH_SD=1 +E:ID_FS_LABEL=rootfs +E:ID_FS_LABEL_ENC=rootfs +E:ID_FS_UUID=83324ce8-a6f3-4e35-ad64-dbb3d6b87a32 +E:ID_FS_UUID_ENC=83324ce8-a6f3-4e35-ad64-dbb3d6b87a32 +E:ID_FS_VERSION=1.0 +E:ID_FS_TYPE=ext4 +E:ID_FS_USAGE=filesystem +E:ID_PART_ENTRY_SCHEME=dos +E:ID_PART_ENTRY_UUID=1954c9df-02 +E:ID_PART_ENTRY_TYPE=0x83 +E:ID_PART_ENTRY_NUMBER=2 +E:ID_PART_ENTRY_OFFSET=532480 +E:ID_PART_ENTRY_SIZE=124203008 +E:ID_PART_ENTRY_DISK=179:0 +G:systemd +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b252:0 +Lines: 20 +S:disk/by-id/dm-name-nvme0n1_crypt +S:mapper/nvme0n1_crypt +S:disk/by-id/lvm-pv-uuid-c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB +S:disk/by-id/dm-uuid-CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt +I:72859885 +E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 +E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 +E:DM_UDEV_RULES=1 +E:DM_UDEV_RULES_VSN=2 +E:DM_NAME=nvme0n1_crypt +E:DM_UUID=CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt +E:DM_SUSPENDED=0 +E:ID_FS_UUID=c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB +E:ID_FS_UUID_ENC=c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB +E:ID_FS_VERSION=LVM2 001 +E:ID_FS_TYPE=LVM2_member +E:ID_FS_USAGE=raid +G:systemd +Q:systemd +V:1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b252:1 +Lines: 24 +S:disk/by-id/dm-uuid-LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T +S:mapper/system-swap_1 +S:disk/by-id/dm-name-system-swap_1 +S:disk/by-uuid/5272bb60-04b5-49cd-b730-be57c7604450 +S:system/swap_1 +I:78705530 +E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 +E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 +E:DM_UDEV_RULES=1 +E:DM_UDEV_RULES_VSN=2 +E:DM_NAME=system-swap_1 +E:DM_UUID=LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T +E:DM_SUSPENDED=0 +E:DM_VG_NAME=system +E:DM_LV_NAME=swap_1 +E:DM_LV_LAYER= +E:ID_FS_UUID=5272bb60-04b5-49cd-b730-be57c7604450 +E:ID_FS_UUID_ENC=5272bb60-04b5-49cd-b730-be57c7604450 +E:ID_FS_VERSION=1 +E:ID_FS_TYPE=swap +E:ID_FS_USAGE=other +G:systemd +Q:systemd +V:1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b252:2 +Lines: 24 +S:disk/by-id/dm-name-system-root +S:disk/by-id/dm-uuid-LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7 +S:mapper/system-root +S:disk/by-uuid/3deafd0d-faff-4695-8d15-51061ae1f51b +S:system/root +I:77655410 +E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 +E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 +E:DM_UDEV_RULES=1 +E:DM_UDEV_RULES_VSN=2 +E:DM_NAME=system-root +E:DM_UUID=LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7 +E:DM_SUSPENDED=0 +E:DM_VG_NAME=system +E:DM_LV_NAME=root +E:DM_LV_LAYER= +E:ID_FS_UUID=3deafd0d-faff-4695-8d15-51061ae1f51b +E:ID_FS_UUID_ENC=3deafd0d-faff-4695-8d15-51061ae1f51b +E:ID_FS_VERSION=1.0 +E:ID_FS_TYPE=ext4 +E:ID_FS_USAGE=filesystem +G:systemd +Q:systemd +V:1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b252:3 +Lines: 24 +S:disk/by-id/dm-name-system-var 
+S:disk/by-id/dm-uuid-LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP +S:mapper/system-var +S:disk/by-uuid/5c772222-f7d4-4c8e-87e8-e97df6b7a45e +S:system/var +I:79395348 +E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 +E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 +E:DM_UDEV_RULES=1 +E:DM_UDEV_RULES_VSN=2 +E:DM_NAME=system-var +E:DM_UUID=LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP +E:DM_SUSPENDED=0 +E:DM_VG_NAME=system +E:DM_LV_NAME=var +E:DM_LV_LAYER= +E:ID_FS_UUID=5c772222-f7d4-4c8e-87e8-e97df6b7a45e +E:ID_FS_UUID_ENC=5c772222-f7d4-4c8e-87e8-e97df6b7a45e +E:ID_FS_VERSION=1.0 +E:ID_FS_TYPE=ext4 +E:ID_FS_USAGE=filesystem +G:systemd +Q:systemd +V:1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b252:4 +Lines: 24 +S:system/tmp +S:disk/by-uuid/a9479d44-60e1-4015-a1e5-bb065e6dd11b +S:disk/by-id/dm-uuid-LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H +S:mapper/system-tmp +S:disk/by-id/dm-name-system-tmp +I:75852450 +E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 +E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 +E:DM_UDEV_RULES=1 +E:DM_UDEV_RULES_VSN=2 +E:DM_NAME=system-tmp +E:DM_UUID=LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H +E:DM_SUSPENDED=0 +E:DM_VG_NAME=system +E:DM_LV_NAME=tmp +E:DM_LV_LAYER= +E:ID_FS_UUID=a9479d44-60e1-4015-a1e5-bb065e6dd11b +E:ID_FS_UUID_ENC=a9479d44-60e1-4015-a1e5-bb065e6dd11b +E:ID_FS_VERSION=1.0 +E:ID_FS_TYPE=ext4 +E:ID_FS_USAGE=filesystem +G:systemd +Q:systemd +V:1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b252:5 +Lines: 24 +S:disk/by-uuid/b05b726a-c718-4c4d-8641-7c73a7696d83 +S:mapper/system-home +S:system/home +S:disk/by-id/dm-name-system-home +S:disk/by-id/dm-uuid-LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf +I:72604009 +E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 +E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 +E:DM_UDEV_RULES=1 +E:DM_UDEV_RULES_VSN=2 +E:DM_NAME=system-home +E:DM_UUID=LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf +E:DM_SUSPENDED=0 +E:DM_VG_NAME=system +E:DM_LV_NAME=home +E:DM_LV_LAYER= +E:ID_FS_UUID=b05b726a-c718-4c4d-8641-7c73a7696d83 +E:ID_FS_UUID_ENC=b05b726a-c718-4c4d-8641-7c73a7696d83 +E:ID_FS_VERSION=1.0 +E:ID_FS_TYPE=ext4 +E:ID_FS_USAGE=filesystem +G:systemd +Q:systemd +V:1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b254:0 +Lines: 10 +S:disk/by-path/pci-0000:00:06.0 +S:disk/by-path/virtio-pci-0000:00:06.0 +W:1 +I:8524171 +E:ID_PATH=pci-0000:00:06.0 +E:ID_PATH_TAG=pci-0000_00_06_0 +E:ID_PART_TABLE_UUID=653b59fd +E:ID_PART_TABLE_TYPE=dos +E:ID_FS_TYPE= +G:systemd +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b259:0 +Lines: 17 +S:disk/by-path/pci-0000:02:00.0-nvme-1 +S:disk/by-id/nvme-eui.p3vbbiejx5aae2r3 +S:disk/by-id/nvme-SAMSUNG_EHFTF55LURSY-000Y9_S252B6CU1HG3M1 +I:79621327 +E:ID_SERIAL_SHORT=S252B6CU1HG3M1 +E:ID_WWN=eui.p3vbbiejx5aae2r3 +E:ID_MODEL=SAMSUNG EHFTF55LURSY-000Y9 +E:ID_REVISION=4NBTUY95 +E:ID_SERIAL=SAMSUNG_EHFTF55LURSY-000Y9_S252B6CU1HG3M1 +E:ID_PATH=pci-0000:02:00.0-nvme-1 +E:ID_PATH_TAG=pci-0000_02_00_0-nvme-1 +E:ID_PART_TABLE_UUID=f301fdbd-fd1f-46d4-9fb8-c9aeb757f050 +E:ID_PART_TABLE_TYPE=gpt +E:ID_FS_TYPE= +G:systemd +Q:systemd +V:1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b8:0 +Lines: 60 
+S:disk/by-id/lvm-pv-uuid-cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw +S:disk/by-id/scsi-SATA_TOSHIBA_KSDB4U86_2160A0D5FVGG +S:disk/by-id/ata-TOSHIBA_KSDB4U866TE_2160A0D5FVGG +S:disk/by-path/pci-0000:3b:00.0-sas-phy7-lun-0 +S:disk/by-id/scsi-37c72382b8de36a64 +S:disk/by-id/wwn-0x7c72382b8de36a64 +W:702 +I:73815117 +E:ID_ATA=1 +E:ID_TYPE=disk +E:ID_BUS=ata +E:ID_MODEL=TOSHIBA_KSDB4U86 +E:ID_MODEL_ENC=TOSHIBA\x20KSDB4U86 +E:ID_REVISION=0102 +E:ID_SERIAL=TOSHIBA_KSDB4U866TE_DTB0QRJR2EIG +E:ID_SERIAL_SHORT=2160A0D5FVGG +E:ID_ATA_WRITE_CACHE=1 +E:ID_ATA_WRITE_CACHE_ENABLED=0 +E:ID_ATA_FEATURE_SET_PM=1 +E:ID_ATA_FEATURE_SET_PM_ENABLED=1 +E:ID_ATA_FEATURE_SET_SECURITY=1 +E:ID_ATA_FEATURE_SET_SECURITY_ENABLED=0 +E:ID_ATA_FEATURE_SET_SECURITY_ERASE_UNIT_MIN=66892 +E:ID_ATA_FEATURE_SET_SECURITY_ENHANCED_ERASE_UNIT_MIN=66892 +E:ID_ATA_FEATURE_SET_SMART=1 +E:ID_ATA_FEATURE_SET_SMART_ENABLED=1 +E:ID_ATA_FEATURE_SET_APM=1 +E:ID_ATA_FEATURE_SET_APM_ENABLED=1 +E:ID_ATA_FEATURE_SET_APM_CURRENT_VALUE=128 +E:ID_ATA_DOWNLOAD_MICROCODE=1 +E:ID_ATA_SATA=1 +E:ID_ATA_SATA_SIGNAL_RATE_GEN2=1 +E:ID_ATA_SATA_SIGNAL_RATE_GEN1=1 +E:ID_ATA_ROTATION_RATE_RPM=7200 +E:ID_WWN=0x7c72382b8de36a64 +E:ID_WWN_WITH_EXTENSION=0x7c72382b8de36a64 +E:ID_PATH=pci-0000:3b:00.0-sas-phy7-lun-0 +E:ID_PATH_TAG=pci-0000_3b_00_0-sas-phy7-lun-0 +E:ID_FS_UUID=cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw +E:ID_FS_UUID_ENC=cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw +E:ID_FS_VERSION=LVM2 001 +E:ID_FS_TYPE=LVM2_member +E:ID_FS_USAGE=raid +E:SCSI_TPGS=0 +E:SCSI_TYPE=disk +E:SCSI_VENDOR=ATA +E:SCSI_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 +E:SCSI_MODEL=TOSHIBA_KSDB4U86 +E:SCSI_MODEL_ENC=TOSHIBA\x20KSDB4U86 +E:SCSI_REVISION=0102 +E:ID_SCSI=1 +E:ID_SCSI_INQUIRY=1 +E:ID_VENDOR=ATA +E:ID_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 +E:SCSI_IDENT_SERIAL=2160A0D5FVGG +E:SCSI_IDENT_LUN_NAA_REG=7c72382b8de36a64 +E:SYSTEMD_READY=1 +E:SYSTEMD_ALIAS=/dev/block/8:0 +E:SYSTEMD_WANTS=lvm2-pvscan@8:0.service +G:systemd +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b8:16 +Lines: 62 +S:disk/by-id/scsi-3e1b87abbb16bd84e +S:disk/by-id/wwn-0xe1b87abbb16bd84e +S:disk/by-path/pci-0000:00:1f.2-ata-1 +S:disk/by-id/scsi-0ATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E +S:disk/by-id/scsi-SATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E +S:disk/by-id/scsi-1ATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E +S:disk/by-id/ata-SuperMicro_SSD_SMC0E1B87ABBB16BD84E +W:58 +I:147686920 +E:ID_ATA=1 +E:ID_TYPE=disk +E:ID_BUS=ata +E:ID_MODEL=SuperMicro_SSD +E:ID_MODEL_ENC=SuperMicro\x20SSD\x20\x20 +E:ID_REVISION=0R +E:ID_SERIAL=SuperMicro_SSD_SMC0E1B87ABBB16BD84E +E:ID_SERIAL_SHORT=SMC0E1B87ABBB16BD84E +E:ID_ATA_WRITE_CACHE=1 +E:ID_ATA_WRITE_CACHE_ENABLED=1 +E:ID_ATA_FEATURE_SET_HPA=1 +E:ID_ATA_FEATURE_SET_HPA_ENABLED=1 +E:ID_ATA_FEATURE_SET_PM=1 +E:ID_ATA_FEATURE_SET_PM_ENABLED=1 +E:ID_ATA_FEATURE_SET_SECURITY=1 +E:ID_ATA_FEATURE_SET_SECURITY_ENABLED=0 +E:ID_ATA_FEATURE_SET_SECURITY_ERASE_UNIT_MIN=4 +E:ID_ATA_FEATURE_SET_SECURITY_ENHANCED_ERASE_UNIT_MIN=4 +E:ID_ATA_FEATURE_SET_SMART=1 +E:ID_ATA_FEATURE_SET_SMART_ENABLED=1 +E:ID_ATA_FEATURE_SET_AAM=1 +E:ID_ATA_FEATURE_SET_AAM_ENABLED=0 +E:ID_ATA_FEATURE_SET_AAM_VENDOR_RECOMMENDED_VALUE=0 +E:ID_ATA_FEATURE_SET_AAM_CURRENT_VALUE=0 +E:ID_ATA_DOWNLOAD_MICROCODE=1 +E:ID_ATA_SATA=1 +E:ID_ATA_SATA_SIGNAL_RATE_GEN2=1 +E:ID_ATA_SATA_SIGNAL_RATE_GEN1=1 +E:ID_ATA_ROTATION_RATE_RPM=0 +E:ID_WWN=0xe1b87abbb16bd84e +E:ID_WWN_WITH_EXTENSION=0xe1b87abbb16bd84e +E:ID_PATH=pci-0000:00:1f.2-ata-1 +E:ID_PATH_TAG=pci-0000_00_1f_2-ata-1 
+E:ID_PART_TABLE_UUID=45980145-24e2-4302-a7f0-364c68cfaf59 +E:ID_PART_TABLE_TYPE=gpt +E:SCSI_TPGS=0 +E:SCSI_TYPE=disk +E:SCSI_VENDOR=ATA +E:SCSI_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 +E:SCSI_MODEL=SuperMicro_SSD +E:SCSI_MODEL_ENC=SuperMicro\x20SSD\x20\x20 +E:SCSI_REVISION=0R +E:ID_SCSI=1 +E:ID_SCSI_INQUIRY=1 +E:ID_VENDOR=ATA +E:ID_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 +E:SCSI_IDENT_SERIAL=SMC0E1B87ABBB16BD84E +E:SCSI_IDENT_LUN_VENDOR=SMC0E1B87ABBB16BD84E +E:SCSI_IDENT_LUN_T10=ATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E +E:SCSI_IDENT_LUN_ATA=SuperMicro_SSD_SMC0E1B87ABBB16BD84E +E:SCSI_IDENT_LUN_NAA_REG=e1b87abbb16bd84e +E:ID_FS_TYPE= +G:systemd +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: udev/data/b8:32 +Lines: 62 +S:disk/by-path/pci-0000:00:1f.2-ata-4 +S:disk/by-id/scsi-SATA_INTEL_SSDS9X9SI0_3EWB5Y25CWQWA7EH1U +S:disk/by-id/scsi-0ATA_INTEL_SSDS9X9SI0_3EWB5Y25CWQWA7EH1U +S:disk/by-id/scsi-1ATA_INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U +S:disk/by-id/lvm-pv-uuid-QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb +S:disk/by-id/ata-INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U +S:disk/by-id/scsi-358907ddc573a5de +S:disk/by-id/wwn-0x58907ddc573a5de +W:10 +I:145572852 +E:ID_ATA=1 +E:ID_TYPE=disk +E:ID_BUS=ata +E:ID_MODEL=INTEL_SSDS9X9SI0 +E:ID_MODEL_ENC=INTEL\x20SSDS9X9SI0 +E:ID_REVISION=0100 +E:ID_SERIAL=INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U +E:ID_SERIAL_SHORT=3EWB5Y25CWQWA7EH1U +E:ID_ATA_WRITE_CACHE=1 +E:ID_ATA_WRITE_CACHE_ENABLED=0 +E:ID_ATA_FEATURE_SET_PM=1 +E:ID_ATA_FEATURE_SET_PM_ENABLED=1 +E:ID_ATA_FEATURE_SET_SECURITY=1 +E:ID_ATA_FEATURE_SET_SECURITY_ENABLED=0 +E:ID_ATA_FEATURE_SET_SECURITY_ERASE_UNIT_MIN=4 +E:ID_ATA_FEATURE_SET_SECURITY_ENHANCED_ERASE_UNIT_MIN=4 +E:ID_ATA_FEATURE_SET_SMART=1 +E:ID_ATA_FEATURE_SET_SMART_ENABLED=1 +E:ID_ATA_DOWNLOAD_MICROCODE=1 +E:ID_ATA_SATA=1 +E:ID_ATA_SATA_SIGNAL_RATE_GEN2=1 +E:ID_ATA_SATA_SIGNAL_RATE_GEN1=1 +E:ID_ATA_ROTATION_RATE_RPM=0 +E:ID_WWN=0x58907ddc573a5de +E:ID_WWN_WITH_EXTENSION=0x58907ddc573a5de +E:ID_PATH=pci-0000:00:1f.2-ata-4 +E:ID_PATH_TAG=pci-0000_00_1f_2-ata-4 +E:ID_FS_UUID=QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb +E:ID_FS_UUID_ENC=QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb +E:ID_FS_VERSION=LVM2 001 +E:ID_FS_TYPE=LVM2_member +E:ID_FS_USAGE=raid +E:SCSI_TPGS=0 +E:SCSI_TYPE=disk +E:SCSI_VENDOR=ATA +E:SCSI_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 +E:SCSI_MODEL=INTEL_SSDS9X9SI0 +E:SCSI_MODEL_ENC=INTEL\x20SSDS9X9SI0 +E:SCSI_REVISION=0100 +E:ID_SCSI=1 +E:ID_SCSI_INQUIRY=1 +E:ID_VENDOR=ATA +E:ID_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 +E:SCSI_IDENT_SERIAL=3EWB5Y25CWQWA7EH1U +E:SCSI_IDENT_LUN_VENDOR=3EWB5Y25CWQWA7EH1U +E:SCSI_IDENT_LUN_T10=ATA_INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U +E:SCSI_IDENT_LUN_ATA=INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U +E:SCSI_IDENT_LUN_NAA_REG=58907ddc573a5de +E:SYSTEMD_READY=1 +E:SYSTEMD_ALIAS=/dev/block/8:32 +E:SYSTEMD_WANTS=lvm2-pvscan@8:32.service +G:systemd +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/collector/fixtures/usr/lib/os-release b/collector/fixtures/usr/lib/os-release new file mode 100644 index 0000000000..f228f22264 --- /dev/null +++ b/collector/fixtures/usr/lib/os-release @@ -0,0 +1,12 @@ +NAME="Ubuntu" +VERSION="20.04.2 LTS (Focal Fossa)" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 20.04.2 LTS" +VERSION_ID="20.04" +HOME_URL="https://www.ubuntu.com/" +SUPPORT_URL="https://help.ubuntu.com/" +BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" +PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" 
+VERSION_CODENAME=focal +UBUNTU_CODENAME=focal diff --git a/collector/fixtures_bindmount/proc/1/mountinfo b/collector/fixtures_bindmount/proc/1/mountinfo new file mode 100644 index 0000000000..e4f1bcc549 --- /dev/null +++ b/collector/fixtures_bindmount/proc/1/mountinfo @@ -0,0 +1,6 @@ +29 1 259:0 / /host rw,seclabel,relatime,data=ordered shared:1 - ext4 /dev/nvme1n0 rw +30 1 260:0 / /host/media/volume1 rw,seclabel,relatime,data=ordered shared:1 - ext4 /dev/nvme1n1 rw +31 1 261:0 / /host/media/volume2 rw,seclabel,relatime,data=ordered shared:1 - ext4 /dev/nvme1n2 rw +31 26 0:26 / /dev/shm rw,nosuid,nodev shared:4 - tmpfs tmpfs rw,inode64 +32 28 0:27 / /run/lock rw,nosuid,nodev,noexec,relatime shared:6 - tmpfs tmpfs rw,size=5120k,inode64 +33 24 0:28 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime shared:9 - cgroup2 cgroup2 rw diff --git a/collector/fixtures_bindmount/proc/mounts b/collector/fixtures_bindmount/proc/mounts deleted file mode 100644 index 32f9567e98..0000000000 --- a/collector/fixtures_bindmount/proc/mounts +++ /dev/null @@ -1,6 +0,0 @@ -/dev/nvme1n0 /host ext4 rw,seclabel,relatime,data=ordered 0 0 -/dev/nvme1n1 /host/media/volume1 ext4 rw,seclabel,relatime,data=ordered 0 0 -/dev/nvme1n2 /host/media/volume2 ext4 rw,seclabel,relatime,data=ordered 0 0 -tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0 -tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0 -tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0 diff --git a/collector/fixtures_hidepid/proc/mounts b/collector/fixtures_hidepid/proc/mounts deleted file mode 100644 index fb6a96359c..0000000000 --- a/collector/fixtures_hidepid/proc/mounts +++ /dev/null @@ -1 +0,0 @@ -rootfs / rootfs rw 0 0 diff --git a/collector/fixtures_hidepid/proc/self/mountinfo b/collector/fixtures_hidepid/proc/self/mountinfo new file mode 100644 index 0000000000..dde22f67af --- /dev/null +++ b/collector/fixtures_hidepid/proc/self/mountinfo @@ -0,0 +1 @@ +29 1 259:2 / / rw,relatime shared:1 - ext4 /dev/nvme0n1p2 rw,errors=remount-ro diff --git a/collector/helper.go b/collector/helper.go index df5cd26c1c..21dddf887c 100644 --- a/collector/helper.go +++ b/collector/helper.go @@ -14,14 +14,14 @@ package collector import ( - "bytes" - "io/ioutil" + "os" + "regexp" "strconv" "strings" ) func readUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } @@ -32,15 +32,19 @@ func readUintFromFile(path string) (uint64, error) { return value, nil } -// Take a []byte{} and return a string based on null termination. -// This is useful for situations where the OS has returned a null terminated -// string to use. -// If this function happens to receive a byteArray that contains no nulls, we -// simply convert the array to a string with no bounding. -func bytesToString(byteArray []byte) string { - n := bytes.IndexByte(byteArray, 0) - if n < 0 { - return string(byteArray) - } - return string(byteArray[:n]) +var metricNameRegex = regexp.MustCompile(`_*[^0-9A-Za-z_]+_*`) + +// SanitizeMetricName sanitize the given metric name by replacing invalid characters by underscores. +// +// OpenMetrics and the Prometheus exposition format require the metric name +// to consist only of alphanumericals and "_", ":" and they must not start +// with digits. Since colons in MetricFamily are reserved to signal that the +// MetricFamily is the result of a calculation or aggregation of a general +// purpose monitoring system, colons will be replaced as well. 
+// +// Note: If not subsequently prepending a namespace and/or subsystem (e.g., +// with prometheus.BuildFQName), the caller must ensure that the supplied +// metricName does not begin with a digit. +func SanitizeMetricName(metricName string) string { + return metricNameRegex.ReplaceAllString(metricName, "_") } diff --git a/collector/helper_test.go b/collector/helper_test.go index 0424d480f3..9c1296edda 100644 --- a/collector/helper_test.go +++ b/collector/helper_test.go @@ -17,47 +17,21 @@ import ( "testing" ) -func TestBytesToString(t *testing.T) { - tests := []struct { - name string - b []byte - expected string - }{ - { - "Single null byte", - []byte{0}, - "", - }, - { - "Empty byte array", - []byte{}, - "", - }, - { - "Not null terminated", - []byte{65, 66, 67}, - "ABC", - }, - { - "Null randomly in array", - []byte{65, 66, 67, 0, 65, 0, 65}, - "ABC", - }, - { - "Array starts with null and contains other valid bytes", - []byte{0, 65, 66, 67, 0}, - "", - }, +func TestSanitizeMetricName(t *testing.T) { + testcases := map[string]string{ + "": "", + "rx_errors": "rx_errors", + "Queue[0] AllocFails": "Queue_0_AllocFails", + "Tx LPI entry count": "Tx_LPI_entry_count", + "port.VF_admin_queue_requests": "port_VF_admin_queue_requests", + "[3]: tx_bytes": "_3_tx_bytes", + " err": "_err", } - for _, tt := range tests { - name := tt.name - b := tt.b - result := bytesToString(b) - expected := tt.expected - - if result != expected { - t.Errorf("bytesToString(%#v): Name: %s, expected %#v, got %#v)", b, name, expected, result) + for metricName, expected := range testcases { + got := SanitizeMetricName(metricName) + if expected != got { + t.Errorf("Expected '%s' but got '%s'", expected, got) } } } diff --git a/collector/hwmon_linux.go b/collector/hwmon_linux.go index 261a7c5ae0..fe4ac817aa 100644 --- a/collector/hwmon_linux.go +++ b/collector/hwmon_linux.go @@ -11,26 +11,32 @@ // See the License for the specific language governing permissions and // limitations under the License. 
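A minimal, self-contained sketch of the SanitizeMetricName behaviour added in collector/helper.go above; the package name and main function are purely illustrative, the regexp is the one from the diff, and the sample inputs are taken from the new TestSanitizeMetricName cases:

package main

import (
	"fmt"
	"regexp"
)

// Same expression as collector/helper.go in this diff: a run of characters
// that are not alphanumerics or "_", together with any adjacent underscores,
// collapses to a single underscore.
var metricNameRegex = regexp.MustCompile(`_*[^0-9A-Za-z_]+_*`)

// SanitizeMetricName replaces invalid metric-name characters with underscores.
func SanitizeMetricName(metricName string) string {
	return metricNameRegex.ReplaceAllString(metricName, "_")
}

func main() {
	// Inputs borrowed from the TestSanitizeMetricName table above.
	for _, raw := range []string{"Queue[0] AllocFails", "Tx LPI entry count", "[3]: tx_bytes"} {
		fmt.Printf("%q -> %q\n", raw, SanitizeMetricName(raw))
	}
	// Prints:
	// "Queue[0] AllocFails" -> "Queue_0_AllocFails"
	// "Tx LPI entry count" -> "Tx_LPI_entry_count"
	// "[3]: tx_bytes" -> "_3_tx_bytes"
}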
-// +build !nohwmon +//go:build !nohwmon package collector import ( "errors" - "io/ioutil" + "fmt" + "log/slog" "os" "path/filepath" "regexp" + "slices" "strconv" "strings" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) var ( + collectorHWmonChipInclude = kingpin.Flag("collector.hwmon.chip-include", "Regexp of hwmon chip to include (mutually exclusive to device-exclude).").String() + collectorHWmonChipExclude = kingpin.Flag("collector.hwmon.chip-exclude", "Regexp of hwmon chip to exclude (mutually exclusive to device-include).").String() + collectorHWmonSensorInclude = kingpin.Flag("collector.hwmon.sensor-include", "Regexp of hwmon sensor to include (mutually exclusive to sensor-exclude).").String() + collectorHWmonSensorExclude = kingpin.Flag("collector.hwmon.sensor-exclude", "Regexp of hwmon sensor to exclude (mutually exclusive to sensor-include).").String() + hwmonInvalidMetricChars = regexp.MustCompile("[^a-z0-9:_]") hwmonFilenameFormat = regexp.MustCompile(`^(?P[^0-9]+)(?P[0-9]*)?(_(?P.+))?$`) hwmonLabelDesc = []string{"chip", "sensor"} @@ -38,7 +44,7 @@ var ( hwmonSensorTypes = []string{ "vrm", "beep_enable", "update_interval", "in", "cpu", "fan", "pwm", "temp", "curr", "power", "energy", "humidity", - "intrusion", + "intrusion", "freq", } ) @@ -47,13 +53,20 @@ func init() { } type hwMonCollector struct { - logger log.Logger + deviceFilter deviceFilter + sensorFilter deviceFilter + logger *slog.Logger } // NewHwMonCollector returns a new Collector exposing /sys/class/hwmon stats // (similar to lm-sensors). -func NewHwMonCollector(logger log.Logger) (Collector, error) { - return &hwMonCollector{logger}, nil +func NewHwMonCollector(logger *slog.Logger) (Collector, error) { + + return &hwMonCollector{ + logger: logger, + deviceFilter: newDeviceFilter(*collectorHWmonChipExclude, *collectorHWmonChipInclude), + sensorFilter: newDeviceFilter(*collectorHWmonSensorExclude, *collectorHWmonSensorInclude), + }, nil } func cleanMetricName(name string) string { @@ -77,7 +90,7 @@ func addValueFile(data map[string]map[string]string, sensor string, prop string, data[sensor][prop] = value } -// sysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// sysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. func sysReadFile(file string) ([]byte, error) { f, err := os.Open(file) if err != nil { @@ -86,7 +99,7 @@ func sysReadFile(file string) ([]byte, error) { defer f.Close() // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. + // Go's os.ReadFile implementation to poll forever. // // Since we either want to read data or bail immediately, do the simplest // possible read using system call directly. 
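For context on the single-read pattern described in the comment above: a minimal, standalone sketch of reading a sysfs attribute with exactly one read(2) call, so a broken hwmon driver that returns EAGAIN fails immediately instead of being retried forever. The function name, the 128-byte buffer size, and the example path are assumptions for illustration; only the open / single-read / length-check shape follows the hunk:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// readSysFileOnce opens a sysfs file and issues a single read via the raw
// syscall wrapper, returning whatever the kernel handed back on that one call.
func readSysFileOnce(file string) ([]byte, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	b := make([]byte, 128) // sysfs attribute values are tiny; size is assumed here
	n, err := unix.Read(int(f.Fd()), b)
	if err != nil {
		return nil, err
	}
	if n < 0 {
		return nil, fmt.Errorf("failed to read file: %q, read returned negative bytes value: %d", file, n)
	}
	return b[:n], nil
}

func main() {
	// Example path only; any small sysfs attribute works the same way.
	data, err := readSysFileOnce("/sys/class/hwmon/hwmon0/temp1_input")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("raw value: %s", data)
}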
@@ -95,6 +108,9 @@ func sysReadFile(file string) ([]byte, error) { if err != nil { return nil, err } + if n < 0 { + return nil, fmt.Errorf("failed to read file: %q, read returned negative bytes value: %d", file, n) + } return b[:n], nil } @@ -127,7 +143,7 @@ func explodeSensorFilename(filename string) (ok bool, sensorType string, sensorN } func collectSensorData(dir string, data map[string]map[string]string) error { - sensorFiles, dirError := ioutil.ReadDir(dir) + sensorFiles, dirError := os.ReadDir(dir) if dirError != nil { return dirError } @@ -138,11 +154,8 @@ func collectSensorData(dir string, data map[string]map[string]string) error { continue } - for _, t := range hwmonSensorTypes { - if t == sensorType { - addValueFile(data, sensorType+strconv.Itoa(sensorNum), sensorProperty, filepath.Join(dir, file.Name())) - break - } + if slices.Contains(hwmonSensorTypes, sensorType) { + addValueFile(data, sensorType+strconv.Itoa(sensorNum), sensorProperty, filepath.Join(dir, file.Name())) } } return nil @@ -154,6 +167,11 @@ func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) er return err } + if c.deviceFilter.ignored(hwmonName) { + c.logger.Debug("ignoring hwmon chip", "chip", hwmonName) + return nil + } + data := make(map[string]map[string]string) err = collectSensorData(dir, data) if err != nil { @@ -188,16 +206,23 @@ func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) er // Format all sensors. for sensor, sensorData := range data { + // Filtering for sensors is done on concatenated device name and sensor name + // separated by a semicolon. This allows for excluding or including of specific + // sensors on specific devices. For example, to exclude the sensor "temp3" on + // the device "platform_coretemp_0", use "platform_coretemp_0;temp3" + if c.sensorFilter.ignored(hwmonName + ";" + sensor) { + c.logger.Debug("ignoring sensor", "sensor", sensor) + continue + } + _, sensorType, _, _ := explodeSensorFilename(sensor) labels := []string{hwmonName, sensor} if labelText, ok := sensorData["label"]; ok { - label := cleanMetricName(labelText) - if label != "" { - desc := prometheus.NewDesc("node_hwmon_sensor_label", "Label for given chip and sensor", - []string{"chip", "sensor", "label"}, nil) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1.0, hwmonName, sensor, label) - } + label := strings.ToValidUTF8(labelText, "�") + desc := prometheus.NewDesc("node_hwmon_sensor_label", "Label for given chip and sensor", + []string{"chip", "sensor", "label"}, nil) + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1.0, hwmonName, sensor, label) } if sensorType == "beep_enable" { @@ -329,6 +354,15 @@ func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) er continue } + if sensorType == "freq" && element == "input" { + if label, ok := sensorData["label"]; ok { + sensorLabel := cleanMetricName(label) + desc := prometheus.NewDesc(name+"_freq_mhz", "Hardware monitor for GPU frequency in MHz", hwmonLabelDesc, nil) + ch <- prometheus.MustNewConstMetric( + desc, prometheus.GaugeValue, parsedValue/1000000.0, append(labels[:len(labels)-1], sensorLabel)...) 
+ } + continue + } // fallback, just dump the metric as is desc := prometheus.NewDesc(name, "Hardware monitor "+sensorType+" element "+element, hwmonLabelDesc, nil) @@ -348,7 +382,7 @@ func (c *hwMonCollector) hwmonName(dir string) (string, error) { // However the path of the device has to be stable: // - /sys/devices// // Some hardware monitors have a "name" file that exports a human - // readbale name that can be used. + // readable name that can be used. // human readable names would be bat0 or coretemp, while a path string // could be platform_applesmc.768 @@ -373,7 +407,7 @@ func (c *hwMonCollector) hwmonName(dir string) (string, error) { } // preference 2: is there a name file - sysnameRaw, nameErr := ioutil.ReadFile(filepath.Join(dir, "name")) + sysnameRaw, nameErr := os.ReadFile(filepath.Join(dir, "name")) if nameErr == nil && string(sysnameRaw) != "" { cleanName := cleanMetricName(string(sysnameRaw)) if cleanName != "" { @@ -401,7 +435,7 @@ func (c *hwMonCollector) hwmonName(dir string) (string, error) { // hwmonHumanReadableChipName is similar to the methods in hwmonName, but with // different precedences -- we can allow duplicates here. func (c *hwMonCollector) hwmonHumanReadableChipName(dir string) (string, error) { - sysnameRaw, nameErr := ioutil.ReadFile(filepath.Join(dir, "name")) + sysnameRaw, nameErr := os.ReadFile(filepath.Join(dir, "name")) if nameErr != nil { return "", nameErr } @@ -422,34 +456,39 @@ func (c *hwMonCollector) Update(ch chan<- prometheus.Metric) error { hwmonPathName := filepath.Join(sysFilePath("class"), "hwmon") - hwmonFiles, err := ioutil.ReadDir(hwmonPathName) + hwmonFiles, err := os.ReadDir(hwmonPathName) if err != nil { if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "hwmon collector metrics are not available for this system") + c.logger.Debug("hwmon collector metrics are not available for this system") return ErrNoData } return err } + var lastErr error for _, hwDir := range hwmonFiles { hwmonXPathName := filepath.Join(hwmonPathName, hwDir.Name()) + fileInfo, err := os.Lstat(hwmonXPathName) + if err != nil { + continue + } - if hwDir.Mode()&os.ModeSymlink > 0 { - hwDir, err = os.Stat(hwmonXPathName) + if fileInfo.Mode()&os.ModeSymlink > 0 { + fileInfo, err = os.Stat(hwmonXPathName) if err != nil { continue } } - if !hwDir.IsDir() { + if !fileInfo.IsDir() { continue } - if lastErr := c.updateHwmon(ch, hwmonXPathName); lastErr != nil { - err = lastErr + if err = c.updateHwmon(ch, hwmonXPathName); err != nil { + lastErr = err } } - return err + return lastErr } diff --git a/collector/infiniband_linux.go b/collector/infiniband_linux.go index d93821026e..078047a0d7 100644 --- a/collector/infiniband_linux.go +++ b/collector/infiniband_linux.go @@ -11,19 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux -// +build !noinfiniband +//go:build !noinfiniband package collector import ( "errors" "fmt" + "log/slog" "os" "strconv" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) @@ -31,7 +29,7 @@ import ( type infinibandCollector struct { fs sysfs.FS metricDescs map[string]*prometheus.Desc - logger log.Logger + logger *slog.Logger subsystem string } @@ -40,7 +38,7 @@ func init() { } // NewInfiniBandCollector returns a new Collector exposing InfiniBand stats. 
-func NewInfiniBandCollector(logger log.Logger) (Collector, error) { +func NewInfiniBandCollector(logger *slog.Logger) (Collector, error) { var i infinibandCollector var err error @@ -60,8 +58,10 @@ func NewInfiniBandCollector(logger log.Logger) (Collector, error) { "legacy_unicast_packets_transmitted_total": "Number of unicast packets transmitted", "legacy_data_transmitted_bytes_total": "Number of data octets transmitted on all links", "legacy_packets_transmitted_total": "Number of data packets received on all links", + "excessive_buffer_overrun_errors_total": "Number of times that OverrunErrors consecutive flow control update periods occurred, each having at least one overrun error.", "link_downed_total": "Number of times the link failed to recover from an error state and went down", "link_error_recovery_total": "Number of times the link successfully recovered from an error state", + "local_link_integrity_errors_total": "Number of times that the count of local physical errors exceeded the threshold specified by LocalPhyErrors.", "multicast_packets_received_total": "Number of multicast packets received (including errors)", "multicast_packets_transmitted_total": "Number of multicast packets transmitted (including errors)", "physical_state_id": "Physical state of the InfiniBand port (0: no change, 1: sleep, 2: polling, 3: disable, 4: shift, 5: link up, 6: link error recover, 7: phytest)", @@ -79,6 +79,42 @@ func NewInfiniBandCollector(logger log.Logger) (Collector, error) { "state_id": "State of the InfiniBand port (0: no change, 1: down, 2: init, 3: armed, 4: active, 5: act defer)", "unicast_packets_received_total": "Number of unicast packets received (including errors)", "unicast_packets_transmitted_total": "Number of unicast packets transmitted (including errors)", + "port_receive_remote_physical_errors_total": "Number of packets marked with the EBP (End of Bad Packet) delimiter received on the port.", + "port_receive_switch_relay_errors_total": "Number of packets that could not be forwarded by the switch.", + "symbol_error_total": "Number of minor link errors detected on one or more physical lanes.", + "vl15_dropped_total": "Number of incoming VL15 packets dropped due to resource limitations.", + + // https://enterprise-support.nvidia.com/s/article/understanding-mlx5-linux-counters-and-status-parameters + "duplicate_requests_packets_total": "The number of received packets. A duplicate request is a request that had been previously executed.", + "implied_nak_seq_errors_total": "The number of time the requested decided an ACK. with a PSN larger than the expected PSN for an RDMA read or response.", + "lifespan_seconds": "The maximum period in ms which defines the aging of the counter reads. Two consecutive reads within this period might return the same values.", + "local_ack_timeout_errors_total": "The number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side. The QP retry limit was not exceed, therefore it is still recoverable error.", + "np_cnp_packets_sent_total": "The number of CNP packets sent by the Notification Point when it noticed congestion experienced in the RoCEv2 IP header (ECN bits). The counters was added in MLNX_OFED 4.1", + "np_ecn_marked_roce_packets_received_total": "The number of RoCEv2 packets received by the notification point which were marked for experiencing the congestion (ECN bits where '11' on the ingress RoCE traffic) . 
The counters was added in MLNX_OFED 4.1", + "out_of_buffer_drops_total": "The number of drops occurred due to lack of WQE for the associated QPs.", + "out_of_sequence_packets_received_total": "The number of out of sequence packets received.", + "packet_sequence_errors_total": "The number of received NAK sequence error packets. The QP retry limit was not exceeded.", + "req_cqes_errors_total": "The number of times requester detected CQEs completed with errors. The counters was added in MLNX_OFED 4.1", + "req_cqes_flush_errors_total": "The number of times requester detected CQEs completed with flushed errors. The counters was added in MLNX_OFED 4.1", + "req_remote_access_errors_total": "The number of times requester detected remote access errors. The counters was added in MLNX_OFED 4.1", + "req_remote_invalid_request_errors_total": "The number of times requester detected remote invalid request errors. The counters was added in MLNX_OFED 4.1", + "resp_cqes_errors_total": "The number of times responder detected CQEs completed with errors. The counters was added in MLNX_OFED 4.1", + "resp_cqes_flush_errors_total": "The number of times responder detected CQEs completed with flushed errors. The counters was added in MLNX_OFED 4.1", + "resp_local_length_errors_total": "The number of times responder detected local length errors. The counters was added in MLNX_OFED 4.1", + "resp_remote_access_errors_total": "The number of times responder detected remote access errors. The counters was added in MLNX_OFED 4.1", + "rnr_nak_retry_packets_received_total": "The number of received RNR NAK packets. The QP retry limit was not exceeded.", + "roce_adp_retransmits_total": "The number of adaptive retransmissions for RoCE traffic. The counter was added in MLNX_OFED rev 5.0-1.0.0.0 and kernel v5.6.0", + "roce_adp_retransmits_timeout_total": "The number of times RoCE traffic reached timeout due to adaptive retransmission. The counter was added in MLNX_OFED rev 5.0-1.0.0.0 and kernel v5.6.0", + "roce_slow_restart_used_total": "The number of times RoCE slow restart was used. The counter was added in MLNX_OFED rev 5.0-1.0.0.0 and kernel v5.6.0", + "roce_slow_restart_cnps_total": "The number of times RoCE slow restart generated CNP packets. The counter was added in MLNX_OFED rev 5.0-1.0.0.0 and kernel v5.6.0", + "roce_slow_restart_total": "The number of times RoCE slow restart changed state to slow restart. The counter was added in MLNX_OFED rev 5.0-1.0.0.0 and kernel v5.6.0", + "rp_cnp_packets_handled_total": "The number of CNP packets handled by the Reaction Point HCA to throttle the transmission rate. The counters was added in MLNX_OFED 4.1", + "rp_cnp_ignored_packets_received_total": "The number of CNP packets received and ignored by the Reaction Point HCA. This counter should not raise if RoCE Congestion Control was enabled in the network. If this counter raise, verify that ECN was enabled on the adapter.", + "rx_atomic_requests_total": "The number of received ATOMIC request for the associated QPs.", + "rx_dct_connect_requests_total": "The number of received connection requests for the associated DCTs.", + "rx_read_requests_total": "The number of received READ requests for the associated QPs.", + "rx_write_requests_total": "The number of received WRITE requests for the associated QPs.", + "rx_icrc_encapsulated_errors_total": "The number of RoCE packets with ICRC errors. 
This counter was added in MLNX_OFED 4.4 and kernel 4.19", } i.metricDescs = make(map[string]*prometheus.Desc) @@ -110,7 +146,7 @@ func (c *infinibandCollector) Update(ch chan<- prometheus.Metric) error { devices, err := c.fs.InfiniBandClass() if err != nil { if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "infiniband statistics not found, skipping") + c.logger.Debug("infiniband statistics not found, skipping") return ErrNoData } return fmt.Errorf("error obtaining InfiniBand class info: %w", err) @@ -141,8 +177,10 @@ func (c *infinibandCollector) Update(ch chan<- prometheus.Metric) error { c.pushCounter(ch, "legacy_unicast_packets_transmitted_total", port.Counters.LegacyPortUnicastXmitPackets, port.Name, portStr) c.pushCounter(ch, "legacy_data_transmitted_bytes_total", port.Counters.LegacyPortXmitData64, port.Name, portStr) c.pushCounter(ch, "legacy_packets_transmitted_total", port.Counters.LegacyPortXmitPackets64, port.Name, portStr) + c.pushCounter(ch, "excessive_buffer_overrun_errors_total", port.Counters.ExcessiveBufferOverrunErrors, port.Name, portStr) c.pushCounter(ch, "link_downed_total", port.Counters.LinkDowned, port.Name, portStr) c.pushCounter(ch, "link_error_recovery_total", port.Counters.LinkErrorRecovery, port.Name, portStr) + c.pushCounter(ch, "local_link_integrity_errors_total", port.Counters.LocalLinkIntegrityErrors, port.Name, portStr) c.pushCounter(ch, "multicast_packets_received_total", port.Counters.MulticastRcvPackets, port.Name, portStr) c.pushCounter(ch, "multicast_packets_transmitted_total", port.Counters.MulticastXmitPackets, port.Name, portStr) c.pushCounter(ch, "port_constraint_errors_received_total", port.Counters.PortRcvConstraintErrors, port.Name, portStr) @@ -157,6 +195,45 @@ func (c *infinibandCollector) Update(ch chan<- prometheus.Metric) error { c.pushCounter(ch, "port_transmit_wait_total", port.Counters.PortXmitWait, port.Name, portStr) c.pushCounter(ch, "unicast_packets_received_total", port.Counters.UnicastRcvPackets, port.Name, portStr) c.pushCounter(ch, "unicast_packets_transmitted_total", port.Counters.UnicastXmitPackets, port.Name, portStr) + c.pushCounter(ch, "port_receive_remote_physical_errors_total", port.Counters.PortRcvRemotePhysicalErrors, port.Name, portStr) + c.pushCounter(ch, "port_receive_switch_relay_errors_total", port.Counters.PortRcvSwitchRelayErrors, port.Name, portStr) + c.pushCounter(ch, "symbol_error_total", port.Counters.SymbolError, port.Name, portStr) + c.pushCounter(ch, "vl15_dropped_total", port.Counters.VL15Dropped, port.Name, portStr) + + // port.HwCounters + if port.HwCounters.Lifespan != nil { + c.pushMetric(ch, "lifespan_seconds", *(port.HwCounters.Lifespan)/1000, port.Name, portStr, prometheus.GaugeValue) + } + + c.pushCounter(ch, "duplicate_requests_packets_total", port.HwCounters.DuplicateRequest, port.Name, portStr) + c.pushCounter(ch, "implied_nak_seq_errors_total", port.HwCounters.ImpliedNakSeqErr, port.Name, portStr) + c.pushCounter(ch, "local_ack_timeout_errors_total", port.HwCounters.LocalAckTimeoutErr, port.Name, portStr) + c.pushCounter(ch, "np_cnp_packets_sent_total", port.HwCounters.NpCnpSent, port.Name, portStr) + c.pushCounter(ch, "np_ecn_marked_roce_packets_received_total", port.HwCounters.NpEcnMarkedRocePackets, port.Name, portStr) + c.pushCounter(ch, "out_of_buffer_drops_total", port.HwCounters.OutOfBuffer, port.Name, portStr) + c.pushCounter(ch, "out_of_sequence_packets_received_total", port.HwCounters.OutOfSequence, port.Name, portStr) + c.pushCounter(ch, 
"packet_sequence_errors_total", port.HwCounters.PacketSeqErr, port.Name, portStr) + c.pushCounter(ch, "req_cqes_errors_total", port.HwCounters.ReqCqeError, port.Name, portStr) + c.pushCounter(ch, "req_cqes_flush_errors_total", port.HwCounters.ReqCqeFlushError, port.Name, portStr) + c.pushCounter(ch, "req_remote_access_errors_total", port.HwCounters.ReqRemoteAccessErrors, port.Name, portStr) + c.pushCounter(ch, "req_remote_invalid_request_errors_total", port.HwCounters.ReqRemoteInvalidRequest, port.Name, portStr) + c.pushCounter(ch, "resp_cqes_errors_total", port.HwCounters.RespCqeError, port.Name, portStr) + c.pushCounter(ch, "resp_cqes_flush_errors_total", port.HwCounters.RespCqeFlushError, port.Name, portStr) + c.pushCounter(ch, "resp_local_length_errors_total", port.HwCounters.RespLocalLengthError, port.Name, portStr) + c.pushCounter(ch, "resp_remote_access_errors_total", port.HwCounters.RespRemoteAccessErrors, port.Name, portStr) + c.pushCounter(ch, "rnr_nak_retry_packets_received_total", port.HwCounters.RnrNakRetryErr, port.Name, portStr) + c.pushCounter(ch, "roce_adp_retransmits_total", port.HwCounters.RoceAdpRetrans, port.Name, portStr) + c.pushCounter(ch, "roce_adp_retransmits_timeout_total", port.HwCounters.RoceAdpRetransTo, port.Name, portStr) + c.pushCounter(ch, "roce_slow_restart_used_total", port.HwCounters.RoceSlowRestart, port.Name, portStr) + c.pushCounter(ch, "roce_slow_restart_cnps_total", port.HwCounters.RoceSlowRestartCnps, port.Name, portStr) + c.pushCounter(ch, "roce_slow_restart_total", port.HwCounters.RoceSlowRestartTrans, port.Name, portStr) + c.pushCounter(ch, "rp_cnp_packets_handled_total", port.HwCounters.RpCnpHandled, port.Name, portStr) + c.pushCounter(ch, "rp_cnp_ignored_packets_received_total", port.HwCounters.RpCnpIgnored, port.Name, portStr) + c.pushCounter(ch, "rx_atomic_requests_total", port.HwCounters.RxAtomicRequests, port.Name, portStr) + c.pushCounter(ch, "rx_dct_connect_requests_total", port.HwCounters.RxDctConnect, port.Name, portStr) + c.pushCounter(ch, "rx_read_requests_total", port.HwCounters.RxReadRequests, port.Name, portStr) + c.pushCounter(ch, "rx_write_requests_total", port.HwCounters.RxWriteRequests, port.Name, portStr) + c.pushCounter(ch, "rx_icrc_encapsulated_errors_total", port.HwCounters.RxIcrcEncapsulated, port.Name, portStr) } } diff --git a/collector/interrupts_common.go b/collector/interrupts_common.go index e681cfc5c9..4e4bad6ff1 100644 --- a/collector/interrupts_common.go +++ b/collector/interrupts_common.go @@ -11,33 +11,44 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build linux openbsd -// +build !nointerrupts +//go:build (linux || openbsd) && !nointerrupts package collector import ( - "github.com/go-kit/kit/log" + "log/slog" + + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" ) type interruptsCollector struct { - desc typedDesc - logger log.Logger + desc typedDesc + logger *slog.Logger + nameFilter deviceFilter + includeZeros bool } func init() { registerCollector("interrupts", defaultDisabled, NewInterruptsCollector) } +var ( + interruptsInclude = kingpin.Flag("collector.interrupts.name-include", "Regexp of interrupts name to include (mutually exclusive to --collector.interrupts.name-exclude).").String() + interruptsExclude = kingpin.Flag("collector.interrupts.name-exclude", "Regexp of interrupts name to exclude (mutually exclusive to --collector.interrupts.name-include).").String() + interruptsIncludeZeros = kingpin.Flag("collector.interrupts.include-zeros", "Include interrupts that have a zero value").Default("true").Bool() +) + // NewInterruptsCollector returns a new Collector exposing interrupts stats. -func NewInterruptsCollector(logger log.Logger) (Collector, error) { +func NewInterruptsCollector(logger *slog.Logger) (Collector, error) { return &interruptsCollector{ desc: typedDesc{prometheus.NewDesc( namespace+"_interrupts_total", "Interrupt details.", interruptLabelNames, nil, ), prometheus.CounterValue}, - logger: logger, + logger: logger, + nameFilter: newDeviceFilter(*interruptsExclude, *interruptsInclude), + includeZeros: *interruptsIncludeZeros, }, nil } diff --git a/collector/interrupts_linux.go b/collector/interrupts_linux.go index 5fcbebc95d..d70b0e042f 100644 --- a/collector/interrupts_linux.go +++ b/collector/interrupts_linux.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nointerrupts +//go:build !nointerrupts package collector @@ -38,10 +38,19 @@ func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) (err error) { } for name, interrupt := range interrupts { for cpuNo, value := range interrupt.values { + filterName := name + ";" + interrupt.info + ";" + interrupt.devices + if c.nameFilter.ignored(filterName) { + c.logger.Debug("ignoring interrupt name", "filter_name", filterName) + continue + } fv, err := strconv.ParseFloat(value, 64) if err != nil { return fmt.Errorf("invalid value %s in interrupts: %w", value, err) } + if !c.includeZeros && fv == 0.0 { + c.logger.Debug("ignoring interrupt with zero value", "filter_name", filterName, "cpu", cpuNo) + continue + } ch <- c.desc.mustNewConstMetric(fv, strconv.Itoa(cpuNo), name, interrupt.info, interrupt.devices) } } @@ -76,22 +85,29 @@ func parseInterrupts(r io.Reader) (map[string]interrupt, error) { cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - if len(parts) < cpuNum+2 { // irq + one column per cpu + details, - continue // we ignore ERR and MIS for now - } - intName := parts[0][:len(parts[0])-1] // remove trailing : - intr := interrupt{ - values: parts[1 : cpuNum+1], - } + // On aarch64 there can be zero space between the name/label + // and the values, so we need to split on `:` before using + // strings.Fields() to split on fields. 
+ group := strings.SplitN(scanner.Text(), ":", 2) + if len(group) > 1 { + parts := strings.Fields(group[1]) - if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt - intr.info = parts[cpuNum+1] - intr.devices = strings.Join(parts[cpuNum+2:], " ") - } else { - intr.info = strings.Join(parts[cpuNum+1:], " ") + if len(parts) < cpuNum+1 { // irq + one column per cpu + details, + continue // we ignore ERR and MIS for now + } + intName := strings.TrimLeft(group[0], " ") + intr := interrupt{ + values: parts[0:cpuNum], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.info = parts[cpuNum] + intr.devices = strings.Join(parts[cpuNum+1:], " ") + } else { + intr.info = strings.Join(parts[cpuNum:], " ") + } + interrupts[intName] = intr } - interrupts[intName] = intr } return interrupts, scanner.Err() diff --git a/collector/interrupts_linux_test.go b/collector/interrupts_linux_test.go index 02acb896a0..d920916524 100644 --- a/collector/interrupts_linux_test.go +++ b/collector/interrupts_linux_test.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nointerrupts + package collector import ( @@ -31,10 +33,39 @@ func TestInterrupts(t *testing.T) { } if want, got := "5031", interrupts["NMI"].values[1]; want != got { - t.Errorf("want interrupts %s, got %s", want, got) + t.Errorf("want interrupts value %s, got %s", want, got) } if want, got := "4968", interrupts["NMI"].values[3]; want != got { - t.Errorf("want interrupts %s, got %s", want, got) + t.Errorf("want interrupts value %s, got %s", want, got) + } + + if want, got := "IR-IO-APIC-edge", interrupts["12"].info; want != got { + t.Errorf("want interrupts info %s, got %s", want, got) + } + + if want, got := "i8042", interrupts["12"].devices; want != got { + t.Errorf("want interrupts devices %s, got %s", want, got) + } + +} + +// https://github.com/prometheus/node_exporter/issues/2557 +// On aarch64 the interrupts file can have zero spaces between the label of +// the row and the first value if the value is large +func TestInterruptsArm(t *testing.T) { + file, err := os.Open("fixtures/proc/interrupts_aarch64") + if err != nil { + t.Fatal(err) + } + defer file.Close() + + interrupts, err := parseInterrupts(file) + if err != nil { + t.Fatal(err) + } + + if _, ok := interrupts["IPI0"]; !ok { + t.Errorf("IPI0 label not found in interrupts") } } diff --git a/collector/interrupts_openbsd.go b/collector/interrupts_openbsd.go index bf38fe38d7..b9ab0d906d 100644 --- a/collector/interrupts_openbsd.go +++ b/collector/interrupts_openbsd.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
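The parseInterrupts rewrite above splits each row on ':' before calling strings.Fields, so aarch64 rows where a large counter value touches the label still parse correctly. A small standalone illustration of the same splitting order; the sample row is invented:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical aarch64-style row: no space between "IPI0:" and the first counter.
	line := "IPI0:1184151    1241494    Rescheduling interrupts"
	cpuNum := 2 // two CPU columns in this made-up row

	group := strings.SplitN(line, ":", 2)   // isolate the label first
	name := strings.TrimLeft(group[0], " ") // "IPI0"
	parts := strings.Fields(group[1])       // ["1184151" "1241494" "Rescheduling" "interrupts"]

	fmt.Println(name, parts[:cpuNum], strings.Join(parts[cpuNum:], " "))
	// Output: IPI0 [1184151 1241494] Rescheduling interrupts
}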
-// +build !nointerrupts +//go:build !nointerrupts && !amd64 package collector @@ -105,10 +105,20 @@ func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error { } for dev, interrupt := range interrupts { for cpuNo, value := range interrupt.values { + interruptType := fmt.Sprintf("%d", interrupt.vector) + filterName := interruptType + ";" + dev + if c.nameFilter.ignored(filterName) { + c.logger.Debug("ignoring interrupt name", "filter_name", filterName) + continue + } + if !c.includeZeros && value == 0.0 { + c.logger.Debug("ignoring interrupt with zero value", "filter_name", filterName, "cpu", cpuNo) + continue + } ch <- c.desc.mustNewConstMetric( value, strconv.Itoa(cpuNo), - strconv.Itoa(interrupt.vector), + interruptType, dev, ) } diff --git a/collector/interrupts_openbsd_amd64.go b/collector/interrupts_openbsd_amd64.go new file mode 100644 index 0000000000..5f40449696 --- /dev/null +++ b/collector/interrupts_openbsd_amd64.go @@ -0,0 +1,121 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nointerrupts + +package collector + +import ( + "fmt" + "strconv" + "unsafe" + + "github.com/prometheus/node_exporter/collector/utils" + + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +const ( + KERN_INTRCNT = 63 + KERN_INTRCNT_NUM = 1 + KERN_INTRCNT_CNT = 2 + KERN_INTRCNT_NAME = 3 + KERN_INTRCNT_VECTOR = 4 +) + +func nintr() _C_int { + mib := [3]_C_int{unix.CTL_KERN, KERN_INTRCNT, KERN_INTRCNT_NUM} + buf, err := sysctl(mib[:]) + if err != nil { + return 0 + } + return *(*_C_int)(unsafe.Pointer(&buf[0])) +} + +func intr(idx _C_int) (itr interrupt, err error) { + mib := [4]_C_int{unix.CTL_KERN, KERN_INTRCNT, KERN_INTRCNT_NAME, idx} + buf, err := sysctl(mib[:]) + if err != nil { + return + } + dev := *(*[128]byte)(unsafe.Pointer(&buf[0])) + itr.device = utils.SafeBytesToString(dev[:]) + + mib[2] = KERN_INTRCNT_VECTOR + buf, err = sysctl(mib[:]) + if err != nil { + return + } + itr.vector = *(*int)(unsafe.Pointer(&buf[0])) + + mib[2] = KERN_INTRCNT_CNT + buf, err = sysctl(mib[:]) + if err != nil { + return + } + count := *(*uint64)(unsafe.Pointer(&buf[0])) + itr.values = []float64{float64(count)} + return +} + +var interruptLabelNames = []string{"cpu", "type", "devices"} + +func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error { + interrupts, err := getInterrupts() + if err != nil { + return fmt.Errorf("couldn't get interrupts: %s", err) + } + for dev, interrupt := range interrupts { + for cpuNo, value := range interrupt.values { + interruptType := fmt.Sprintf("%d", interrupt.vector) + filterName := interruptType + ";" + dev + if c.nameFilter.ignored(filterName) { + c.logger.Debug("ignoring interrupt name", "filter_name", filterName) + continue + } + if !c.includeZeros && value == 0.0 { + c.logger.Debug("ignoring interrupt with zero value", "filter_name", filterName, "cpu", cpuNo) + continue + } + ch <- c.desc.mustNewConstMetric( + value, + strconv.Itoa(cpuNo), + interruptType, 
+ dev, + ) + } + } + return nil +} + +type interrupt struct { + vector int + device string + values []float64 +} + +func getInterrupts() (map[string]interrupt, error) { + var interrupts = map[string]interrupt{} + n := nintr() + + for i := _C_int(0); i < n; i++ { + itr, err := intr(i) + if err != nil { + return nil, err + } + interrupts[itr.device] = itr + } + + return interrupts, nil +} diff --git a/collector/ipvs_linux.go b/collector/ipvs_linux.go index c2e9d709f8..2ad30e07e9 100644 --- a/collector/ipvs_linux.go +++ b/collector/ipvs_linux.go @@ -11,23 +11,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noipvs +//go:build !noipvs package collector import ( "errors" "fmt" + "log/slog" "os" "sort" "strconv" "strings" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" - kingpin "gopkg.in/alecthomas/kingpin.v2" ) type ipvsCollector struct { @@ -36,7 +35,7 @@ type ipvsCollector struct { backendLabels []string backendConnectionsActive, backendConnectionsInact, backendWeight typedDesc connections, incomingPackets, outgoingPackets, incomingBytes, outgoingBytes typedDesc - logger log.Logger + logger *slog.Logger } type ipvsBackendStatus struct { @@ -72,11 +71,11 @@ func init() { // NewIPVSCollector sets up a new collector for IPVS metrics. It accepts the // "procfs" config parameter to override the default proc location (/proc). -func NewIPVSCollector(logger log.Logger) (Collector, error) { +func NewIPVSCollector(logger *slog.Logger) (Collector, error) { return newIPVSCollector(logger) } -func newIPVSCollector(logger log.Logger) (*ipvsCollector, error) { +func newIPVSCollector(logger *slog.Logger) (*ipvsCollector, error) { var ( c ipvsCollector err error @@ -142,7 +141,7 @@ func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error { if err != nil { // Cannot access ipvs metrics, report no error. if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "ipvs collector metrics are not available for this system") + c.logger.Debug("ipvs collector metrics are not available for this system") return ErrNoData } return fmt.Errorf("could not get IPVS stats: %w", err) diff --git a/collector/ipvs_linux_test.go b/collector/ipvs_linux_test.go index 43c68395b4..b6870d8052 100644 --- a/collector/ipvs_linux_test.go +++ b/collector/ipvs_linux_test.go @@ -11,22 +11,24 @@ // See the License for the specific language governing permissions and // limitations under the License. 
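The logging change repeated throughout these hunks drops go-kit's leveled logger for the standard library's log/slog: level.Debug(c.logger).Log("msg", ...) becomes c.logger.Debug(...) with trailing key/value pairs. A minimal sketch of both the production-style logger and the discard logger the updated tests construct (handler options here are illustrative):

package main

import (
	"io"
	"log/slog"
	"os"
)

func main() {
	// Text handler on stderr with debug enabled, roughly what a collector logger sees.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	logger.Debug("ipvs collector metrics are not available for this system", "collector", "ipvs")

	// No-op logger used by the rewritten tests in place of log.NewNopLogger().
	nop := slog.New(slog.NewTextHandler(io.Discard, nil))
	nop.Debug("this message is discarded")
}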
+//go:build !noipvs + package collector import ( "errors" "fmt" - "io/ioutil" + "io" + "log/slog" "net/http" "net/http/httptest" + "os" "strings" "testing" - "github.com/go-kit/kit/log" - + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - "gopkg.in/alecthomas/kingpin.v2" ) func TestIPVSCollector(t *testing.T) { @@ -111,7 +113,7 @@ func TestIPVSCollector(t *testing.T) { if _, err := kingpin.CommandLine.Parse(args); err != nil { t.Fatal(err) } - collector, err := newIPVSCollector(log.NewNopLogger()) + collector, err := newIPVSCollector(slog.New(slog.NewTextHandler(io.Discard, nil))) if err != nil { if test.err == nil { t.Fatal(err) @@ -179,7 +181,7 @@ func TestIPVSCollectorResponse(t *testing.T) { if _, err := kingpin.CommandLine.Parse(args); err != nil { t.Fatal(err) } - collector, err := NewIPVSCollector(log.NewNopLogger()) + collector, err := NewIPVSCollector(slog.New(slog.NewTextHandler(io.Discard, nil))) if err != nil { t.Fatal(err) } @@ -189,7 +191,7 @@ func TestIPVSCollectorResponse(t *testing.T) { rw := httptest.NewRecorder() promhttp.InstrumentMetricHandler(registry, promhttp.HandlerFor(registry, promhttp.HandlerOpts{})).ServeHTTP(rw, &http.Request{}) - wantMetrics, err := ioutil.ReadFile(test.metricsFile) + wantMetrics, err := os.ReadFile(test.metricsFile) if err != nil { t.Fatalf("unable to read input test file %s: %s", test.metricsFile, err) } @@ -209,9 +211,8 @@ func TestIPVSCollectorResponse(t *testing.T) { if want == got { // this is a line we are interested in, and it is correct continue wantLoop - } else { - gotLinesIdx++ } + gotLinesIdx++ } // if this point is reached, the line we want was missing t.Fatalf("Missing expected output line(s), first missing line is %s", want) diff --git a/collector/kernel_hung_linux.go b/collector/kernel_hung_linux.go new file mode 100644 index 0000000000..cf842d0b1e --- /dev/null +++ b/collector/kernel_hung_linux.go @@ -0,0 +1,63 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
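The flag plumbing in these hunks moves from gopkg.in/alecthomas/kingpin.v2 to github.com/alecthomas/kingpin/v2, and new collector options (such as the interrupts include/exclude flags above) are declared with the same builder style. A self-contained sketch under that assumption; the variable name and the way the flag is parsed here are illustrative, since node_exporter itself parses kingpin.CommandLine in its main package:

package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kingpin/v2"
)

// Declared at package level, mirroring the style used by the collectors.
var includeZeros = kingpin.Flag("collector.interrupts.include-zeros", "Include interrupts that have a zero value").Default("true").Bool()

func main() {
	if _, err := kingpin.CommandLine.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("include-zeros:", *includeZeros)
}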
+ +//go:build !noprocesses + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +type kernelHungCollector struct { + fs procfs.FS + logger *slog.Logger +} + +func init() { + registerCollector("kernel_hung", defaultDisabled, NewKernelHungCollector) +} + +func NewKernelHungCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + return &kernelHungCollector{ + fs: fs, + logger: logger, + }, nil +} + +var ( + taskDetectCount = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "kernel_hung", "task_detect_count"), + "Total number of interrupts serviced.", + nil, nil, + ) +) + +func (c *kernelHungCollector) Update(ch chan<- prometheus.Metric) error { + kernelHung, err := c.fs.KernelHung() + if err != nil { + return err + } + + ch <- prometheus.MustNewConstMetric(taskDetectCount, prometheus.CounterValue, float64(*kernelHung.HungTaskDetectCount)) + + return nil +} diff --git a/collector/ksmd_linux.go b/collector/ksmd_linux.go index 44d6f8d3eb..2a68f43f94 100644 --- a/collector/ksmd_linux.go +++ b/collector/ksmd_linux.go @@ -11,15 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noksmd +//go:build !noksmd package collector import ( "fmt" + "log/slog" "path/filepath" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -30,7 +30,7 @@ var ( type ksmdCollector struct { metricDescs map[string]*prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -49,7 +49,7 @@ func getCanonicalMetricName(filename string) string { } // NewKsmdCollector returns a new Collector exposing kernel/system statistics. -func NewKsmdCollector(logger log.Logger) (Collector, error) { +func NewKsmdCollector(logger *slog.Logger) (Collector, error) { subsystem := "ksmd" descs := make(map[string]*prometheus.Desc) diff --git a/collector/kvm_bsd.go b/collector/kvm_bsd.go index 8798736385..feee6cc833 100644 --- a/collector/kvm_bsd.go +++ b/collector/kvm_bsd.go @@ -11,8 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nokvm -// +build freebsd dragonfly +//go:build !nokvm && (freebsd || dragonfly) package collector diff --git a/collector/lnstat_linux.go b/collector/lnstat_linux.go new file mode 100644 index 0000000000..232e5ce4e1 --- /dev/null +++ b/collector/lnstat_linux.go @@ -0,0 +1,71 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
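Every file touched here also converts its legacy // +build comments to the //go:build form: in the old syntax each additional // +build line is AND-ed and space-separated terms within one line are OR-ed, while the new syntax spells the whole condition out as a single boolean expression, as in the kvm_bsd.go hunk above:

// Old form (two stacked lines, implicitly AND-ed; spaces mean OR):
//   +build !nokvm
//   +build freebsd dragonfly
//
// New form, one expression:

//go:build !nokvm && (freebsd || dragonfly)

package collector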
+ +//go:build !nolnstat + +package collector + +import ( + "fmt" + "log/slog" + "strconv" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +type lnstatCollector struct { + logger *slog.Logger +} + +func init() { + registerCollector("lnstat", defaultDisabled, NewLnstatCollector) +} + +func NewLnstatCollector(logger *slog.Logger) (Collector, error) { + return &lnstatCollector{logger}, nil +} + +func (c *lnstatCollector) Update(ch chan<- prometheus.Metric) error { + const ( + subsystem = "lnstat" + ) + + fs, err := procfs.NewFS(*procPath) + if err != nil { + return fmt.Errorf("failed to open procfs: %w", err) + } + + netStats, err := fs.NetStat() + if err != nil { + return fmt.Errorf("lnstat error: %s", err) + } + + for _, netStatFile := range netStats { + labelNames := []string{"subsystem", "cpu"} + for header, stats := range netStatFile.Stats { + for cpu, value := range stats { + labelValues := []string{netStatFile.Filename, strconv.Itoa(cpu)} + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, header+"_total"), + "linux network cache stats", + labelNames, nil, + ), + prometheus.CounterValue, float64(value), labelValues..., + ) + } + } + } + return nil +} diff --git a/collector/loadavg.go b/collector/loadavg.go index 7c1fd99603..48c271d9f5 100644 --- a/collector/loadavg.go +++ b/collector/loadavg.go @@ -11,22 +11,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris -// +build !noloadavg +//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || aix) && !noloadavg package collector import ( "fmt" + "log/slog" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" ) type loadavgCollector struct { metric []typedDesc - logger log.Logger + logger *slog.Logger } func init() { @@ -34,7 +32,7 @@ func init() { } // NewLoadavgCollector returns a new Collector exposing load average stats. -func NewLoadavgCollector(logger log.Logger) (Collector, error) { +func NewLoadavgCollector(logger *slog.Logger) (Collector, error) { return &loadavgCollector{ metric: []typedDesc{ {prometheus.NewDesc(namespace+"_load1", "1m load average.", nil, nil), prometheus.GaugeValue}, @@ -51,7 +49,7 @@ func (c *loadavgCollector) Update(ch chan<- prometheus.Metric) error { return fmt.Errorf("couldn't get load: %w", err) } for i, load := range loads { - level.Debug(c.logger).Log("msg", "return load", "index", i, "load", load) + c.logger.Debug("return load", "index", i, "load", load) ch <- c.metric[i].mustNewConstMetric(load) } return err diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/collector/loadavg_aix.go similarity index 62% rename from vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go rename to collector/loadavg_aix.go index 6609e2877c..8736dca846 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go +++ b/collector/loadavg_aix.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2024 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -11,12 +11,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !go1.12 +//go:build !noloadavg -package prometheus +package collector -// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before -// 1.12. Remove this whole file once the minimum supported Go version is 1.12. -func readBuildInfo() (path, version, sum string) { - return "unknown", "unknown", "unknown" +import ( + "github.com/power-devops/perfstat" +) + +func getLoad() ([]float64, error) { + stat, err := perfstat.CpuTotalStat() + if err != nil { + return nil, err + } + + return []float64{float64(stat.LoadAvg1), float64(stat.LoadAvg5), float64(stat.LoadAvg15)}, nil } diff --git a/collector/loadavg_bsd.go b/collector/loadavg_bsd.go index 38215aabaa..23ab24a9e8 100644 --- a/collector/loadavg_bsd.go +++ b/collector/loadavg_bsd.go @@ -11,8 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build darwin dragonfly freebsd netbsd openbsd -// +build !noloadavg +//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && !noloadavg package collector diff --git a/collector/loadavg_linux.go b/collector/loadavg_linux.go index 7b89668ed8..f0d6c6e727 100644 --- a/collector/loadavg_linux.go +++ b/collector/loadavg_linux.go @@ -11,20 +11,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noloadavg +//go:build !noloadavg package collector import ( "fmt" - "io/ioutil" + "os" "strconv" "strings" ) // Read loadavg from /proc. func getLoad() (loads []float64, err error) { - data, err := ioutil.ReadFile(procFilePath("loadavg")) + data, err := os.ReadFile(procFilePath("loadavg")) if err != nil { return nil, err } diff --git a/collector/loadavg_linux_test.go b/collector/loadavg_linux_test.go index e8e5a0ce3f..271cc6b42f 100644 --- a/collector/loadavg_linux_test.go +++ b/collector/loadavg_linux_test.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !noloadavg + package collector import "testing" diff --git a/collector/loadavg_solaris.go b/collector/loadavg_solaris.go index 1ef8d312f2..31b1105ef1 100644 --- a/collector/loadavg_solaris.go +++ b/collector/loadavg_solaris.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noloadavg +//go:build !noloadavg package collector @@ -19,7 +19,7 @@ import ( "fmt" "strconv" - "github.com/siebenmann/go-kstat" + "github.com/illumos/go-kstat" ) // #include diff --git a/collector/logind_linux.go b/collector/logind_linux.go index 19a29a2e99..bd2c04848d 100644 --- a/collector/logind_linux.go +++ b/collector/logind_linux.go @@ -11,17 +11,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nologind +//go:build !nologind package collector import ( "fmt" + "log/slog" "os" + "slices" "strconv" - "github.com/go-kit/kit/log" - "github.com/godbus/dbus" + "github.com/godbus/dbus/v5" "github.com/prometheus/client_golang/prometheus" ) @@ -45,7 +46,7 @@ var ( ) type logindCollector struct { - logger log.Logger + logger *slog.Logger } type logindDbus struct { @@ -85,7 +86,7 @@ func init() { } // NewLogindCollector returns a new Collector exposing logind statistics. 
-func NewLogindCollector(logger log.Logger) (Collector, error) { +func NewLogindCollector(logger *slog.Logger) (Collector, error) { return &logindCollector{logger}, nil } @@ -137,10 +138,8 @@ func collectMetrics(ch chan<- prometheus.Metric, c logindInterface) error { } func knownStringOrOther(value string, known []string) string { - for i := range known { - if value == known[i] { - return value - } + if slices.Contains(known, value) { + return value } return "other" @@ -175,19 +174,19 @@ func newDbus() (*logindDbus, error) { } func (c *logindDbus) listSeats() ([]string, error) { - var result [][]interface{} + var result [][]any err := c.object.Call(dbusObject+".Manager.ListSeats", 0).Store(&result) if err != nil { return nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } seats := make([]logindSeatEntry, len(result)) - seatsInterface := make([]interface{}, len(seats)) + seatsInterface := make([]any, len(seats)) for i := range seats { seatsInterface[i] = &seats[i] } @@ -208,19 +207,19 @@ func (c *logindDbus) listSeats() ([]string, error) { } func (c *logindDbus) listSessions() ([]logindSessionEntry, error) { - var result [][]interface{} + var result [][]any err := c.object.Call(dbusObject+".Manager.ListSessions", 0).Store(&result) if err != nil { return nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } sessions := make([]logindSessionEntry, len(result)) - sessionsInterface := make([]interface{}, len(sessions)) + sessionsInterface := make([]any, len(sessions)) for i := range sessions { sessionsInterface[i] = &sessions[i] } diff --git a/collector/logind_linux_test.go b/collector/logind_linux_test.go index 349d53872a..b2592a74e0 100644 --- a/collector/logind_linux_test.go +++ b/collector/logind_linux_test.go @@ -11,12 +11,14 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nologind + package collector import ( "testing" - "github.com/godbus/dbus" + "github.com/godbus/dbus/v5" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collector/mdadm_linux.go b/collector/mdadm_linux.go index 865553fe64..14dc20887c 100644 --- a/collector/mdadm_linux.go +++ b/collector/mdadm_linux.go @@ -11,23 +11,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nomdadm +//go:build !nomdadm package collector import ( "errors" "fmt" + "log/slog" "os" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/prometheus/procfs/sysfs" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type mdadmCollector struct { - logger log.Logger + logger *slog.Logger } func init() { @@ -35,7 +36,7 @@ func init() { } // NewMdadmCollector returns a new Collector exposing raid statistics. 
-func NewMdadmCollector(logger log.Logger) (Collector, error) { +func NewMdadmCollector(logger *slog.Logger) (Collector, error) { return &mdadmCollector{logger}, nil } @@ -64,6 +65,12 @@ var ( []string{"device"}, prometheus.Labels{"state": "resync"}, ) + checkDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "md", "state"), + "Indicates the state of md-device.", + []string{"device"}, + prometheus.Labels{"state": "check"}, + ) disksDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "disks"), @@ -92,20 +99,33 @@ var ( []string{"device"}, nil, ) + + mdraidDisks = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "md", "raid_disks"), + "Number of raid disks on device.", + []string{"device"}, + nil, + ) + + mdraidDegradedDisksDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "md", "degraded"), + "Number of degraded disks on device.", + []string{"device"}, + nil, + ) ) func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error { - fs, err := procfs.NewFS(*procPath) + procFS, err := procfs.NewFS(*procPath) if err != nil { return fmt.Errorf("failed to open procfs: %w", err) } - mdStats, err := fs.MDStat() - + mdStats, err := procFS.MDStat() if err != nil { if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "Not collecting mdstat, file does not exist", "file", *procPath) + c.logger.Debug("Not collecting mdstat, file does not exist", "file", *procPath) return ErrNoData } @@ -113,7 +133,7 @@ func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error { } for _, mdStat := range mdStats { - level.Debug(c.logger).Log("msg", "collecting metrics for device", "device", mdStat.Name) + c.logger.Debug("collecting metrics for device", "device", mdStat.Name) stateVals := make(map[string]float64) stateVals[mdStat.ActivityState] = 1 @@ -174,6 +194,13 @@ func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error { mdStat.Name, ) + ch <- prometheus.MustNewConstMetric( + checkDesc, + prometheus.GaugeValue, + stateVals["checking"], + mdStat.Name, + ) + ch <- prometheus.MustNewConstMetric( blocksTotalDesc, prometheus.GaugeValue, @@ -188,5 +215,34 @@ func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error { ) } + sysFS, err := sysfs.NewFS(*sysPath) + if err != nil { + return fmt.Errorf("failed to open sysfs: %w", err) + } + mdraids, err := sysFS.Mdraids() + if err != nil { + if errors.Is(err, os.ErrNotExist) { + c.logger.Debug("Not collecting mdraids, file does not exist", "file", *sysPath) + return ErrNoData + } + + return fmt.Errorf("error parsing mdraids: %w", err) + } + + for _, mdraid := range mdraids { + ch <- prometheus.MustNewConstMetric( + mdraidDisks, + prometheus.GaugeValue, + float64(mdraid.Disks), + mdraid.Device, + ) + ch <- prometheus.MustNewConstMetric( + mdraidDegradedDisksDesc, + prometheus.GaugeValue, + float64(mdraid.DegradedDisks), + mdraid.Device, + ) + } + return nil } diff --git a/collector/mdadm_linux_test.go b/collector/mdadm_linux_test.go new file mode 100644 index 0000000000..6aa216232e --- /dev/null +++ b/collector/mdadm_linux_test.go @@ -0,0 +1,293 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nomdadm + +package collector + +import ( + "log/slog" + "os" + "strings" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" +) + +type testMdadmCollector struct { + mc Collector +} + +func (c testMdadmCollector) Collect(ch chan<- prometheus.Metric) { + c.mc.Update(ch) +} + +func (c testMdadmCollector) Describe(ch chan<- *prometheus.Desc) { + prometheus.DescribeByCollect(c, ch) +} + +func NewTestMdadmCollector(logger *slog.Logger) (prometheus.Collector, error) { + mc, err := NewMdadmCollector(logger) + if err != nil { + return testMdadmCollector{}, err + } + return &testMdadmCollector{mc}, nil +} + +func TestMdadmStats(t *testing.T) { + *sysPath = "fixtures/sys" + *procPath = "fixtures/proc" + testcase := `# HELP node_md_blocks Total number of blocks on device. + # TYPE node_md_blocks gauge + node_md_blocks{device="md0"} 248896 + node_md_blocks{device="md00"} 4.186624e+06 + node_md_blocks{device="md10"} 3.14159265e+08 + node_md_blocks{device="md101"} 322560 + node_md_blocks{device="md11"} 4.190208e+06 + node_md_blocks{device="md12"} 3.886394368e+09 + node_md_blocks{device="md120"} 2.095104e+06 + node_md_blocks{device="md126"} 1.855870976e+09 + node_md_blocks{device="md127"} 3.12319552e+08 + node_md_blocks{device="md201"} 1.993728e+06 + node_md_blocks{device="md219"} 7932 + node_md_blocks{device="md3"} 5.853468288e+09 + node_md_blocks{device="md4"} 4.883648e+06 + node_md_blocks{device="md6"} 1.95310144e+08 + node_md_blocks{device="md7"} 7.813735424e+09 + node_md_blocks{device="md8"} 1.95310144e+08 + node_md_blocks{device="md9"} 523968 + # HELP node_md_blocks_synced Number of blocks synced on device. + # TYPE node_md_blocks_synced gauge + node_md_blocks_synced{device="md0"} 248896 + node_md_blocks_synced{device="md00"} 4.186624e+06 + node_md_blocks_synced{device="md10"} 3.14159265e+08 + node_md_blocks_synced{device="md101"} 322560 + node_md_blocks_synced{device="md11"} 0 + node_md_blocks_synced{device="md12"} 3.886394368e+09 + node_md_blocks_synced{device="md120"} 2.095104e+06 + node_md_blocks_synced{device="md126"} 1.855870976e+09 + node_md_blocks_synced{device="md127"} 3.12319552e+08 + node_md_blocks_synced{device="md201"} 114176 + node_md_blocks_synced{device="md219"} 7932 + node_md_blocks_synced{device="md3"} 5.853468288e+09 + node_md_blocks_synced{device="md4"} 4.883648e+06 + node_md_blocks_synced{device="md6"} 1.6775552e+07 + node_md_blocks_synced{device="md7"} 7.813735424e+09 + node_md_blocks_synced{device="md8"} 1.6775552e+07 + node_md_blocks_synced{device="md9"} 0 + # HELP node_md_degraded Number of degraded disks on device. + # TYPE node_md_degraded gauge + node_md_degraded{device="md0"} 0 + node_md_degraded{device="md1"} 0 + node_md_degraded{device="md10"} 0 + node_md_degraded{device="md4"} 0 + node_md_degraded{device="md5"} 1 + node_md_degraded{device="md6"} 1 + # HELP node_md_disks Number of active/failed/spare disks of device. 
+ # TYPE node_md_disks gauge + node_md_disks{device="md0",state="active"} 2 + node_md_disks{device="md0",state="failed"} 0 + node_md_disks{device="md0",state="spare"} 0 + node_md_disks{device="md00",state="active"} 1 + node_md_disks{device="md00",state="failed"} 0 + node_md_disks{device="md00",state="spare"} 0 + node_md_disks{device="md10",state="active"} 2 + node_md_disks{device="md10",state="failed"} 0 + node_md_disks{device="md10",state="spare"} 0 + node_md_disks{device="md101",state="active"} 3 + node_md_disks{device="md101",state="failed"} 0 + node_md_disks{device="md101",state="spare"} 0 + node_md_disks{device="md11",state="active"} 2 + node_md_disks{device="md11",state="failed"} 1 + node_md_disks{device="md11",state="spare"} 2 + node_md_disks{device="md12",state="active"} 2 + node_md_disks{device="md12",state="failed"} 0 + node_md_disks{device="md12",state="spare"} 0 + node_md_disks{device="md120",state="active"} 2 + node_md_disks{device="md120",state="failed"} 0 + node_md_disks{device="md120",state="spare"} 0 + node_md_disks{device="md126",state="active"} 2 + node_md_disks{device="md126",state="failed"} 0 + node_md_disks{device="md126",state="spare"} 0 + node_md_disks{device="md127",state="active"} 2 + node_md_disks{device="md127",state="failed"} 0 + node_md_disks{device="md127",state="spare"} 0 + node_md_disks{device="md201",state="active"} 2 + node_md_disks{device="md201",state="failed"} 0 + node_md_disks{device="md201",state="spare"} 0 + node_md_disks{device="md219",state="active"} 0 + node_md_disks{device="md219",state="failed"} 0 + node_md_disks{device="md219",state="spare"} 3 + node_md_disks{device="md3",state="active"} 8 + node_md_disks{device="md3",state="failed"} 0 + node_md_disks{device="md3",state="spare"} 2 + node_md_disks{device="md4",state="active"} 0 + node_md_disks{device="md4",state="failed"} 1 + node_md_disks{device="md4",state="spare"} 1 + node_md_disks{device="md6",state="active"} 1 + node_md_disks{device="md6",state="failed"} 1 + node_md_disks{device="md6",state="spare"} 1 + node_md_disks{device="md7",state="active"} 3 + node_md_disks{device="md7",state="failed"} 1 + node_md_disks{device="md7",state="spare"} 0 + node_md_disks{device="md8",state="active"} 2 + node_md_disks{device="md8",state="failed"} 0 + node_md_disks{device="md8",state="spare"} 2 + node_md_disks{device="md9",state="active"} 4 + node_md_disks{device="md9",state="failed"} 2 + node_md_disks{device="md9",state="spare"} 1 + # HELP node_md_disks_required Total number of disks of device. + # TYPE node_md_disks_required gauge + node_md_disks_required{device="md0"} 2 + node_md_disks_required{device="md00"} 1 + node_md_disks_required{device="md10"} 2 + node_md_disks_required{device="md101"} 3 + node_md_disks_required{device="md11"} 2 + node_md_disks_required{device="md12"} 2 + node_md_disks_required{device="md120"} 2 + node_md_disks_required{device="md126"} 2 + node_md_disks_required{device="md127"} 2 + node_md_disks_required{device="md201"} 2 + node_md_disks_required{device="md219"} 0 + node_md_disks_required{device="md3"} 8 + node_md_disks_required{device="md4"} 0 + node_md_disks_required{device="md6"} 2 + node_md_disks_required{device="md7"} 4 + node_md_disks_required{device="md8"} 2 + node_md_disks_required{device="md9"} 4 + # HELP node_md_raid_disks Number of raid disks on device. 
+ # TYPE node_md_raid_disks gauge + node_md_raid_disks{device="md0"} 2 + node_md_raid_disks{device="md1"} 2 + node_md_raid_disks{device="md10"} 4 + node_md_raid_disks{device="md4"} 3 + node_md_raid_disks{device="md5"} 3 + node_md_raid_disks{device="md6"} 4 + # HELP node_md_state Indicates the state of md-device. + # TYPE node_md_state gauge + node_md_state{device="md0",state="active"} 1 + node_md_state{device="md0",state="check"} 0 + node_md_state{device="md0",state="inactive"} 0 + node_md_state{device="md0",state="recovering"} 0 + node_md_state{device="md0",state="resync"} 0 + node_md_state{device="md00",state="active"} 1 + node_md_state{device="md00",state="check"} 0 + node_md_state{device="md00",state="inactive"} 0 + node_md_state{device="md00",state="recovering"} 0 + node_md_state{device="md00",state="resync"} 0 + node_md_state{device="md10",state="active"} 1 + node_md_state{device="md10",state="check"} 0 + node_md_state{device="md10",state="inactive"} 0 + node_md_state{device="md10",state="recovering"} 0 + node_md_state{device="md10",state="resync"} 0 + node_md_state{device="md101",state="active"} 1 + node_md_state{device="md101",state="check"} 0 + node_md_state{device="md101",state="inactive"} 0 + node_md_state{device="md101",state="recovering"} 0 + node_md_state{device="md101",state="resync"} 0 + node_md_state{device="md11",state="active"} 0 + node_md_state{device="md11",state="check"} 0 + node_md_state{device="md11",state="inactive"} 0 + node_md_state{device="md11",state="recovering"} 0 + node_md_state{device="md11",state="resync"} 1 + node_md_state{device="md12",state="active"} 1 + node_md_state{device="md12",state="check"} 0 + node_md_state{device="md12",state="inactive"} 0 + node_md_state{device="md12",state="recovering"} 0 + node_md_state{device="md12",state="resync"} 0 + node_md_state{device="md120",state="active"} 1 + node_md_state{device="md120",state="check"} 0 + node_md_state{device="md120",state="inactive"} 0 + node_md_state{device="md120",state="recovering"} 0 + node_md_state{device="md120",state="resync"} 0 + node_md_state{device="md126",state="active"} 1 + node_md_state{device="md126",state="check"} 0 + node_md_state{device="md126",state="inactive"} 0 + node_md_state{device="md126",state="recovering"} 0 + node_md_state{device="md126",state="resync"} 0 + node_md_state{device="md127",state="active"} 1 + node_md_state{device="md127",state="check"} 0 + node_md_state{device="md127",state="inactive"} 0 + node_md_state{device="md127",state="recovering"} 0 + node_md_state{device="md127",state="resync"} 0 + node_md_state{device="md201",state="active"} 0 + node_md_state{device="md201",state="check"} 1 + node_md_state{device="md201",state="inactive"} 0 + node_md_state{device="md201",state="recovering"} 0 + node_md_state{device="md201",state="resync"} 0 + node_md_state{device="md219",state="active"} 0 + node_md_state{device="md219",state="check"} 0 + node_md_state{device="md219",state="inactive"} 1 + node_md_state{device="md219",state="recovering"} 0 + node_md_state{device="md219",state="resync"} 0 + node_md_state{device="md3",state="active"} 1 + node_md_state{device="md3",state="check"} 0 + node_md_state{device="md3",state="inactive"} 0 + node_md_state{device="md3",state="recovering"} 0 + node_md_state{device="md3",state="resync"} 0 + node_md_state{device="md4",state="active"} 0 + node_md_state{device="md4",state="check"} 0 + node_md_state{device="md4",state="inactive"} 1 + node_md_state{device="md4",state="recovering"} 0 + node_md_state{device="md4",state="resync"} 0 + 
node_md_state{device="md6",state="active"} 0 + node_md_state{device="md6",state="check"} 0 + node_md_state{device="md6",state="inactive"} 0 + node_md_state{device="md6",state="recovering"} 1 + node_md_state{device="md6",state="resync"} 0 + node_md_state{device="md7",state="active"} 1 + node_md_state{device="md7",state="check"} 0 + node_md_state{device="md7",state="inactive"} 0 + node_md_state{device="md7",state="recovering"} 0 + node_md_state{device="md7",state="resync"} 0 + node_md_state{device="md8",state="active"} 0 + node_md_state{device="md8",state="check"} 0 + node_md_state{device="md8",state="inactive"} 0 + node_md_state{device="md8",state="recovering"} 0 + node_md_state{device="md8",state="resync"} 1 + node_md_state{device="md9",state="active"} 0 + node_md_state{device="md9",state="check"} 0 + node_md_state{device="md9",state="inactive"} 0 + node_md_state{device="md9",state="recovering"} 0 + node_md_state{device="md9",state="resync"} 1 +` + logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ + Level: slog.LevelError, + AddSource: true, + })) + collector, err := NewMdadmCollector(logger) + if err != nil { + panic(err) + } + c, err := NewTestMdadmCollector(logger) + if err != nil { + t.Fatal(err) + } + reg := prometheus.NewRegistry() + reg.MustRegister(c) + + sink := make(chan prometheus.Metric) + go func() { + err := collector.Update(sink) + if err != nil { + panic(err) + } + close(sink) + }() + + err = testutil.GatherAndCompare(reg, strings.NewReader(testcase)) + if err != nil { + t.Fatal(err) + } +} diff --git a/collector/meminfo.go b/collector/meminfo.go index 38b2326883..cc69fe742f 100644 --- a/collector/meminfo.go +++ b/collector/meminfo.go @@ -11,8 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build darwin linux openbsd -// +build !nomeminfo +//go:build (darwin || linux || openbsd || netbsd || aix) && !nomeminfo package collector @@ -20,8 +19,6 @@ import ( "fmt" "strings" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" ) @@ -29,19 +26,10 @@ const ( memInfoSubsystem = "memory" ) -type meminfoCollector struct { - logger log.Logger -} - func init() { registerCollector("meminfo", defaultEnabled, NewMeminfoCollector) } -// NewMeminfoCollector returns a new Collector exposing memory stats. -func NewMeminfoCollector(logger log.Logger) (Collector, error) { - return &meminfoCollector{logger}, nil -} - // Update calls (*meminfoCollector).getMemInfo to get the platform specific // memory metrics. func (c *meminfoCollector) Update(ch chan<- prometheus.Metric) error { @@ -50,7 +38,7 @@ func (c *meminfoCollector) Update(ch chan<- prometheus.Metric) error { if err != nil { return fmt.Errorf("couldn't get meminfo: %w", err) } - level.Debug(c.logger).Log("msg", "Set node_mem", "memInfo", memInfo) + c.logger.Debug("Set node_mem", "memInfo", fmt.Sprintf("%v", memInfo)) for k, v := range memInfo { if strings.HasSuffix(k, "_total") { metricType = prometheus.CounterValue diff --git a/collector/meminfo_aix.go b/collector/meminfo_aix.go new file mode 100644 index 0000000000..52e84a1e23 --- /dev/null +++ b/collector/meminfo_aix.go @@ -0,0 +1,50 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nomeminfo + +package collector + +import ( + "log/slog" + + "github.com/power-devops/perfstat" +) + +type meminfoCollector struct { + logger *slog.Logger +} + +// NewMeminfoCollector returns a new Collector exposing memory stats. +func NewMeminfoCollector(logger *slog.Logger) (Collector, error) { + return &meminfoCollector{ + logger: logger, + }, nil +} + +func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { + stats, err := perfstat.MemoryTotalStat() + if err != nil { + return nil, err + } + + return map[string]float64{ + "total_bytes": float64(stats.RealTotal * 4096), + "free_bytes": float64(stats.RealFree * 4096), + "available_bytes": float64(stats.RealAvailable * 4096), + "process_bytes": float64(stats.RealProcess * 4096), + "paging_space_total_bytes": float64(stats.PgSpTotal * 4096), + "paging_space_free_bytes": float64(stats.PgSpFree * 4096), + "page_scans_total": float64(stats.Scans), + }, nil +} diff --git a/collector/meminfo_darwin.go b/collector/meminfo_darwin.go index da443cdd69..947857f944 100644 --- a/collector/meminfo_darwin.go +++ b/collector/meminfo_darwin.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nomeminfo +//go:build !nomeminfo package collector @@ -23,11 +23,23 @@ import "C" import ( "encoding/binary" "fmt" + "log/slog" "unsafe" "golang.org/x/sys/unix" ) +type meminfoCollector struct { + logger *slog.Logger +} + +// NewMeminfoCollector returns a new Collector exposing memory stats. +func NewMeminfoCollector(logger *slog.Logger) (Collector, error) { + return &meminfoCollector{ + logger: logger, + }, nil +} + func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { host := C.mach_host_self() infoCount := C.mach_msg_type_number_t(C.HOST_VM_INFO64_COUNT) @@ -39,7 +51,7 @@ func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { &infoCount, ) if ret != C.KERN_SUCCESS { - return nil, fmt.Errorf("Couldn't get memory statistics, host_statistics returned %d", ret) + return nil, fmt.Errorf("couldn't get memory statistics, host_statistics returned %d", ret) } totalb, err := unix.Sysctl("hw.memsize") if err != nil { @@ -67,6 +79,8 @@ func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { "free_bytes": ps * float64(vmstat.free_count), "swapped_in_bytes_total": ps * float64(vmstat.pageins), "swapped_out_bytes_total": ps * float64(vmstat.pageouts), + "internal_bytes": ps * float64(vmstat.internal_page_count), + "purgeable_bytes": ps * float64(vmstat.purgeable_count), "total_bytes": float64(total), "swap_used_bytes": float64(swap.xsu_used), "swap_total_bytes": float64(swap.xsu_total), diff --git a/collector/meminfo_linux.go b/collector/meminfo_linux.go index 88505da6bf..88c18d76d3 100644 --- a/collector/meminfo_linux.go +++ b/collector/meminfo_linux.go @@ -11,64 +11,200 @@ // See the License for the specific language governing permissions and // limitations under the License. 
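// --- Illustrative sketch, not part of the patch above: how the new AIX meminfo
// collector derives byte values. perfstat reports memory in 4 KiB pages, so each
// counter is scaled by the page size, exactly as meminfo_aix.go does. The field
// names follow the github.com/power-devops/perfstat package as used in that file;
// this standalone main is hypothetical and only builds on AIX.
package main

import (
	"fmt"

	"github.com/power-devops/perfstat"
)

const aixPageSize = 4096 // perfstat counts memory in 4 KiB pages

func main() {
	stats, err := perfstat.MemoryTotalStat()
	if err != nil {
		panic(err)
	}
	// Convert page counts to bytes before reporting them.
	fmt.Printf("total: %d bytes, free: %d bytes\n",
		stats.RealTotal*aixPageSize, stats.RealFree*aixPageSize)
}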
-// +build !nomeminfo +//go:build !nomeminfo package collector import ( - "bufio" "fmt" - "io" - "os" - "regexp" - "strconv" - "strings" -) + "log/slog" -var ( - reParens = regexp.MustCompile(`\((.*)\)`) + "github.com/prometheus/procfs" ) -func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { - file, err := os.Open(procFilePath("meminfo")) +type meminfoCollector struct { + fs procfs.FS + logger *slog.Logger +} + +// NewMeminfoCollector returns a new Collector exposing memory stats. +func NewMeminfoCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to open procfs: %w", err) } - defer file.Close() - return parseMemInfo(file) + return &meminfoCollector{ + logger: logger, + fs: fs, + }, nil } -func parseMemInfo(r io.Reader) (map[string]float64, error) { - var ( - memInfo = map[string]float64{} - scanner = bufio.NewScanner(r) - ) +func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { + meminfo, err := c.fs.Meminfo() + if err != nil { + return nil, fmt.Errorf("failed to get memory info: %w", err) + } + + metrics := make(map[string]float64) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(line) - // Workaround for empty lines occasionally occur in CentOS 6.2 kernel 3.10.90. - if len(parts) == 0 { - continue - } - fv, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - return nil, fmt.Errorf("invalid value in meminfo: %w", err) - } - key := parts[0][:len(parts[0])-1] // remove trailing : from key - // Active(anon) -> Active_anon - key = reParens.ReplaceAllString(key, "_${1}") - switch len(parts) { - case 2: // no unit - case 3: // has unit, we presume kB - fv *= 1024 - key = key + "_bytes" - default: - return nil, fmt.Errorf("invalid line in meminfo: %s", line) - } - memInfo[key] = fv + if meminfo.ActiveBytes != nil { + metrics["Active_bytes"] = float64(*meminfo.ActiveBytes) + } + if meminfo.ActiveAnonBytes != nil { + metrics["Active_anon_bytes"] = float64(*meminfo.ActiveAnonBytes) + } + if meminfo.ActiveFileBytes != nil { + metrics["Active_file_bytes"] = float64(*meminfo.ActiveFileBytes) + } + if meminfo.AnonHugePagesBytes != nil { + metrics["AnonHugePages_bytes"] = float64(*meminfo.AnonHugePagesBytes) + } + if meminfo.AnonPagesBytes != nil { + metrics["AnonPages_bytes"] = float64(*meminfo.AnonPagesBytes) + } + if meminfo.BounceBytes != nil { + metrics["Bounce_bytes"] = float64(*meminfo.BounceBytes) + } + if meminfo.BuffersBytes != nil { + metrics["Buffers_bytes"] = float64(*meminfo.BuffersBytes) + } + if meminfo.CachedBytes != nil { + metrics["Cached_bytes"] = float64(*meminfo.CachedBytes) + } + if meminfo.CmaFreeBytes != nil { + metrics["CmaFree_bytes"] = float64(*meminfo.CmaFreeBytes) + } + if meminfo.CmaTotalBytes != nil { + metrics["CmaTotal_bytes"] = float64(*meminfo.CmaTotalBytes) + } + if meminfo.CommitLimitBytes != nil { + metrics["CommitLimit_bytes"] = float64(*meminfo.CommitLimitBytes) + } + if meminfo.CommittedASBytes != nil { + metrics["Committed_AS_bytes"] = float64(*meminfo.CommittedASBytes) + } + if meminfo.DirectMap1GBytes != nil { + metrics["DirectMap1G_bytes"] = float64(*meminfo.DirectMap1GBytes) + } + if meminfo.DirectMap2MBytes != nil { + metrics["DirectMap2M_bytes"] = float64(*meminfo.DirectMap2MBytes) + } + if meminfo.DirectMap4kBytes != nil { + metrics["DirectMap4k_bytes"] = float64(*meminfo.DirectMap4kBytes) + } + if meminfo.DirtyBytes != nil { + metrics["Dirty_bytes"] = float64(*meminfo.DirtyBytes) + } + 
if meminfo.HardwareCorruptedBytes != nil { + metrics["HardwareCorrupted_bytes"] = float64(*meminfo.HardwareCorruptedBytes) + } + if meminfo.HugepagesizeBytes != nil { + metrics["Hugepagesize_bytes"] = float64(*meminfo.HugepagesizeBytes) + } + if meminfo.InactiveBytes != nil { + metrics["Inactive_bytes"] = float64(*meminfo.InactiveBytes) + } + if meminfo.InactiveAnonBytes != nil { + metrics["Inactive_anon_bytes"] = float64(*meminfo.InactiveAnonBytes) + } + if meminfo.InactiveFileBytes != nil { + metrics["Inactive_file_bytes"] = float64(*meminfo.InactiveFileBytes) + } + if meminfo.KernelStackBytes != nil { + metrics["KernelStack_bytes"] = float64(*meminfo.KernelStackBytes) + } + if meminfo.MappedBytes != nil { + metrics["Mapped_bytes"] = float64(*meminfo.MappedBytes) + } + if meminfo.MemAvailableBytes != nil { + metrics["MemAvailable_bytes"] = float64(*meminfo.MemAvailableBytes) + } + if meminfo.MemFreeBytes != nil { + metrics["MemFree_bytes"] = float64(*meminfo.MemFreeBytes) + } + if meminfo.MemTotalBytes != nil { + metrics["MemTotal_bytes"] = float64(*meminfo.MemTotalBytes) + } + if meminfo.MlockedBytes != nil { + metrics["Mlocked_bytes"] = float64(*meminfo.MlockedBytes) + } + if meminfo.NFSUnstableBytes != nil { + metrics["NFS_Unstable_bytes"] = float64(*meminfo.NFSUnstableBytes) + } + if meminfo.PageTablesBytes != nil { + metrics["PageTables_bytes"] = float64(*meminfo.PageTablesBytes) + } + if meminfo.PercpuBytes != nil { + metrics["Percpu_bytes"] = float64(*meminfo.PercpuBytes) + } + if meminfo.SReclaimableBytes != nil { + metrics["SReclaimable_bytes"] = float64(*meminfo.SReclaimableBytes) + } + if meminfo.SUnreclaimBytes != nil { + metrics["SUnreclaim_bytes"] = float64(*meminfo.SUnreclaimBytes) + } + if meminfo.ShmemBytes != nil { + metrics["Shmem_bytes"] = float64(*meminfo.ShmemBytes) + } + if meminfo.ShmemHugePagesBytes != nil { + metrics["ShmemHugePages_bytes"] = float64(*meminfo.ShmemHugePagesBytes) + } + if meminfo.ShmemPmdMappedBytes != nil { + metrics["ShmemPmdMapped_bytes"] = float64(*meminfo.ShmemPmdMappedBytes) + } + if meminfo.SlabBytes != nil { + metrics["Slab_bytes"] = float64(*meminfo.SlabBytes) + } + if meminfo.SwapCachedBytes != nil { + metrics["SwapCached_bytes"] = float64(*meminfo.SwapCachedBytes) + } + if meminfo.SwapFreeBytes != nil { + metrics["SwapFree_bytes"] = float64(*meminfo.SwapFreeBytes) + } + if meminfo.SwapTotalBytes != nil { + metrics["SwapTotal_bytes"] = float64(*meminfo.SwapTotalBytes) + } + if meminfo.UnevictableBytes != nil { + metrics["Unevictable_bytes"] = float64(*meminfo.UnevictableBytes) + } + if meminfo.VmallocChunkBytes != nil { + metrics["VmallocChunk_bytes"] = float64(*meminfo.VmallocChunkBytes) + } + if meminfo.VmallocTotalBytes != nil { + metrics["VmallocTotal_bytes"] = float64(*meminfo.VmallocTotalBytes) + } + if meminfo.VmallocUsedBytes != nil { + metrics["VmallocUsed_bytes"] = float64(*meminfo.VmallocUsedBytes) + } + if meminfo.WritebackBytes != nil { + metrics["Writeback_bytes"] = float64(*meminfo.WritebackBytes) + } + if meminfo.WritebackTmpBytes != nil { + metrics["WritebackTmp_bytes"] = float64(*meminfo.WritebackTmpBytes) + } + if meminfo.ZswapBytes != nil { + metrics["Zswap_bytes"] = float64(*meminfo.ZswapBytes) + } + if meminfo.ZswappedBytes != nil { + metrics["Zswapped_bytes"] = float64(*meminfo.ZswappedBytes) + } + + // These fields are always in bytes and do not have `Bytes` + // suffixed counterparts in the procfs.Meminfo struct, nor do + // they have `_bytes` suffix on the metric names. 
+ if meminfo.HugePagesFree != nil { + metrics["HugePages_Free"] = float64(*meminfo.HugePagesFree) + } + if meminfo.HugePagesRsvd != nil { + metrics["HugePages_Rsvd"] = float64(*meminfo.HugePagesRsvd) + } + if meminfo.HugePagesSurp != nil { + metrics["HugePages_Surp"] = float64(*meminfo.HugePagesSurp) + } + if meminfo.HugePagesTotal != nil { + metrics["HugePages_Total"] = float64(*meminfo.HugePagesTotal) } - return memInfo, scanner.Err() + return metrics, nil } diff --git a/collector/meminfo_linux_test.go b/collector/meminfo_linux_test.go index dc0aff58d7..2ad7d50541 100644 --- a/collector/meminfo_linux_test.go +++ b/collector/meminfo_linux_test.go @@ -11,23 +11,28 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nomeminfo + package collector import ( - "os" + "io" + "log/slog" "testing" ) func TestMemInfo(t *testing.T) { - file, err := os.Open("fixtures/proc/meminfo") + *procPath = "fixtures/proc" + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + + collector, err := NewMeminfoCollector(logger) if err != nil { - t.Fatal(err) + panic(err) } - defer file.Close() - memInfo, err := parseMemInfo(file) + memInfo, err := collector.(*meminfoCollector).getMemInfo() if err != nil { - t.Fatal(err) + panic(err) } if want, got := 3831959552.0, memInfo["MemTotal_bytes"]; want != got { diff --git a/collector/meminfo_netbsd.go b/collector/meminfo_netbsd.go new file mode 100644 index 0000000000..549cd6a55d --- /dev/null +++ b/collector/meminfo_netbsd.go @@ -0,0 +1,55 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nomeminfo + +package collector + +import ( + "log/slog" + + "golang.org/x/sys/unix" +) + +type meminfoCollector struct { + logger *slog.Logger +} + +// NewMeminfoCollector returns a new Collector exposing memory stats. 
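// --- Illustrative sketch, not part of the patch above: the procfs-based pattern
// the rewritten meminfo_linux.go relies on. procfs.Meminfo exposes optional fields
// as pointers (nil when the kernel does not report them), so every field is
// nil-checked before it becomes a metric value. The main function and the chosen
// fields are only an example.
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	meminfo, err := fs.Meminfo()
	if err != nil {
		panic(err)
	}

	// Only fields present on this kernel end up in the map.
	metrics := map[string]float64{}
	if meminfo.MemTotalBytes != nil {
		metrics["MemTotal_bytes"] = float64(*meminfo.MemTotalBytes)
	}
	if meminfo.MemAvailableBytes != nil {
		metrics["MemAvailable_bytes"] = float64(*meminfo.MemAvailableBytes)
	}
	fmt.Println(metrics)
}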
+func NewMeminfoCollector(logger *slog.Logger) (Collector, error) { + return &meminfoCollector{ + logger: logger, + }, nil +} + +func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { + uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") + if err != nil { + return nil, err + } + + ps := float64(uvmexp.Pagesize) + + // see uvm(9) + return map[string]float64{ + "active_bytes": ps * float64(uvmexp.Active), + "free_bytes": ps * float64(uvmexp.Free), + "inactive_bytes": ps * float64(uvmexp.Inactive), + "size_bytes": ps * float64(uvmexp.Npages), + "swap_size_bytes": ps * float64(uvmexp.Swpages), + "swap_used_bytes": ps * float64(uvmexp.Swpginuse), + "swapped_in_pages_bytes_total": ps * float64(uvmexp.Pgswapin), + "swapped_out_pages_bytes_total": ps * float64(uvmexp.Pgswapout), + "wired_bytes": ps * float64(uvmexp.Wired), + }, nil +} diff --git a/collector/meminfo_numa_linux.go b/collector/meminfo_numa_linux.go index f3d93073ed..cd494928a3 100644 --- a/collector/meminfo_numa_linux.go +++ b/collector/meminfo_numa_linux.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nomeminfo_numa +//go:build !nomeminfo_numa package collector @@ -19,13 +19,13 @@ import ( "bufio" "fmt" "io" + "log/slog" "os" "path/filepath" "regexp" "strconv" "strings" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -44,7 +44,7 @@ type meminfoMetric struct { type meminfoNumaCollector struct { metricDescs map[string]*prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -52,7 +52,7 @@ func init() { } // NewMeminfoNumaCollector returns a new Collector exposing memory stats. -func NewMeminfoNumaCollector(logger log.Logger) (Collector, error) { +func NewMeminfoNumaCollector(logger *slog.Logger) (Collector, error) { return &meminfoNumaCollector{ metricDescs: map[string]*prometheus.Desc{}, logger: logger, diff --git a/collector/meminfo_numa_linux_test.go b/collector/meminfo_numa_linux_test.go index a17714e8df..1d251fcf9f 100644 --- a/collector/meminfo_numa_linux_test.go +++ b/collector/meminfo_numa_linux_test.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nomeminfo_numa + package collector import ( diff --git a/collector/meminfo_openbsd.go b/collector/meminfo_openbsd.go index 81102d598a..250905105a 100644 --- a/collector/meminfo_openbsd.go +++ b/collector/meminfo_openbsd.go @@ -11,13 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build openbsd -// +build !nomeminfo +//go:build !nomeminfo && !amd64 package collector import ( "fmt" + "log/slog" ) /* @@ -53,6 +53,17 @@ sysctl_bcstats(struct bcachestats *bcstats) */ import "C" +type meminfoCollector struct { + logger *slog.Logger +} + +// NewMeminfoCollector returns a new Collector exposing memory stats. 
+func NewMeminfoCollector(logger *slog.Logger) (Collector, error) { + return &meminfoCollector{ + logger: logger, + }, nil +} + func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { var uvmexp C.struct_uvmexp var bcstats C.struct_bcachestats diff --git a/collector/meminfo_openbsd_amd64.go b/collector/meminfo_openbsd_amd64.go new file mode 100644 index 0000000000..729507263f --- /dev/null +++ b/collector/meminfo_openbsd_amd64.go @@ -0,0 +1,93 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nomeminfo + +package collector + +import ( + "log/slog" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + CTL_VFS = 10 + VFS_GENERIC = 0 + VFS_BCACHESTAT = 3 +) + +type bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Kvaslots_avail int64 + Highflips int64 + Highflops int64 + Dmaflips int64 +} + +type meminfoCollector struct { + logger *slog.Logger +} + +// NewMeminfoCollector returns a new Collector exposing memory stats. +func NewMeminfoCollector(logger *slog.Logger) (Collector, error) { + return &meminfoCollector{ + logger: logger, + }, nil +} + +func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { + uvmexpb, err := unix.SysctlRaw("vm.uvmexp") + if err != nil { + return nil, err + } + + mib := [3]_C_int{CTL_VFS, VFS_GENERIC, VFS_BCACHESTAT} + bcstatsb, err := sysctl(mib[:]) + if err != nil { + return nil, err + } + + uvmexp := *(*unix.Uvmexp)(unsafe.Pointer(&uvmexpb[0])) + ps := float64(uvmexp.Pagesize) + + bcstats := *(*bcachestats)(unsafe.Pointer(&bcstatsb[0])) + + // see uvm(9) + return map[string]float64{ + "active_bytes": ps * float64(uvmexp.Active), + "cache_bytes": ps * float64(bcstats.Numbufpages), + "free_bytes": ps * float64(uvmexp.Free), + "inactive_bytes": ps * float64(uvmexp.Inactive), + "size_bytes": ps * float64(uvmexp.Npages), + "swap_size_bytes": ps * float64(uvmexp.Swpages), + "swap_used_bytes": ps * float64(uvmexp.Swpginuse), + "swapped_in_pages_bytes_total": ps * float64(uvmexp.Pgswapin), + "swapped_out_pages_bytes_total": ps * float64(uvmexp.Pgswapout), + "wired_bytes": ps * float64(uvmexp.Wired), + }, nil +} diff --git a/collector/memory_bsd.go b/collector/memory_bsd.go index ac8b30100a..5bec6155d0 100644 --- a/collector/memory_bsd.go +++ b/collector/memory_bsd.go @@ -11,15 +11,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build freebsd dragonfly -// +build !nomeminfo +//go:build (freebsd || dragonfly) && !nomeminfo package collector import ( "fmt" + "log/slog" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) @@ -32,7 +31,7 @@ type memoryCollector struct { pageSize uint64 sysctls []bsdSysctl kvm kvm - logger log.Logger + logger *slog.Logger } func init() { @@ -40,7 +39,7 @@ func init() { } // NewMemoryCollector returns a new Collector exposing memory stats. -func NewMemoryCollector(logger log.Logger) (Collector, error) { +func NewMemoryCollector(logger *slog.Logger) (Collector, error) { tmp32, err := unix.SysctlUint32("vm.stats.vm.v_page_size") if err != nil { return nil, fmt.Errorf("sysctl(vm.stats.vm.v_page_size) failed: %w", err) @@ -81,6 +80,13 @@ func NewMemoryCollector(logger log.Logger) (Collector, error) { mib: "vm.stats.vm.v_wire_count", conversion: fromPage, }, + { + name: "user_wired_bytes", + description: "Locked in memory by user, mlock, etc", + mib: "vm.stats.vm.v_user_wire_count", + conversion: fromPage, + dataType: bsdSysctlTypeCLong, + }, { name: "cache_bytes", description: "Almost free, backed by swap or files, available for re-allocation", @@ -99,6 +105,12 @@ func NewMemoryCollector(logger log.Logger) (Collector, error) { mib: "vm.stats.vm.v_free_count", conversion: fromPage, }, + { + name: "laundry_bytes", + description: "Dirty not recently used by userland", + mib: "vm.stats.vm.v_laundry_count", + conversion: fromPage, + }, { name: "size_bytes", description: "Total physical memory size", diff --git a/collector/mountstats_linux.go b/collector/mountstats_linux.go index 4102067161..319b89149c 100644 --- a/collector/mountstats_linux.go +++ b/collector/mountstats_linux.go @@ -11,15 +11,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nomountstats +//go:build !nomountstats package collector import ( "fmt" + "log/slog" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) @@ -95,7 +94,7 @@ type mountStatsCollector struct { proc procfs.Proc - logger log.Logger + logger *slog.Logger } // used to uniquely identify an NFS mount to prevent duplicates @@ -110,7 +109,7 @@ func init() { } // NewMountStatsCollector returns a new Collector exposing NFS statistics. 
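// --- Illustrative sketch, not part of the patch above: the conversion that
// memory_bsd.go applies to its page-count sysctls. Counts such as
// vm.stats.vm.v_free_count are reported in pages and are multiplied by
// vm.stats.vm.v_page_size to obtain bytes. This standalone main is hypothetical
// and only builds on FreeBSD/DragonFly.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pageSize, err := unix.SysctlUint32("vm.stats.vm.v_page_size")
	if err != nil {
		panic(err)
	}
	freePages, err := unix.SysctlUint32("vm.stats.vm.v_free_count")
	if err != nil {
		panic(err)
	}
	// Pages multiplied by the page size yields the byte value the collector exports.
	fmt.Printf("free memory: %d bytes\n", uint64(freePages)*uint64(pageSize))
}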
-func NewMountStatsCollector(logger log.Logger) (Collector, error) { +func NewMountStatsCollector(logger *slog.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) @@ -537,15 +536,16 @@ func (c *mountStatsCollector) Update(ch chan<- prometheus.Metric) error { mountAddress = miStats.SuperOptions["addr"] } - deviceIdentifier := nfsDeviceIdentifier{m.Device, stats.Transport.Protocol, mountAddress} - i := deviceList[deviceIdentifier] - if i { - level.Debug(c.logger).Log("msg", "Skipping duplicate device entry", "device", deviceIdentifier) - continue + for k := range stats.Transport { + deviceIdentifier := nfsDeviceIdentifier{m.Device, stats.Transport[k].Protocol, mountAddress} + i := deviceList[deviceIdentifier] + if i { + c.logger.Debug("Skipping duplicate device entry", "device", deviceIdentifier) + break + } + deviceList[deviceIdentifier] = true + c.updateNFSStats(ch, stats, m.Device, stats.Transport[k].Protocol, mountAddress) } - - deviceList[deviceIdentifier] = true - c.updateNFSStats(ch, stats, m.Device, stats.Transport.Protocol, mountAddress) } return nil @@ -616,75 +616,77 @@ func (c *mountStatsCollector) updateNFSStats(ch chan<- prometheus.Metric, s *pro labelValues..., ) - ch <- prometheus.MustNewConstMetric( - c.NFSTransportBindTotal, - prometheus.CounterValue, - float64(s.Transport.Bind), - labelValues..., - ) + for i := range s.Transport { + ch <- prometheus.MustNewConstMetric( + c.NFSTransportBindTotal, + prometheus.CounterValue, + float64(s.Transport[i].Bind), + labelValues..., + ) - ch <- prometheus.MustNewConstMetric( - c.NFSTransportConnectTotal, - prometheus.CounterValue, - float64(s.Transport.Connect), - labelValues..., - ) + ch <- prometheus.MustNewConstMetric( + c.NFSTransportConnectTotal, + prometheus.CounterValue, + float64(s.Transport[i].Connect), + labelValues..., + ) - ch <- prometheus.MustNewConstMetric( - c.NFSTransportIdleTimeSeconds, - prometheus.GaugeValue, - float64(s.Transport.IdleTimeSeconds%float64Mantissa), - labelValues..., - ) + ch <- prometheus.MustNewConstMetric( + c.NFSTransportIdleTimeSeconds, + prometheus.GaugeValue, + float64(s.Transport[i].IdleTimeSeconds%float64Mantissa), + labelValues..., + ) - ch <- prometheus.MustNewConstMetric( - c.NFSTransportSendsTotal, - prometheus.CounterValue, - float64(s.Transport.Sends), - labelValues..., - ) + ch <- prometheus.MustNewConstMetric( + c.NFSTransportSendsTotal, + prometheus.CounterValue, + float64(s.Transport[i].Sends), + labelValues..., + ) - ch <- prometheus.MustNewConstMetric( - c.NFSTransportReceivesTotal, - prometheus.CounterValue, - float64(s.Transport.Receives), - labelValues..., - ) + ch <- prometheus.MustNewConstMetric( + c.NFSTransportReceivesTotal, + prometheus.CounterValue, + float64(s.Transport[i].Receives), + labelValues..., + ) - ch <- prometheus.MustNewConstMetric( - c.NFSTransportBadTransactionIDsTotal, - prometheus.CounterValue, - float64(s.Transport.BadTransactionIDs), - labelValues..., - ) + ch <- prometheus.MustNewConstMetric( + c.NFSTransportBadTransactionIDsTotal, + prometheus.CounterValue, + float64(s.Transport[i].BadTransactionIDs), + labelValues..., + ) - ch <- prometheus.MustNewConstMetric( - c.NFSTransportBacklogQueueTotal, - prometheus.CounterValue, - float64(s.Transport.CumulativeBacklog), - labelValues..., - ) + ch <- prometheus.MustNewConstMetric( + c.NFSTransportBacklogQueueTotal, + prometheus.CounterValue, + float64(s.Transport[i].CumulativeBacklog), + labelValues..., + ) - ch <- 
prometheus.MustNewConstMetric( - c.NFSTransportMaximumRPCSlots, - prometheus.GaugeValue, - float64(s.Transport.MaximumRPCSlotsUsed), - labelValues..., - ) + ch <- prometheus.MustNewConstMetric( + c.NFSTransportMaximumRPCSlots, + prometheus.GaugeValue, + float64(s.Transport[i].MaximumRPCSlotsUsed), + labelValues..., + ) - ch <- prometheus.MustNewConstMetric( - c.NFSTransportSendingQueueTotal, - prometheus.CounterValue, - float64(s.Transport.CumulativeSendingQueue), - labelValues..., - ) + ch <- prometheus.MustNewConstMetric( + c.NFSTransportSendingQueueTotal, + prometheus.CounterValue, + float64(s.Transport[i].CumulativeSendingQueue), + labelValues..., + ) - ch <- prometheus.MustNewConstMetric( - c.NFSTransportPendingQueueTotal, - prometheus.CounterValue, - float64(s.Transport.CumulativePendingQueue), - labelValues..., - ) + ch <- prometheus.MustNewConstMetric( + c.NFSTransportPendingQueueTotal, + prometheus.CounterValue, + float64(s.Transport[i].CumulativePendingQueue), + labelValues..., + ) + } for _, op := range s.Operations { opLabelValues := []string{export, protocol, mountAddress, op.Operation} diff --git a/collector/netclass_linux.go b/collector/netclass_linux.go index 0fde219101..7f8373dfb3 100644 --- a/collector/netclass_linux.go +++ b/collector/netclass_linux.go @@ -11,23 +11,28 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nonetclass -// +build linux +//go:build !nonetclass && linux package collector import ( + "errors" "fmt" + "log/slog" + "net" + "os" "regexp" + "sync" - "github.com/go-kit/kit/log" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" - "gopkg.in/alecthomas/kingpin.v2" ) var ( netclassIgnoredDevices = kingpin.Flag("collector.netclass.ignored-devices", "Regexp of net devices to ignore for netclass collector.").Default("^$").String() + netclassInvalidSpeed = kingpin.Flag("collector.netclass.ignore-invalid-speed", "Ignore devices where the speed is invalid. This will be the default behavior in 2.x.").Bool() + netclassNetlink = kingpin.Flag("collector.netclass.netlink", "Use netlink to gather stats instead of /proc/net/dev.").Default("false").Bool() ) type netClassCollector struct { @@ -35,7 +40,8 @@ type netClassCollector struct { subsystem string ignoredDevicesPattern *regexp.Regexp metricDescs map[string]*prometheus.Desc - logger log.Logger + metricDescsMu sync.Mutex + logger *slog.Logger } func init() { @@ -43,7 +49,7 @@ func init() { } // NewNetClassCollector returns a new Collector exposing network class stats. 
-func NewNetClassCollector(logger log.Logger) (Collector, error) { +func NewNetClassCollector(logger *slog.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) @@ -59,8 +65,19 @@ func NewNetClassCollector(logger log.Logger) (Collector, error) { } func (c *netClassCollector) Update(ch chan<- prometheus.Metric) error { + if *netclassNetlink { + return c.netClassRTNLUpdate(ch) + } + return c.netClassSysfsUpdate(ch) +} + +func (c *netClassCollector) netClassSysfsUpdate(ch chan<- prometheus.Metric) error { netClass, err := c.getNetClassInfo() if err != nil { + if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) { + c.logger.Debug("Could not read netclass file", "err", err) + return ErrNoData + } return fmt.Errorf("could not get net class info: %w", err) } for _, ifaceInfo := range netClass { @@ -80,109 +97,92 @@ func (c *netClassCollector) Update(ch chan<- prometheus.Metric) error { infoDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "info"), "Non-numeric data from /sys/class/net/, value is always 1.", - []string{"device", "address", "broadcast", "duplex", "operstate", "ifalias"}, + []string{"device", "address", "broadcast", "duplex", "operstate", "adminstate", "ifalias"}, nil, ) infoValue := 1.0 - ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, ifaceInfo.Name, ifaceInfo.Address, ifaceInfo.Broadcast, ifaceInfo.Duplex, ifaceInfo.OperState, ifaceInfo.IfAlias) - - if ifaceInfo.AddrAssignType != nil { - pushMetric(ch, c.subsystem, "address_assign_type", *ifaceInfo.AddrAssignType, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.Carrier != nil { - pushMetric(ch, c.subsystem, "carrier", *ifaceInfo.Carrier, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.CarrierChanges != nil { - pushMetric(ch, c.subsystem, "carrier_changes_total", *ifaceInfo.CarrierChanges, ifaceInfo.Name, prometheus.CounterValue) - } - - if ifaceInfo.CarrierUpCount != nil { - pushMetric(ch, c.subsystem, "carrier_up_changes_total", *ifaceInfo.CarrierUpCount, ifaceInfo.Name, prometheus.CounterValue) - } - - if ifaceInfo.CarrierDownCount != nil { - pushMetric(ch, c.subsystem, "carrier_down_changes_total", *ifaceInfo.CarrierDownCount, ifaceInfo.Name, prometheus.CounterValue) - } - - if ifaceInfo.DevID != nil { - pushMetric(ch, c.subsystem, "device_id", *ifaceInfo.DevID, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.Dormant != nil { - pushMetric(ch, c.subsystem, "dormant", *ifaceInfo.Dormant, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.Flags != nil { - pushMetric(ch, c.subsystem, "flags", *ifaceInfo.Flags, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.IfIndex != nil { - pushMetric(ch, c.subsystem, "iface_id", *ifaceInfo.IfIndex, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.IfLink != nil { - pushMetric(ch, c.subsystem, "iface_link", *ifaceInfo.IfLink, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.LinkMode != nil { - pushMetric(ch, c.subsystem, "iface_link_mode", *ifaceInfo.LinkMode, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.MTU != nil { - pushMetric(ch, c.subsystem, "mtu_bytes", *ifaceInfo.MTU, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.NameAssignType != nil { - pushMetric(ch, c.subsystem, "name_assign_type", *ifaceInfo.NameAssignType, ifaceInfo.Name, prometheus.GaugeValue) - } - - if ifaceInfo.NetDevGroup != nil { - pushMetric(ch, c.subsystem, 
"net_dev_group", *ifaceInfo.NetDevGroup, ifaceInfo.Name, prometheus.GaugeValue) - } + ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, ifaceInfo.Name, ifaceInfo.Address, ifaceInfo.Broadcast, ifaceInfo.Duplex, ifaceInfo.OperState, getAdminState(ifaceInfo.Flags), ifaceInfo.IfAlias) + + pushMetric(ch, c.getFieldDesc("address_assign_type"), "address_assign_type", ifaceInfo.AddrAssignType, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("carrier"), "carrier", ifaceInfo.Carrier, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("carrier_changes_total"), "carrier_changes_total", ifaceInfo.CarrierChanges, prometheus.CounterValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("carrier_up_changes_total"), "carrier_up_changes_total", ifaceInfo.CarrierUpCount, prometheus.CounterValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("carrier_down_changes_total"), "carrier_down_changes_total", ifaceInfo.CarrierDownCount, prometheus.CounterValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("device_id"), "device_id", ifaceInfo.DevID, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("dormant"), "dormant", ifaceInfo.Dormant, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("flags"), "flags", ifaceInfo.Flags, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("iface_id"), "iface_id", ifaceInfo.IfIndex, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("iface_link"), "iface_link", ifaceInfo.IfLink, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("iface_link_mode"), "iface_link_mode", ifaceInfo.LinkMode, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("mtu_bytes"), "mtu_bytes", ifaceInfo.MTU, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("name_assign_type"), "name_assign_type", ifaceInfo.NameAssignType, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("net_dev_group"), "net_dev_group", ifaceInfo.NetDevGroup, prometheus.GaugeValue, ifaceInfo.Name) if ifaceInfo.Speed != nil { - speedBytes := int64(*ifaceInfo.Speed * 1000 * 1000 / 8) - pushMetric(ch, c.subsystem, "speed_bytes", speedBytes, ifaceInfo.Name, prometheus.GaugeValue) + // Some devices return -1 if the speed is unknown. 
+ if *ifaceInfo.Speed >= 0 || !*netclassInvalidSpeed { + speedBytes := int64(*ifaceInfo.Speed * 1000 * 1000 / 8) + pushMetric(ch, c.getFieldDesc("speed_bytes"), "speed_bytes", speedBytes, prometheus.GaugeValue, ifaceInfo.Name) + } } - if ifaceInfo.TxQueueLen != nil { - pushMetric(ch, c.subsystem, "transmit_queue_length", *ifaceInfo.TxQueueLen, ifaceInfo.Name, prometheus.GaugeValue) - } + pushMetric(ch, c.getFieldDesc("transmit_queue_length"), "transmit_queue_length", ifaceInfo.TxQueueLen, prometheus.GaugeValue, ifaceInfo.Name) + pushMetric(ch, c.getFieldDesc("protocol_type"), "protocol_type", ifaceInfo.Type, prometheus.GaugeValue, ifaceInfo.Name) - if ifaceInfo.Type != nil { - pushMetric(ch, c.subsystem, "protocol_type", *ifaceInfo.Type, ifaceInfo.Name, prometheus.GaugeValue) - } } return nil } -func pushMetric(ch chan<- prometheus.Metric, subsystem string, name string, value int64, ifaceName string, valueType prometheus.ValueType) { - fieldDesc := prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, name), - fmt.Sprintf("%s value of /sys/class/net/.", name), - []string{"device"}, - nil, - ) +func (c *netClassCollector) getFieldDesc(name string) *prometheus.Desc { + c.metricDescsMu.Lock() + defer c.metricDescsMu.Unlock() + + fieldDesc, exists := c.metricDescs[name] + + if !exists { + fieldDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, name), + fmt.Sprintf("Network device property: %s", name), + []string{"device"}, + nil, + ) + c.metricDescs[name] = fieldDesc + } - ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, float64(value), ifaceName) + return fieldDesc } func (c *netClassCollector) getNetClassInfo() (sysfs.NetClass, error) { - netClass, err := c.fs.NetClass() - + netClass := sysfs.NetClass{} + netDevices, err := c.fs.NetClassDevices() if err != nil { - return netClass, fmt.Errorf("error obtaining net class info: %w", err) + return netClass, err } - for device := range netClass { + for _, device := range netDevices { if c.ignoredDevicesPattern.MatchString(device) { - delete(netClass, device) + continue + } + interfaceClass, err := c.fs.NetClassByIface(device) + if err != nil { + return netClass, err } + netClass[device] = *interfaceClass } return netClass, nil } + +func getAdminState(flags *int64) string { + if flags == nil { + return "unknown" + } + + if *flags&int64(net.FlagUp) == 1 { + return "up" + } + + return "down" +} diff --git a/collector/netclass_rtnl_linux.go b/collector/netclass_rtnl_linux.go new file mode 100644 index 0000000000..6df34756a0 --- /dev/null +++ b/collector/netclass_rtnl_linux.go @@ -0,0 +1,228 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
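// --- Illustrative sketch, not part of the patch above: the descriptor-caching
// pattern behind netclass_linux.go's getFieldDesc. Descriptors are created once
// per metric name, stored in a map, and guarded by a mutex because collectors can
// be scraped concurrently. The push helper below is a simplified, hypothetical
// stand-in for the collector's pushMetric, which accepts several value types.
package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

type descCache struct {
	mu    sync.Mutex
	descs map[string]*prometheus.Desc
}

// get returns the cached descriptor for name, creating it on first use.
func (c *descCache) get(name string) *prometheus.Desc {
	c.mu.Lock()
	defer c.mu.Unlock()
	d, ok := c.descs[name]
	if !ok {
		d = prometheus.NewDesc(
			prometheus.BuildFQName("node", "network", name),
			fmt.Sprintf("Network device property: %s", name),
			[]string{"device"}, nil,
		)
		c.descs[name] = d
	}
	return d
}

// push emits one gauge for a possibly-absent interface attribute.
func push(ch chan<- prometheus.Metric, d *prometheus.Desc, value *int64, device string) {
	if value == nil {
		return // attribute not present for this interface
	}
	ch <- prometheus.MustNewConstMetric(d, prometheus.GaugeValue, float64(*value), device)
}

func main() {
	cache := &descCache{descs: map[string]*prometheus.Desc{}}
	ch := make(chan prometheus.Metric, 1)
	mtu := int64(1500)
	push(ch, cache.get("mtu_bytes"), &mtu, "eth0")
	fmt.Println(<-ch)
}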
+ +//go:build !nonetclass && linux + +package collector + +import ( + "errors" + "fmt" + "io/fs" + "path/filepath" + + "github.com/alecthomas/kingpin/v2" + "github.com/jsimonetti/rtnetlink/v2" + "github.com/mdlayher/ethtool" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +var ( + netclassRTNLWithStats = kingpin.Flag("collector.netclass_rtnl.with-stats", "Expose the statistics for each network device, replacing netdev collector.").Bool() + operstateStr = []string{ + "unknown", "notpresent", "down", "lowerlayerdown", "testing", + "dormant", "up", + } +) + +func (c *netClassCollector) netClassRTNLUpdate(ch chan<- prometheus.Metric) error { + linkModes := make(map[string]*ethtool.LinkMode) + lms, err := c.getLinkModes() + if err != nil { + if !errors.Is(errors.Unwrap(err), fs.ErrNotExist) { + return fmt.Errorf("could not get link modes: %w", err) + } + c.logger.Info("ETHTOOL netlink interface unavailable, duplex and linkspeed are not scraped.") + } else { + for _, lm := range lms { + if c.ignoredDevicesPattern.MatchString(lm.Interface.Name) { + continue + } + if lm.SpeedMegabits >= 0 { + speedBytes := uint64(lm.SpeedMegabits * 1000 * 1000 / 8) + pushMetric(ch, c.getFieldDesc("speed_bytes"), "speed_bytes", speedBytes, prometheus.GaugeValue, lm.Interface.Name) + } + linkModes[lm.Interface.Name] = lm + } + } + + // Get most attributes from Netlink + lMsgs, err := c.getNetClassInfoRTNL() + if err != nil { + return fmt.Errorf("could not get net class info: %w", err) + } + + relevantLinks := make([]rtnetlink.LinkMessage, 0, len(lMsgs)) + for _, msg := range lMsgs { + if !c.ignoredDevicesPattern.MatchString(msg.Attributes.Name) { + relevantLinks = append(relevantLinks, msg) + } + } + + // Read sysfs for attributes that Netlink doesn't expose + sysfsAttrs, err := getSysfsAttributes(relevantLinks) + if err != nil { + return fmt.Errorf("could not get sysfs device info: %w", err) + } + + // Parse all the info and update metrics + for _, msg := range relevantLinks { + upDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "up"), + "Value is 1 if operstate is 'up', 0 otherwise.", + []string{"device"}, + nil, + ) + upValue := 0.0 + if msg.Attributes.OperationalState == rtnetlink.OperStateUp { + upValue = 1.0 + } + ch <- prometheus.MustNewConstMetric(upDesc, prometheus.GaugeValue, upValue, msg.Attributes.Name) + + infoDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "info"), + "Non-numeric data of , value is always 1.", + []string{"device", "address", "broadcast", "duplex", "operstate", "ifalias"}, + nil, + ) + infoValue := 1.0 + + var ifalias = "" + if msg.Attributes.Alias != nil { + ifalias = *msg.Attributes.Alias + } + + duplex := "" + lm, lmExists := linkModes[msg.Attributes.Name] + if lmExists { + duplex = lm.Duplex.String() + } + + ifaceInfo := sysfsAttrs[msg.Attributes.Name] + + ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, msg.Attributes.Name, msg.Attributes.Address.String(), msg.Attributes.Broadcast.String(), duplex, operstateStr[int(msg.Attributes.OperationalState)], ifalias) + + pushMetric(ch, c.getFieldDesc("address_assign_type"), "address_assign_type", ifaceInfo.AddrAssignType, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("carrier"), "carrier", msg.Attributes.Carrier, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("carrier_changes_total"), "carrier_changes_total", msg.Attributes.CarrierChanges, 
prometheus.CounterValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("carrier_up_changes_total"), "carrier_up_changes_total", msg.Attributes.CarrierUpCount, prometheus.CounterValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("carrier_down_changes_total"), "carrier_down_changes_total", msg.Attributes.CarrierDownCount, prometheus.CounterValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("device_id"), "device_id", ifaceInfo.DevID, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("flags"), "flags", msg.Flags, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("iface_id"), "iface_id", msg.Index, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("iface_link_mode"), "iface_link_mode", msg.Attributes.LinkMode, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("dormant"), "dormant", msg.Attributes.LinkMode, prometheus.GaugeValue, msg.Attributes.Name) + + // kernel logic: IFLA_LINK attribute will be ignore when ifindex is the same as iflink + // (dev->ifindex != dev_get_iflink(dev) && nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) + // As interface ID is never 0, we assume msg.Attributes.Type 0 means iflink is omitted in RTM_GETLINK response. + if msg.Attributes.Type > 0 { + pushMetric(ch, c.getFieldDesc("iface_link"), "iface_link", msg.Attributes.Type, prometheus.GaugeValue, msg.Attributes.Name) + } else { + pushMetric(ch, c.getFieldDesc("iface_link"), "iface_link", msg.Index, prometheus.GaugeValue, msg.Attributes.Name) + } + + pushMetric(ch, c.getFieldDesc("mtu_bytes"), "mtu_bytes", msg.Attributes.MTU, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("name_assign_type"), "name_assign_type", ifaceInfo.NameAssignType, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("net_dev_group"), "net_dev_group", msg.Attributes.NetDevGroup, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_queue_length"), "transmit_queue_length", msg.Attributes.TxQueueLen, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("protocol_type"), "protocol_type", msg.Type, prometheus.GaugeValue, msg.Attributes.Name) + + // Skip statistics if argument collector.netclass_rtnl.with-stats is false or statistics are unavailable. 
+ if netclassRTNLWithStats == nil || !*netclassRTNLWithStats || msg.Attributes.Stats64 == nil { + continue + } + + pushMetric(ch, c.getFieldDesc("receive_packets_total"), "receive_packets_total", msg.Attributes.Stats64.RXPackets, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_packets_total"), "transmit_packets_total", msg.Attributes.Stats64.TXPackets, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("receive_bytes_total"), "receive_bytes_total", msg.Attributes.Stats64.RXBytes, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_bytes_total"), "transmit_bytes_total", msg.Attributes.Stats64.TXBytes, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("receive_errors_total"), "receive_errors_total", msg.Attributes.Stats64.RXErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_errors_total"), "transmit_errors_total", msg.Attributes.Stats64.TXErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("receive_dropped_total"), "receive_dropped_total", msg.Attributes.Stats64.RXDropped, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_dropped_total"), "transmit_dropped_total", msg.Attributes.Stats64.TXDropped, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("multicast_total"), "multicast_total", msg.Attributes.Stats64.Multicast, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("collisions_total"), "collisions_total", msg.Attributes.Stats64.Collisions, prometheus.GaugeValue, msg.Attributes.Name) + + // Detailed rx_errors. + pushMetric(ch, c.getFieldDesc("receive_length_errors_total"), "receive_length_errors_total", msg.Attributes.Stats64.RXLengthErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("receive_over_errors_total"), "receive_over_errors_total", msg.Attributes.Stats64.RXOverErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("receive_crc_errors_total"), "receive_crc_errors_total", msg.Attributes.Stats64.RXCRCErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("receive_frame_errors_total"), "receive_frame_errors_total", msg.Attributes.Stats64.RXFrameErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("receive_fifo_errors_total"), "receive_fifo_errors_total", msg.Attributes.Stats64.RXFIFOErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("receive_missed_errors_total"), "receive_missed_errors_total", msg.Attributes.Stats64.RXMissedErrors, prometheus.GaugeValue, msg.Attributes.Name) + + // Detailed tx_errors. 
+ pushMetric(ch, c.getFieldDesc("transmit_aborted_errors_total"), "transmit_aborted_errors_total", msg.Attributes.Stats64.TXAbortedErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_carrier_errors_total"), "transmit_carrier_errors_total", msg.Attributes.Stats64.TXCarrierErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_fifo_errors_total"), "transmit_fifo_errors_total", msg.Attributes.Stats64.TXFIFOErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_heartbeat_errors_total"), "transmit_heartbeat_errors_total", msg.Attributes.Stats64.TXHeartbeatErrors, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_window_errors_total"), "transmit_window_errors_total", msg.Attributes.Stats64.TXWindowErrors, prometheus.GaugeValue, msg.Attributes.Name) + + // For cslip, etc. + pushMetric(ch, c.getFieldDesc("receive_compressed_total"), "receive_compressed_total", msg.Attributes.Stats64.RXCompressed, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("transmit_compressed_total"), "transmit_compressed_total", msg.Attributes.Stats64.TXCompressed, prometheus.GaugeValue, msg.Attributes.Name) + pushMetric(ch, c.getFieldDesc("receive_nohandler_total"), "receive_nohandler_total", msg.Attributes.Stats64.RXNoHandler, prometheus.GaugeValue, msg.Attributes.Name) + + } + + return nil +} + +func (c *netClassCollector) getNetClassInfoRTNL() ([]rtnetlink.LinkMessage, error) { + conn, err := rtnetlink.Dial(nil) + if err != nil { + return nil, err + } + defer conn.Close() + + lMsgs, err := conn.Link.List() + + return lMsgs, err + +} + +func (c *netClassCollector) getLinkModes() ([]*ethtool.LinkMode, error) { + conn, err := ethtool.New() + if err != nil { + return nil, err + } + defer conn.Close() + + lms, err := conn.LinkModes() + + return lms, err +} + +// getSysfsAttributes reads attributes that are absent from netlink but provided +// by sysfs. +func getSysfsAttributes(links []rtnetlink.LinkMessage) (sysfs.NetClass, error) { + netClass := sysfs.NetClass{} + for _, msg := range links { + interfaceClass := sysfs.NetClassIface{} + ifName := msg.Attributes.Name + devPath := filepath.Join("/sys", "class", "net", ifName) + + // These three attributes hold a device-specific lock when + // accessed, not the RTNL lock, so they are much less impactful + // than reading most of the other attributes from sysfs. + for _, attr := range []string{"addr_assign_type", "dev_id", "name_assign_type"} { + if err := sysfs.ParseNetClassAttribute(devPath, attr, &interfaceClass); err != nil { + return nil, err + } + } + netClass[ifName] = interfaceClass + } + return netClass, nil +} diff --git a/collector/netdev_aix.go b/collector/netdev_aix.go new file mode 100644 index 0000000000..6c48600f33 --- /dev/null +++ b/collector/netdev_aix.go @@ -0,0 +1,57 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nonetdev + +package collector + +import ( + "log/slog" + + "github.com/power-devops/perfstat" +) + +func getNetDevStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, error) { + netDev := netDevStats{} + + stats, err := perfstat.NetAdapterStat() + if err != nil { + return nil, err + } + + for _, stat := range stats { + netDev[stat.Name] = map[string]uint64{ + "receive_bytes": uint64(stat.RxBytes), + "receive_dropped": uint64(stat.RxPacketsDropped), + "receive_errors": uint64(stat.RxErrors), + "receive_multicast": uint64(stat.RxMulticastPackets), + "receive_packets": uint64(stat.RxPackets), + "receive_collision_errors": uint64(stat.RxCollisionErrors), + "transmit_bytes": uint64(stat.TxBytes), + "transmit_dropped": uint64(stat.TxPacketsDropped), + "transmit_errors": uint64(stat.TxErrors), + "transmit_multicast": uint64(stat.TxMulticastPackets), + "transmit_packets": uint64(stat.TxPackets), + "transmit_queue_overflow": uint64(stat.TxQueueOverflow), + "transmit_collision_single_errors": uint64(stat.TxSingleCollisionCount), + "transmit_collision_multiple_errors": uint64(stat.TxMultipleCollisionCount), + } + } + + return netDev, nil +} + +func getNetDevLabels() (map[string]map[string]string, error) { + // to be implemented if needed + return nil, nil +} diff --git a/collector/netdev_bsd.go b/collector/netdev_bsd.go index 8bed2e1424..7ec6ad576b 100644 --- a/collector/netdev_bsd.go +++ b/collector/netdev_bsd.go @@ -11,18 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nonetdev -// +build freebsd dragonfly +//go:build !nonetdev && (freebsd || dragonfly) package collector import ( "errors" - "regexp" - "strconv" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "log/slog" ) /* @@ -35,8 +30,8 @@ import ( */ import "C" -func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) { - netDev := map[string]map[string]string{} +func getNetDevStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, error) { + netDev := netDevStats{} var ifap, ifa *C.struct_ifaddrs if C.getifaddrs(&ifap) == -1 { @@ -45,37 +40,35 @@ func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Log defer C.freeifaddrs(ifap) for ifa = ifap; ifa != nil; ifa = ifa.ifa_next { - if ifa.ifa_addr.sa_family == C.AF_LINK { - dev := C.GoString(ifa.ifa_name) - if ignore != nil && ignore.MatchString(dev) { - level.Debug(logger).Log("msg", "Ignoring device", "device", dev) - continue - } - if accept != nil && !accept.MatchString(dev) { - level.Debug(logger).Log("msg", "Ignoring device", "device", dev) - continue - } + if ifa.ifa_addr.sa_family != C.AF_LINK { + continue + } + + dev := C.GoString(ifa.ifa_name) + if filter.ignored(dev) { + logger.Debug("Ignoring device", "device", dev) + continue + } - devStats := map[string]string{} - data := (*C.struct_if_data)(ifa.ifa_data) + data := (*C.struct_if_data)(ifa.ifa_data) - devStats["receive_packets"] = convertFreeBSDCPUTime(uint64(data.ifi_ipackets)) - devStats["transmit_packets"] = convertFreeBSDCPUTime(uint64(data.ifi_opackets)) - devStats["receive_errs"] = convertFreeBSDCPUTime(uint64(data.ifi_ierrors)) - devStats["transmit_errs"] = convertFreeBSDCPUTime(uint64(data.ifi_oerrors)) - devStats["receive_bytes"] = convertFreeBSDCPUTime(uint64(data.ifi_ibytes)) - devStats["transmit_bytes"] = convertFreeBSDCPUTime(uint64(data.ifi_obytes)) - devStats["receive_multicast"] = 
convertFreeBSDCPUTime(uint64(data.ifi_imcasts)) - devStats["transmit_multicast"] = convertFreeBSDCPUTime(uint64(data.ifi_omcasts)) - devStats["receive_drop"] = convertFreeBSDCPUTime(uint64(data.ifi_iqdrops)) - devStats["transmit_drop"] = convertFreeBSDCPUTime(uint64(data.ifi_oqdrops)) - netDev[dev] = devStats + netDev[dev] = map[string]uint64{ + "receive_packets": uint64(data.ifi_ipackets), + "transmit_packets": uint64(data.ifi_opackets), + "receive_bytes": uint64(data.ifi_ibytes), + "transmit_bytes": uint64(data.ifi_obytes), + "receive_errors": uint64(data.ifi_ierrors), + "transmit_errors": uint64(data.ifi_oerrors), + "receive_dropped": uint64(data.ifi_iqdrops), + "transmit_dropped": uint64(data.ifi_oqdrops), + "receive_multicast": uint64(data.ifi_imcasts), + "transmit_multicast": uint64(data.ifi_omcasts), } } return netDev, nil } -func convertFreeBSDCPUTime(counter uint64) string { - return strconv.FormatUint(counter, 10) +func getNetDevLabels() (map[string]map[string]string, error) { + return nil, nil } diff --git a/collector/netdev_bsd_test.go b/collector/netdev_bsd_test.go deleted file mode 100644 index a661c1cc0e..0000000000 --- a/collector/netdev_bsd_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !nonetdev -// +build freebsd dragonfly - -package collector - -import "testing" - -type uintToStringTest struct { - in uint64 - out string -} - -var uinttostringtests = []uintToStringTest{ - // Copied base10 values from strconv's tests: - {0, "0"}, - {1, "1"}, - {12345678, "12345678"}, - {1<<31 - 1, "2147483647"}, - {1 << 31, "2147483648"}, - {1<<31 + 1, "2147483649"}, - {1<<32 - 1, "4294967295"}, - {1 << 32, "4294967296"}, - {1<<32 + 1, "4294967297"}, - {1 << 50, "1125899906842624"}, - {1<<63 - 1, "9223372036854775807"}, - - // Some values that convert correctly on amd64, but not on i386. - {0x1bf0c640a, "7500227594"}, - {0xbee5df75, "3202735989"}, -} - -func TestUintToString(t *testing.T) { - for _, test := range uinttostringtests { - is := convertFreeBSDCPUTime(test.in) - if is != test.out { - t.Errorf("convertFreeBSDCPUTime(%v) = %v want %v", - test.in, is, test.out) - } - } -} diff --git a/collector/netdev_common.go b/collector/netdev_common.go index a28bd43bd9..091f3464db 100644 --- a/collector/netdev_common.go +++ b/collector/netdev_common.go @@ -11,21 +11,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !nonetdev -// +build linux freebsd openbsd dragonfly darwin +//go:build !nonetdev && (linux || freebsd || openbsd || dragonfly || darwin || aix) package collector import ( "errors" "fmt" - "regexp" + "log/slog" + "net" "strconv" + "sync" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" ) var ( @@ -33,25 +32,29 @@ var ( oldNetdevDeviceInclude = kingpin.Flag("collector.netdev.device-whitelist", "DEPRECATED: Use collector.netdev.device-include").Hidden().String() netdevDeviceExclude = kingpin.Flag("collector.netdev.device-exclude", "Regexp of net devices to exclude (mutually exclusive to device-include).").String() oldNetdevDeviceExclude = kingpin.Flag("collector.netdev.device-blacklist", "DEPRECATED: Use collector.netdev.device-exclude").Hidden().String() + netdevAddressInfo = kingpin.Flag("collector.netdev.address-info", "Collect address-info for every device").Bool() + netdevDetailedMetrics = kingpin.Flag("collector.netdev.enable-detailed-metrics", "Use (incompatible) metric names that provide more detailed stats on Linux").Bool() ) type netDevCollector struct { - subsystem string - deviceExcludePattern *regexp.Regexp - deviceIncludePattern *regexp.Regexp - metricDescs map[string]*prometheus.Desc - logger log.Logger + subsystem string + deviceFilter deviceFilter + metricDescsMutex sync.Mutex + metricDescs map[string]*prometheus.Desc + logger *slog.Logger } +type netDevStats map[string]map[string]uint64 + func init() { registerCollector("netdev", defaultEnabled, NewNetDevCollector) } // NewNetDevCollector returns a new Collector exposing network device stats. -func NewNetDevCollector(logger log.Logger) (Collector, error) { +func NewNetDevCollector(logger *slog.Logger) (Collector, error) { if *oldNetdevDeviceInclude != "" { if *netdevDeviceInclude == "" { - level.Warn(logger).Log("msg", "--collector.netdev.device-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-include") + logger.Warn("--collector.netdev.device-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-include") *netdevDeviceInclude = *oldNetdevDeviceInclude } else { return nil, errors.New("--collector.netdev.device-whitelist and --collector.netdev.device-include are mutually exclusive") @@ -60,7 +63,7 @@ func NewNetDevCollector(logger log.Logger) (Collector, error) { if *oldNetdevDeviceExclude != "" { if *netdevDeviceExclude == "" { - level.Warn(logger).Log("msg", "--collector.netdev.device-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-exclude") + logger.Warn("--collector.netdev.device-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-exclude") *netdevDeviceExclude = *oldNetdevDeviceExclude } else { return nil, errors.New("--collector.netdev.device-blacklist and --collector.netdev.device-exclude are mutually exclusive") @@ -71,50 +74,181 @@ func NewNetDevCollector(logger log.Logger) (Collector, error) { return nil, errors.New("device-exclude & device-include are mutually exclusive") } - var excludePattern *regexp.Regexp if *netdevDeviceExclude != "" { - level.Info(logger).Log("msg", "Parsed flag --collector.netdev.device-exclude", "flag", *netdevDeviceExclude) - excludePattern = regexp.MustCompile(*netdevDeviceExclude) + logger.Info("Parsed flag --collector.netdev.device-exclude", "flag", *netdevDeviceExclude) } - var includePattern *regexp.Regexp 
if *netdevDeviceInclude != "" { - level.Info(logger).Log("msg", "Parsed Flag --collector.netdev.device-include", "flag", *netdevDeviceInclude) - includePattern = regexp.MustCompile(*netdevDeviceInclude) + logger.Info("Parsed Flag --collector.netdev.device-include", "flag", *netdevDeviceInclude) } return &netDevCollector{ - subsystem: "network", - deviceExcludePattern: excludePattern, - deviceIncludePattern: includePattern, - metricDescs: map[string]*prometheus.Desc{}, - logger: logger, + subsystem: "network", + deviceFilter: newDeviceFilter(*netdevDeviceExclude, *netdevDeviceInclude), + metricDescs: map[string]*prometheus.Desc{}, + logger: logger, }, nil } +func (c *netDevCollector) metricDesc(key string, labels []string) *prometheus.Desc { + c.metricDescsMutex.Lock() + defer c.metricDescsMutex.Unlock() + + if _, ok := c.metricDescs[key]; !ok { + c.metricDescs[key] = prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, key+"_total"), + fmt.Sprintf("Network device statistic %s.", key), + labels, + nil, + ) + } + + return c.metricDescs[key] +} + func (c *netDevCollector) Update(ch chan<- prometheus.Metric) error { - netDev, err := getNetDevStats(c.deviceExcludePattern, c.deviceIncludePattern, c.logger) + netDev, err := getNetDevStats(&c.deviceFilter, c.logger) if err != nil { return fmt.Errorf("couldn't get netstats: %w", err) } + + netDevLabels, err := getNetDevLabels() + if err != nil { + return fmt.Errorf("couldn't get netdev labels: %w", err) + } + for dev, devStats := range netDev { - for key, value := range devStats { - desc, ok := c.metricDescs[key] - if !ok { - desc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, c.subsystem, key+"_total"), - fmt.Sprintf("Network device statistic %s.", key), - []string{"device"}, - nil, - ) - c.metricDescs[key] = desc + if !*netdevDetailedMetrics { + legacy(devStats) + } + + labels := []string{"device"} + labelValues := []string{dev} + if devLabels, exists := netDevLabels[dev]; exists { + for labelName, labelValue := range devLabels { + labels = append(labels, labelName) + labelValues = append(labelValues, labelValue) } - v, err := strconv.ParseFloat(value, 64) + } + + for key, value := range devStats { + desc := c.metricDesc(key, labels) + ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, float64(value), labelValues...) + } + } + if *netdevAddressInfo { + interfaces, err := net.Interfaces() + if err != nil { + return fmt.Errorf("could not get network interfaces: %w", err) + } + + desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, "network_address", + "info"), "node network address by device", + []string{"device", "address", "netmask", "scope"}, nil) + + for _, addr := range getAddrsInfo(interfaces) { + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1, + addr.device, addr.addr, addr.netmask, addr.scope) + } + } + return nil +} + +type addrInfo struct { + device string + addr string + scope string + netmask string +} + +func scope(ip net.IP) string { + if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() { + return "link-local" + } + + if ip.IsInterfaceLocalMulticast() { + return "interface-local" + } + + if ip.IsGlobalUnicast() { + return "global" + } + + return "" +} + +// getAddrsInfo returns interface name, address, scope and netmask for all interfaces. 
+func getAddrsInfo(interfaces []net.Interface) []addrInfo { + var res []addrInfo + + for _, ifs := range interfaces { + addrs, _ := ifs.Addrs() + for _, addr := range addrs { + ip, ipNet, err := net.ParseCIDR(addr.String()) if err != nil { - return fmt.Errorf("invalid value %s in netstats: %w", value, err) + continue } - ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, v, dev) + size, _ := ipNet.Mask.Size() + + res = append(res, addrInfo{ + device: ifs.Name, + addr: ip.String(), + scope: scope(ip), + netmask: strconv.Itoa(size), + }) } } - return nil + + return res +} + +// https://github.com/torvalds/linux/blob/master/net/core/net-procfs.c#L75-L97 +func legacy(metrics map[string]uint64) { + if metric, ok := pop(metrics, "receive_errors"); ok { + metrics["receive_errs"] = metric + } + if metric, ok := pop(metrics, "receive_dropped"); ok { + metrics["receive_drop"] = metric + popz(metrics, "receive_missed_errors") + } + if metric, ok := pop(metrics, "receive_fifo_errors"); ok { + metrics["receive_fifo"] = metric + } + if metric, ok := pop(metrics, "receive_frame_errors"); ok { + metrics["receive_frame"] = metric + popz(metrics, "receive_length_errors") + popz(metrics, "receive_over_errors") + popz(metrics, "receive_crc_errors") + } + if metric, ok := pop(metrics, "multicast"); ok { + metrics["receive_multicast"] = metric + } + if metric, ok := pop(metrics, "transmit_errors"); ok { + metrics["transmit_errs"] = metric + } + if metric, ok := pop(metrics, "transmit_dropped"); ok { + metrics["transmit_drop"] = metric + } + if metric, ok := pop(metrics, "transmit_fifo_errors"); ok { + metrics["transmit_fifo"] = metric + } + if metric, ok := pop(metrics, "multicast"); ok { + metrics["receive_multicast"] = metric + } + if metric, ok := pop(metrics, "collisions"); ok { + metrics["transmit_colls"] = metric + } + if metric, ok := pop(metrics, "transmit_carrier_errors"); ok { + metrics["transmit_carrier"] = metric + popz(metrics, "transmit_aborted_errors") + popz(metrics, "transmit_heartbeat_errors") + popz(metrics, "transmit_window_errors") + } +} + +func pop(m map[string]uint64, key string) (uint64, bool) { + value, ok := m[key] + delete(m, key) + return value, ok +} + +func popz(m map[string]uint64, key string) uint64 { + if value, ok := m[key]; ok { + delete(m, key) + return value + } + return 0 } diff --git a/collector/netdev_darwin.go b/collector/netdev_darwin.go index 4970853f81..7b70a0d09e 100644 --- a/collector/netdev_darwin.go +++ b/collector/netdev_darwin.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
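Note on the split between detailed and legacy metric names: with --collector.netdev.enable-detailed-metrics left unset, legacy() above folds the detailed counters back into the historical /proc/net/dev-style names before emission, and pop/popz delete the detailed keys as they are absorbed. A minimal sketch of that mapping, reusing the same values as the enp0s0f0 test fixture further below (illustrative only, assumes it runs inside this package):

	stats := map[string]uint64{
		"receive_errors":        14,
		"receive_dropped":       10,
		"receive_missed_errors": 21,
		"transmit_fifo_errors":  24,
	}
	legacy(stats)
	// stats now holds the legacy names:
	//   receive_errs  = 14
	//   receive_drop  = 31 (receive_dropped plus receive_missed_errors)
	//   transmit_fifo = 24
	// and the detailed keys have been removed by pop/popz.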
-// +build !nonetdev +//go:build !nonetdev package collector @@ -19,17 +19,15 @@ import ( "bytes" "encoding/binary" "fmt" + "log/slog" "net" - "regexp" - "strconv" + "unsafe" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "golang.org/x/sys/unix" ) -func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) { - netDev := map[string]map[string]string{} +func getNetDevStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, error) { + netDev := netDevStats{} ifs, err := net.Interfaces() if err != nil { @@ -37,31 +35,30 @@ func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Log } for _, iface := range ifs { - ifaceData, err := getIfaceData(iface.Index) - if err != nil { - level.Debug(logger).Log("msg", "failed to load data for interface", "device", iface.Name, "err", err) + if filter.ignored(iface.Name) { + logger.Debug("Ignoring device", "device", iface.Name) continue } - if ignore != nil && ignore.MatchString(iface.Name) { - level.Debug(logger).Log("msg", "Ignoring device", "device", iface.Name) - continue - } - if accept != nil && !accept.MatchString(iface.Name) { - level.Debug(logger).Log("msg", "Ignoring device", "device", iface.Name) + ifaceData, err := getIfaceData(iface.Index) + if err != nil { + logger.Debug("failed to load data for interface", "device", iface.Name, "err", err) continue } - devStats := map[string]string{} - devStats["receive_packets"] = strconv.FormatUint(ifaceData.Data.Ipackets, 10) - devStats["transmit_packets"] = strconv.FormatUint(ifaceData.Data.Opackets, 10) - devStats["receive_errs"] = strconv.FormatUint(ifaceData.Data.Ierrors, 10) - devStats["transmit_errs"] = strconv.FormatUint(ifaceData.Data.Oerrors, 10) - devStats["receive_bytes"] = strconv.FormatUint(ifaceData.Data.Ibytes, 10) - devStats["transmit_bytes"] = strconv.FormatUint(ifaceData.Data.Obytes, 10) - devStats["receive_multicast"] = strconv.FormatUint(ifaceData.Data.Imcasts, 10) - devStats["transmit_multicast"] = strconv.FormatUint(ifaceData.Data.Omcasts, 10) - netDev[iface.Name] = devStats + netDev[iface.Name] = map[string]uint64{ + "receive_packets": ifaceData.Data.Ipackets, + "transmit_packets": ifaceData.Data.Opackets, + "receive_bytes": ifaceData.Data.Ibytes, + "transmit_bytes": ifaceData.Data.Obytes, + "receive_errors": ifaceData.Data.Ierrors, + "transmit_errors": ifaceData.Data.Oerrors, + "receive_dropped": ifaceData.Data.Iqdrops, + "receive_multicast": ifaceData.Data.Imcasts, + "transmit_multicast": ifaceData.Data.Omcasts, + "collisions": ifaceData.Data.Collisions, + "noproto": ifaceData.Data.Noproto, + } } return netDev, nil @@ -74,48 +71,110 @@ func getIfaceData(index int) (*ifMsghdr2, error) { return nil, err } err = binary.Read(bytes.NewReader(rawData), binary.LittleEndian, &data) + if err != nil { + return &data, err + } + + /* + As of macOS Ventura 13.2.1, there’s a kernel bug which truncates traffic values at the 4GiB mark. + This is a workaround to fetch the interface traffic metrics using a sysctl call. + Apple wants to prevent fingerprinting by 3rdparty apps and might fix this bug in future which would break this implementation. 
+ */ + mib := []int32{ + unix.CTL_NET, + unix.AF_LINK, + 0, // NETLINK_GENERIC: functions not specific to a type of iface + 2, //IFMIB_IFDATA: per-interface data table + int32(index), + 1, // IFDATA_GENERAL: generic stats for all kinds of ifaces + } + + var mibData ifMibData + size := unsafe.Sizeof(mibData) + + if _, _, errno := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&mibData)), + uintptr(unsafe.Pointer(&size)), + uintptr(unsafe.Pointer(nil)), + 0, + ); errno != 0 { + return &data, err + } + + var ifdata ifData64 + err = binary.Read(bytes.NewReader(mibData.Data[:]), binary.LittleEndian, &ifdata) + if err != nil { + return &data, err + } + + data.Data.Ibytes = ifdata.Ibytes + data.Data.Obytes = ifdata.Obytes return &data, err } +// https://github.com/apple-oss-distributions/xnu/blob/main/bsd/net/if.h#L220-L232 type ifMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - _ [2]byte - SndLen int32 - SndMaxlen int32 - SndDrops int32 - Timer int32 - Data ifData64 + Msglen uint16 // to skip over non-understood messages + Version uint8 // future binary compatabilit + Type uint8 // message type + Addrs int32 // like rtm_addrs + Flags int32 // value of if_flags + Index uint16 // index for associated ifp + _ [2]byte // padding for alignment + SndLen int32 // instantaneous length of send queue + SndMaxlen int32 // maximum length of send queue + SndDrops int32 // number of drops in send queue + Timer int32 // time until if_watchdog called + Data ifData64 // statistics and other data } +// https://github.com/apple-oss-distributions/xnu/blob/main/bsd/net/if_var.h#L207-L235 type ifData64 struct { - Type uint8 - Typelen uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Recvquota uint8 - Xmitquota uint8 - Unused1 uint8 - Mtu uint32 - Metric uint32 - Baudrate uint64 - Ipackets uint64 - Ierrors uint64 - Opackets uint64 - Oerrors uint64 - Collisions uint64 - Ibytes uint64 - Obytes uint64 - Imcasts uint64 - Omcasts uint64 - Iqdrops uint64 - Noproto uint64 - Recvtiming uint32 - Xmittiming uint32 - Lastchange unix.Timeval32 + Type uint8 // ethernet, tokenring, etc + Typelen uint8 // Length of frame type id + Physical uint8 // e.g., AUI, Thinnet, 10base-T, etc + Addrlen uint8 // media address length + Hdrlen uint8 // media header length + Recvquota uint8 // polling quota for receive intrs + Xmitquota uint8 // polling quota for xmit intrs + Unused1 uint8 // for future use + Mtu uint32 // maximum transmission unit + Metric uint32 // routing metric (external only) + Baudrate uint64 // linespeed + + // volatile statistics + Ipackets uint64 // packets received on interface + Ierrors uint64 // input errors on interface + Opackets uint64 // packets sent on interface + Oerrors uint64 // output errors on interface + Collisions uint64 // collisions on csma interfaces + Ibytes uint64 // total number of octets received + Obytes uint64 // total number of octets sent + Imcasts uint64 // packets received via multicast + Omcasts uint64 // packets sent via multicast + Iqdrops uint64 // dropped on input, this interface + Noproto uint64 // destined for unsupported protocol + Recvtiming uint32 // usec spent receiving when timing + Xmittiming uint32 // usec spent xmitting when timing + Lastchange unix.Timeval32 // time of last administrative change +} + +// https://github.com/apple-oss-distributions/xnu/blob/main/bsd/net/if_mib.h#L65-L74 +type ifMibData struct { + Name [16]byte // name of interface 
+ PCount uint32 // number of promiscuous listeners + Flags uint32 // interface flags + SendLength uint32 // instantaneous length of send queue + MaxSendLength uint32 // maximum length of send queue + SendDrops uint32 // number of drops in send queue + _ [4]uint32 // for future expansion + Data [128]byte // generic information and statistics +} + +func getNetDevLabels() (map[string]map[string]string, error) { + // to be implemented if needed + return nil, nil } diff --git a/collector/netdev_linux.go b/collector/netdev_linux.go index 1a4b5dfcfa..8f694c2c97 100644 --- a/collector/netdev_linux.go +++ b/collector/netdev_linux.go @@ -11,82 +11,199 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nonetdev +//go:build !nonetdev package collector import ( - "bufio" "fmt" - "io" - "os" - "regexp" - "strings" + "log/slog" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" + "github.com/jsimonetti/rtnetlink/v2" + "github.com/prometheus/procfs" + "github.com/prometheus/procfs/sysfs" ) var ( - procNetDevInterfaceRE = regexp.MustCompile(`^(.+): *(.+)$`) - procNetDevFieldSep = regexp.MustCompile(` +`) + netDevNetlink = kingpin.Flag("collector.netdev.netlink", "Use netlink to gather stats instead of /proc/net/dev.").Default("true").Bool() + netdevLabelIfAlias = kingpin.Flag("collector.netdev.label-ifalias", "Add ifAlias label").Default("false").Bool() ) -func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) { - file, err := os.Open(procFilePath("net/dev")) +func getNetDevStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, error) { + if *netDevNetlink { + return netlinkStats(filter, logger) + } + return procNetDevStats(filter, logger) +} + +func netlinkStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, error) { + conn, err := rtnetlink.Dial(nil) if err != nil { return nil, err } - defer file.Close() - return parseNetDevStats(file, ignore, accept, logger) -} - -func parseNetDevStats(r io.Reader, ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) { - scanner := bufio.NewScanner(r) - scanner.Scan() // skip first header - scanner.Scan() - parts := strings.Split(scanner.Text(), "|") - if len(parts) != 3 { // interface + receive + transmit - return nil, fmt.Errorf("invalid header line in net/dev: %s", - scanner.Text()) + defer conn.Close() + links, err := conn.Link.List() + if err != nil { + return nil, err } - receiveHeader := strings.Fields(parts[1]) - transmitHeader := strings.Fields(parts[2]) - headerLength := len(receiveHeader) + len(transmitHeader) + return parseNetlinkStats(links, filter, logger), nil +} + +func parseNetlinkStats(links []rtnetlink.LinkMessage, filter *deviceFilter, logger *slog.Logger) netDevStats { + metrics := netDevStats{} - netDev := map[string]map[string]string{} - for scanner.Scan() { - line := strings.TrimLeft(scanner.Text(), " ") - parts := procNetDevInterfaceRE.FindStringSubmatch(line) - if len(parts) != 3 { - return nil, fmt.Errorf("couldn't get interface name, invalid line in net/dev: %q", line) + for _, msg := range links { + if msg.Attributes == nil { + logger.Debug("No netlink attributes, skipping") + continue + } + name := msg.Attributes.Name + stats := msg.Attributes.Stats64 + if stats32 := msg.Attributes.Stats; stats == nil && stats32 != nil { + stats = &rtnetlink.LinkStats64{ + RXPackets: 
uint64(stats32.RXPackets), + TXPackets: uint64(stats32.TXPackets), + RXBytes: uint64(stats32.RXBytes), + TXBytes: uint64(stats32.TXBytes), + RXErrors: uint64(stats32.RXErrors), + TXErrors: uint64(stats32.TXErrors), + RXDropped: uint64(stats32.RXDropped), + TXDropped: uint64(stats32.TXDropped), + Multicast: uint64(stats32.Multicast), + Collisions: uint64(stats32.Collisions), + RXLengthErrors: uint64(stats32.RXLengthErrors), + RXOverErrors: uint64(stats32.RXOverErrors), + RXCRCErrors: uint64(stats32.RXCRCErrors), + RXFrameErrors: uint64(stats32.RXFrameErrors), + RXFIFOErrors: uint64(stats32.RXFIFOErrors), + RXMissedErrors: uint64(stats32.RXMissedErrors), + TXAbortedErrors: uint64(stats32.TXAbortedErrors), + TXCarrierErrors: uint64(stats32.TXCarrierErrors), + TXFIFOErrors: uint64(stats32.TXFIFOErrors), + TXHeartbeatErrors: uint64(stats32.TXHeartbeatErrors), + TXWindowErrors: uint64(stats32.TXWindowErrors), + RXCompressed: uint64(stats32.RXCompressed), + TXCompressed: uint64(stats32.TXCompressed), + RXNoHandler: uint64(stats32.RXNoHandler), + RXOtherhostDropped: 0, + } } - dev := parts[1] - if ignore != nil && ignore.MatchString(dev) { - level.Debug(logger).Log("msg", "Ignoring device", "device", dev) + if filter.ignored(name) { + logger.Debug("Ignoring device", "device", name) continue } - if accept != nil && !accept.MatchString(dev) { - level.Debug(logger).Log("msg", "Ignoring device", "device", dev) + + // Make sure we don't panic when accessing `stats` attributes below. + if stats == nil { + logger.Debug("No netlink stats, skipping") continue } - values := procNetDevFieldSep.Split(strings.TrimLeft(parts[2], " "), -1) - if len(values) != headerLength { - return nil, fmt.Errorf("couldn't get values, invalid line in net/dev: %q", parts[2]) + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/if_link.h#L42-L246 + metrics[name] = map[string]uint64{ + "receive_packets": stats.RXPackets, + "transmit_packets": stats.TXPackets, + "receive_bytes": stats.RXBytes, + "transmit_bytes": stats.TXBytes, + "receive_errors": stats.RXErrors, + "transmit_errors": stats.TXErrors, + "receive_dropped": stats.RXDropped, + "transmit_dropped": stats.TXDropped, + "multicast": stats.Multicast, + "collisions": stats.Collisions, + + // detailed rx_errors + "receive_length_errors": stats.RXLengthErrors, + "receive_over_errors": stats.RXOverErrors, + "receive_crc_errors": stats.RXCRCErrors, + "receive_frame_errors": stats.RXFrameErrors, + "receive_fifo_errors": stats.RXFIFOErrors, + "receive_missed_errors": stats.RXMissedErrors, + + // detailed tx_errors + "transmit_aborted_errors": stats.TXAbortedErrors, + "transmit_carrier_errors": stats.TXCarrierErrors, + "transmit_fifo_errors": stats.TXFIFOErrors, + "transmit_heartbeat_errors": stats.TXHeartbeatErrors, + "transmit_window_errors": stats.TXWindowErrors, + + // for cslip etc + "receive_compressed": stats.RXCompressed, + "transmit_compressed": stats.TXCompressed, + "receive_nohandler": stats.RXNoHandler, } + } + + return metrics +} + +func procNetDevStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, error) { + metrics := netDevStats{} + + fs, err := procfs.NewFS(*procPath) + if err != nil { + return metrics, fmt.Errorf("failed to open procfs: %w", err) + } + + netDev, err := fs.NetDev() + if err != nil { + return metrics, fmt.Errorf("failed to parse /proc/net/dev: %w", err) + } + + for _, stats := range netDev { + name := stats.Name - netDev[dev] = map[string]string{} - for i := 0; i < len(receiveHeader); i++ { - 
netDev[dev]["receive_"+receiveHeader[i]] = values[i] + if filter.ignored(name) { + logger.Debug("Ignoring device", "device", name) + continue } - for i := 0; i < len(transmitHeader); i++ { - netDev[dev]["transmit_"+transmitHeader[i]] = values[i+len(receiveHeader)] + metrics[name] = map[string]uint64{ + "receive_bytes": stats.RxBytes, + "receive_packets": stats.RxPackets, + "receive_errors": stats.RxErrors, + "receive_dropped": stats.RxDropped, + "receive_fifo": stats.RxFIFO, + "receive_frame": stats.RxFrame, + "receive_compressed": stats.RxCompressed, + "receive_multicast": stats.RxMulticast, + "transmit_bytes": stats.TxBytes, + "transmit_packets": stats.TxPackets, + "transmit_errors": stats.TxErrors, + "transmit_dropped": stats.TxDropped, + "transmit_fifo": stats.TxFIFO, + "transmit_colls": stats.TxCollisions, + "transmit_carrier": stats.TxCarrier, + "transmit_compressed": stats.TxCompressed, } } - return netDev, scanner.Err() + + return metrics, nil +} + +func getNetDevLabels() (map[string]map[string]string, error) { + if !*netdevLabelIfAlias { + return nil, nil + } + + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, err + } + + interfaces, err := fs.NetClass() + if err != nil { + return nil, err + } + + labels := make(map[string]map[string]string) + for iface, params := range interfaces { + labels[iface] = map[string]string{"ifalias": params.IfAlias} + } + + return labels, nil } diff --git a/collector/netdev_linux_test.go b/collector/netdev_linux_test.go index 1c88d83cce..3f0515b3f5 100644 --- a/collector/netdev_linux_test.go +++ b/collector/netdev_linux_test.go @@ -11,40 +11,175 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nonetdev + package collector import ( - "github.com/go-kit/kit/log" - "os" - "regexp" + "io" + "log/slog" "testing" + + "github.com/jsimonetti/rtnetlink/v2" ) +var links = []rtnetlink.LinkMessage{ + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "tun0", + Stats64: &rtnetlink.LinkStats64{ + RXPackets: 24, + TXPackets: 934, + RXBytes: 1888, + TXBytes: 67120, + }, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "veth4B09XN", + Stats64: &rtnetlink.LinkStats64{ + RXPackets: 8, + TXPackets: 10640, + RXBytes: 648, + TXBytes: 1943284, + }, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "lo", + Stats64: &rtnetlink.LinkStats64{ + RXPackets: 1832522, + TXPackets: 1832522, + RXBytes: 435303245, + TXBytes: 435303245, + }, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "eth0", + Stats64: &rtnetlink.LinkStats64{ + RXPackets: 520993275, + TXPackets: 43451486, + RXBytes: 68210035552, + TXBytes: 9315587528, + }, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "lxcbr0", + Stats64: &rtnetlink.LinkStats64{ + TXPackets: 28339, + TXBytes: 2630299, + }, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "wlan0", + Stats64: &rtnetlink.LinkStats64{ + RXPackets: 13899359, + TXPackets: 11726200, + RXBytes: 10437182923, + TXBytes: 2851649360, + }, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "docker0", + Stats64: &rtnetlink.LinkStats64{ + RXPackets: 1065585, + TXPackets: 1929779, + RXBytes: 64910168, + TXBytes: 2681662018, + }, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "ibr10:30", + Stats64: &rtnetlink.LinkStats64{}, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "flannel.1", + Stats64: &rtnetlink.LinkStats64{ + RXPackets: 228499337, + TXPackets: 258369223, + RXBytes: 
18144009813, + TXBytes: 20758990068, + TXDropped: 64, + }, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "💩0", + Stats64: &rtnetlink.LinkStats64{ + RXPackets: 105557, + TXPackets: 304261, + RXBytes: 57750104, + TXBytes: 404570255, + Multicast: 72, + }, + }, + }, + { + Attributes: &rtnetlink.LinkAttributes{ + Name: "enp0s0f0", + Stats64: &rtnetlink.LinkStats64{ + RXPackets: 226, + TXPackets: 803, + RXBytes: 231424, + TXBytes: 822272, + RXErrors: 14, + TXErrors: 2, + RXDropped: 10, + TXDropped: 17, + Multicast: 285, + Collisions: 30, + RXLengthErrors: 5, + RXOverErrors: 3, + RXCRCErrors: 1, + RXFrameErrors: 4, + RXFIFOErrors: 6, + RXMissedErrors: 21, + TXAbortedErrors: 22, + TXCarrierErrors: 7, + TXFIFOErrors: 24, + TXHeartbeatErrors: 9, + TXWindowErrors: 19, + RXCompressed: 23, + TXCompressed: 20, + RXNoHandler: 62, + }, + }, + }, +} + func TestNetDevStatsIgnore(t *testing.T) { - file, err := os.Open("fixtures/proc/net/dev") - if err != nil { - t.Fatal(err) - } - defer file.Close() + filter := newDeviceFilter("^veth", "") - netStats, err := parseNetDevStats(file, regexp.MustCompile("^veth"), nil, log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } + netStats := parseNetlinkStats(links, &filter, slog.New(slog.NewTextHandler(io.Discard, nil))) - if want, got := "10437182923", netStats["wlan0"]["receive_bytes"]; want != got { - t.Errorf("want netstat wlan0 bytes %s, got %s", want, got) + if want, got := uint64(10437182923), netStats["wlan0"]["receive_bytes"]; want != got { + t.Errorf("want netstat wlan0 bytes %v, got %v", want, got) } - if want, got := "68210035552", netStats["eth0"]["receive_bytes"]; want != got { - t.Errorf("want netstat eth0 bytes %s, got %s", want, got) + if want, got := uint64(68210035552), netStats["eth0"]["receive_bytes"]; want != got { + t.Errorf("want netstat eth0 bytes %v, got %v", want, got) } - if want, got := "934", netStats["tun0"]["transmit_packets"]; want != got { - t.Errorf("want netstat tun0 packets %s, got %s", want, got) + if want, got := uint64(934), netStats["tun0"]["transmit_packets"]; want != got { + t.Errorf("want netstat tun0 packets %v, got %v", want, got) } - if want, got := 9, len(netStats); want != got { + if want, got := 10, len(netStats); want != got { t.Errorf("want count of devices to be %d, got %d", want, got) } @@ -52,31 +187,154 @@ func TestNetDevStatsIgnore(t *testing.T) { t.Error("want fixture interface veth4B09XN to not exist, but it does") } - if want, got := "0", netStats["ibr10:30"]["receive_fifo"]; want != got { + if want, got := uint64(0), netStats["ibr10:30"]["receive_fifo"]; want != got { t.Error("want fixture interface ibr10:30 to exist, but it does not") } - if want, got := "72", netStats["💩0"]["receive_multicast"]; want != got { + if want, got := uint64(72), netStats["💩0"]["multicast"]; want != got { t.Error("want fixture interface 💩0 to exist, but it does not") } } func TestNetDevStatsAccept(t *testing.T) { - file, err := os.Open("fixtures/proc/net/dev") - if err != nil { - t.Fatal(err) - } - defer file.Close() - - netStats, err := parseNetDevStats(file, nil, regexp.MustCompile("^💩0$"), log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } + filter := newDeviceFilter("", "^💩0$") + netStats := parseNetlinkStats(links, &filter, slog.New(slog.NewTextHandler(io.Discard, nil))) if want, got := 1, len(netStats); want != got { t.Errorf("want count of devices to be %d, got %d", want, got) } - if want, got := "72", netStats["💩0"]["receive_multicast"]; want != got { + if want, got := uint64(72), 
netStats["💩0"]["multicast"]; want != got { t.Error("want fixture interface 💩0 to exist, but it does not") } } + +func TestNetDevLegacyMetricNames(t *testing.T) { + expected := []string{ + "receive_packets", + "transmit_packets", + "receive_bytes", + "transmit_bytes", + "receive_errs", + "transmit_errs", + "receive_drop", + "transmit_drop", + "receive_multicast", + "transmit_colls", + "receive_frame", + "receive_fifo", + "transmit_carrier", + "transmit_fifo", + "receive_compressed", + "transmit_compressed", + } + + filter := newDeviceFilter("", "") + netStats := parseNetlinkStats(links, &filter, slog.New(slog.NewTextHandler(io.Discard, nil))) + + for dev, devStats := range netStats { + legacy(devStats) + for _, name := range expected { + if _, ok := devStats[name]; !ok { + t.Errorf("metric %s should be defined on interface %s", name, dev) + } + } + } +} + +func TestNetDevLegacyMetricValues(t *testing.T) { + expected := map[string]uint64{ + "receive_packets": 226, + "transmit_packets": 803, + "receive_bytes": 231424, + "transmit_bytes": 822272, + "receive_errs": 14, + "transmit_errs": 2, + "receive_drop": 10 + 21, + "transmit_drop": 17, + "receive_multicast": 285, + "transmit_colls": 30, + "receive_frame": 5 + 3 + 1 + 4, + "receive_fifo": 6, + "transmit_carrier": 22 + 7 + 9 + 19, + "transmit_fifo": 24, + "receive_compressed": 23, + "transmit_compressed": 20, + } + + filter := newDeviceFilter("", "^enp0s0f0$") + netStats := parseNetlinkStats(links, &filter, slog.New(slog.NewTextHandler(io.Discard, nil))) + metrics, ok := netStats["enp0s0f0"] + if !ok { + t.Error("expected stats for interface enp0s0f0") + } + + legacy(metrics) + + for name, want := range expected { + got, ok := metrics[name] + if !ok { + t.Errorf("metric %s should be defined on interface enp0s0f0", name) + continue + } + if want != got { + t.Errorf("want %s %d, got %d", name, want, got) + } + } +} + +func TestNetDevMetricValues(t *testing.T) { + filter := newDeviceFilter("", "") + netStats := parseNetlinkStats(links, &filter, slog.New(slog.NewTextHandler(io.Discard, nil))) + + for _, msg := range links { + device := msg.Attributes.Name + stats := msg.Attributes.Stats64 + + expected := map[string]uint64{ + "receive_packets": stats.RXPackets, + "transmit_packets": stats.TXPackets, + "receive_bytes": stats.RXBytes, + "transmit_bytes": stats.TXBytes, + "receive_errors": stats.RXErrors, + "transmit_errors": stats.TXErrors, + "receive_dropped": stats.RXDropped, + "transmit_dropped": stats.TXDropped, + "multicast": stats.Multicast, + "collisions": stats.Collisions, + + // detailed rx_errors + "receive_length_errors": stats.RXLengthErrors, + "receive_over_errors": stats.RXOverErrors, + "receive_crc_errors": stats.RXCRCErrors, + "receive_frame_errors": stats.RXFrameErrors, + "receive_fifo_errors": stats.RXFIFOErrors, + "receive_missed_errors": stats.RXMissedErrors, + + // detailed tx_errors + "transmit_aborted_errors": stats.TXAbortedErrors, + "transmit_carrier_errors": stats.TXCarrierErrors, + "transmit_fifo_errors": stats.TXFIFOErrors, + "transmit_heartbeat_errors": stats.TXHeartbeatErrors, + "transmit_window_errors": stats.TXWindowErrors, + + // for cslip etc + "receive_compressed": stats.RXCompressed, + "transmit_compressed": stats.TXCompressed, + "receive_nohandler": stats.RXNoHandler, + } + + for name, want := range expected { + devStats, ok := netStats[device] + if !ok { + t.Errorf("expected stats for interface %s", device) + } + got, ok := devStats[name] + if !ok { + t.Errorf("metric %s should be defined on interface %s", name, 
device) + } + if want != got { + t.Errorf("want %s %d, got %d", name, want, got) + } + } + } +} diff --git a/collector/netdev_openbsd.go b/collector/netdev_openbsd.go index f7c90309d9..85df680182 100644 --- a/collector/netdev_openbsd.go +++ b/collector/netdev_openbsd.go @@ -11,17 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nonetdev +//go:build !nonetdev && !amd64 package collector import ( "errors" - "regexp" - "strconv" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "log/slog" ) /* @@ -32,8 +28,8 @@ import ( */ import "C" -func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) { - netDev := map[string]map[string]string{} +func getNetDevStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, error) { + netDev := netDevStats{} var ifap, ifa *C.struct_ifaddrs if C.getifaddrs(&ifap) == -1 { @@ -42,32 +38,39 @@ func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Log defer C.freeifaddrs(ifap) for ifa = ifap; ifa != nil; ifa = ifa.ifa_next { - if ifa.ifa_addr.sa_family == C.AF_LINK { - dev := C.GoString(ifa.ifa_name) - if ignore != nil && ignore.MatchString(dev) { - level.Debug(logger).Log("msg", "Ignoring device", "device", dev) - continue - } - if accept != nil && !accept.MatchString(dev) { - level.Debug(logger).Log("msg", "Ignoring device", "device", dev) - continue - } + if ifa.ifa_addr.sa_family != C.AF_LINK { + continue + } + + dev := C.GoString(ifa.ifa_name) + if filter.ignored(dev) { + logger.Debug("Ignoring device", "device", dev) + continue + } - devStats := map[string]string{} - data := (*C.struct_if_data)(ifa.ifa_data) + data := (*C.struct_if_data)(ifa.ifa_data) - devStats["receive_packets"] = strconv.Itoa(int(data.ifi_ipackets)) - devStats["transmit_packets"] = strconv.Itoa(int(data.ifi_opackets)) - devStats["receive_errs"] = strconv.Itoa(int(data.ifi_ierrors)) - devStats["transmit_errs"] = strconv.Itoa(int(data.ifi_oerrors)) - devStats["receive_bytes"] = strconv.Itoa(int(data.ifi_ibytes)) - devStats["transmit_bytes"] = strconv.Itoa(int(data.ifi_obytes)) - devStats["receive_multicast"] = strconv.Itoa(int(data.ifi_imcasts)) - devStats["transmit_multicast"] = strconv.Itoa(int(data.ifi_omcasts)) - devStats["receive_drop"] = strconv.Itoa(int(data.ifi_iqdrops)) - netDev[dev] = devStats + // https://github.com/openbsd/src/blob/master/sys/net/if.h#L101-L126 + netDev[dev] = map[string]uint64{ + "receive_packets": uint64(data.ifi_ipackets), + "transmit_packets": uint64(data.ifi_opackets), + "receive_bytes": uint64(data.ifi_ibytes), + "transmit_bytes": uint64(data.ifi_obytes), + "receive_errors": uint64(data.ifi_ierrors), + "transmit_errors": uint64(data.ifi_oerrors), + "receive_dropped": uint64(data.ifi_iqdrops), + "transmit_dropped": uint64(data.ifi_oqdrops), + "receive_multicast": uint64(data.ifi_imcasts), + "transmit_multicast": uint64(data.ifi_omcasts), + "collisions": uint64(data.ifi_collisions), + "noproto": uint64(data.ifi_noproto), } } return netDev, nil } + +func getNetDevLabels() (map[string]map[string]string, error) { + // to be implemented if needed + return nil, nil +} diff --git a/collector/netdev_openbsd_amd64.go b/collector/netdev_openbsd_amd64.go new file mode 100644 index 0000000000..62b706d047 --- /dev/null +++ b/collector/netdev_openbsd_amd64.go @@ -0,0 +1,82 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nonetdev + +package collector + +import ( + "log/slog" + + "unsafe" + + "golang.org/x/sys/unix" +) + +func getNetDevStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, error) { + netDev := netDevStats{} + + mib := [6]_C_int{unix.CTL_NET, unix.AF_ROUTE, 0, 0, unix.NET_RT_IFLIST, 0} + buf, err := sysctl(mib[:]) + if err != nil { + return nil, err + } + n := uintptr(len(buf)) + index := uintptr(unsafe.Pointer(&buf[0])) + next := uintptr(0) + + var rtm *unix.RtMsghdr + + for next = index; next < (index + n); next += uintptr(rtm.Msglen) { + rtm = (*unix.RtMsghdr)(unsafe.Pointer(next)) + if rtm.Version != unix.RTM_VERSION || rtm.Type != unix.RTM_IFINFO { + continue + } + ifm := (*unix.IfMsghdr)(unsafe.Pointer(next)) + if ifm.Addrs&unix.RTA_IFP == 0 { + continue + } + dl := (*unix.RawSockaddrDatalink)(unsafe.Pointer(next + uintptr(rtm.Hdrlen))) + if dl.Family != unix.AF_LINK { + continue + } + data := ifm.Data + dev := int8ToString(dl.Data[:dl.Nlen]) + if filter.ignored(dev) { + logger.Debug("Ignoring device", "device", dev) + continue + } + + // https://cs.opensource.google/go/x/sys/+/master:unix/ztypes_openbsd_amd64.go;l=292-316 + netDev[dev] = map[string]uint64{ + "receive_packets": data.Ipackets, + "transmit_packets": data.Opackets, + "receive_bytes": data.Ibytes, + "transmit_bytes": data.Obytes, + "receive_errors": data.Ierrors, + "transmit_errors": data.Oerrors, + "receive_dropped": data.Iqdrops, + "transmit_dropped": data.Oqdrops, + "receive_multicast": data.Imcasts, + "transmit_multicast": data.Omcasts, + "collisions": data.Collisions, + "noproto": data.Noproto, + } + } + return netDev, nil +} + +func getNetDevLabels() (map[string]map[string]string, error) { + // to be implemented if needed + return nil, nil +} diff --git a/collector/netinterface_aix.go b/collector/netinterface_aix.go new file mode 100644 index 0000000000..913963e74f --- /dev/null +++ b/collector/netinterface_aix.go @@ -0,0 +1,85 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
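Taken together, the per-platform netdev files above now share one contract: each platform implements getNetDevStats(*deviceFilter, *slog.Logger) (netDevStats, error) and getNetDevLabels() (map[string]map[string]string, error), while netdev.go owns legacy renaming, optional labels and metric emission. A hypothetical port to a further platform would therefore only need a stub along these lines in its own netdev_<goos>.go file (sketch only; the platform-specific counter source is assumed):

	//go:build !nonetdev

	package collector

	import "log/slog"

	func getNetDevStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, error) {
		netDev := netDevStats{}
		// Populate netDev["eth0"] = map[string]uint64{"receive_bytes": ..., "transmit_bytes": ...}
		// from the platform's interface API, skipping any device for which
		// filter.ignored(name) returns true.
		return netDev, nil
	}

	func getNetDevLabels() (map[string]map[string]string, error) {
		// No extra per-device labels on this platform.
		return nil, nil
	}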
+ +//go:build !nonetinterface + +package collector + +import ( + "log/slog" + + "github.com/power-devops/perfstat" + "github.com/prometheus/client_golang/prometheus" +) + +type netinterfaceCollector struct { + logger *slog.Logger + collisions *prometheus.Desc + ibytes *prometheus.Desc + ipackets *prometheus.Desc + obytes *prometheus.Desc + opackets *prometheus.Desc +} + +const ( + netinterfaceSubsystem = "netinterface" +) + +func init() { + registerCollector("netinterface", defaultEnabled, NewNetinterfaceCollector) +} + +func NewNetinterfaceCollector(logger *slog.Logger) (Collector, error) { + labels := []string{"interface"} + return &netinterfaceCollector{ + logger: logger, + collisions: prometheus.NewDesc( + prometheus.BuildFQName(namespace, netinterfaceSubsystem, "collisions_total"), + "Total number of CSMA collisions on the interface.", labels, nil, + ), + ibytes: prometheus.NewDesc( + prometheus.BuildFQName(namespace, netinterfaceSubsystem, "receive_bytes_total"), + "Total number of bytes received on the interface.", labels, nil, + ), + ipackets: prometheus.NewDesc( + prometheus.BuildFQName(namespace, netinterfaceSubsystem, "receive_packets_total"), + "Total number of packets received on the interface.", labels, nil, + ), + obytes: prometheus.NewDesc( + prometheus.BuildFQName(namespace, netinterfaceSubsystem, "transmit_bytes_total"), + "Total number of bytes transmitted on the interface.", labels, nil, + ), + opackets: prometheus.NewDesc( + prometheus.BuildFQName(namespace, netinterfaceSubsystem, "transmit_packets_total"), + "Total number of packets transmitted on the interface.", labels, nil, + ), + }, nil +} + +func (c *netinterfaceCollector) Update(ch chan<- prometheus.Metric) error { + stats, err := perfstat.NetIfaceStat() + if err != nil { + return err + } + + for _, stat := range stats { + iface := stat.Name + + ch <- prometheus.MustNewConstMetric(c.collisions, prometheus.CounterValue, float64(stat.Collisions), iface) + ch <- prometheus.MustNewConstMetric(c.ibytes, prometheus.CounterValue, float64(stat.IBytes), iface) + ch <- prometheus.MustNewConstMetric(c.ipackets, prometheus.CounterValue, float64(stat.IPackets), iface) + ch <- prometheus.MustNewConstMetric(c.obytes, prometheus.CounterValue, float64(stat.OBytes), iface) + ch <- prometheus.MustNewConstMetric(c.opackets, prometheus.CounterValue, float64(stat.OPackets), iface) + } + return nil +} diff --git a/collector/netisr_freebsd.go b/collector/netisr_freebsd.go new file mode 100644 index 0000000000..608adbbf9e --- /dev/null +++ b/collector/netisr_freebsd.go @@ -0,0 +1,104 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
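Like the other new collectors in this change, the AIX netinterface collector can be exercised in isolation with a buffered channel, mirroring the test style used elsewhere in this diff (sketch only; perfstat requires an AIX host, and the usual io/slog/prometheus imports are assumed):

	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
	c, err := NewNetinterfaceCollector(logger)
	if err != nil {
		// handle constructor error
	}
	ch := make(chan prometheus.Metric, 64)
	if err := c.Update(ch); err != nil {
		// perfstat.NetIfaceStat failed
	}
	// Each interface yields five counters: collisions plus receive/transmit bytes and packets.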
+ +//go:build !nonetisr + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" +) + +type netisrCollector struct { + sysctls []bsdSysctl + logger *slog.Logger +} + +const ( + netisrCollectorSubsystem = "netisr" +) + +func init() { + registerCollector("netisr", defaultEnabled, NewNetisrCollector) +} + +func NewNetisrCollector(logger *slog.Logger) (Collector, error) { + return &netisrCollector{ + sysctls: []bsdSysctl{ + { + name: "numthreads", + description: "netisr current thread count", + mib: "net.isr.numthreads", + dataType: bsdSysctlTypeUint32, + valueType: prometheus.GaugeValue, + }, + { + name: "maxprot", + description: "netisr maximum protocols", + mib: "net.isr.maxprot", + dataType: bsdSysctlTypeUint32, + valueType: prometheus.GaugeValue, + }, + { + name: "defaultqlimit", + description: "netisr default queue limit", + mib: "net.isr.defaultqlimit", + dataType: bsdSysctlTypeUint32, + valueType: prometheus.GaugeValue, + }, + { + name: "maxqlimit", + description: "netisr maximum queue limit", + mib: "net.isr.maxqlimit", + dataType: bsdSysctlTypeUint32, + valueType: prometheus.GaugeValue, + }, + { + name: "bindthreads", + description: "netisr threads bound to CPUs", + mib: "net.isr.bindthreads", + dataType: bsdSysctlTypeUint32, + valueType: prometheus.GaugeValue, + }, + { + name: "maxthreads", + description: "netisr maximum thread count", + mib: "net.isr.maxthreads", + dataType: bsdSysctlTypeUint32, + valueType: prometheus.GaugeValue, + }, + }, + logger: logger, + }, nil +} + +func (c *netisrCollector) Update(ch chan<- prometheus.Metric) error { + for _, m := range c.sysctls { + v, err := m.Value() + if err != nil { + return fmt.Errorf("couldn't get sysctl: %w", err) + } + + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, netisrCollectorSubsystem, m.name), + m.description, + nil, nil, + ), m.valueType, v) + } + + return nil +} diff --git a/collector/netstat_freebsd.go b/collector/netstat_freebsd.go new file mode 100644 index 0000000000..3688abca86 --- /dev/null +++ b/collector/netstat_freebsd.go @@ -0,0 +1,107 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
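The six entries above map directly to the net.isr.* sysctls; the bsdSysctl helper (defined elsewhere in this package) reads them on every scrape and exports each as a gauge. For a quick cross-check outside the exporter, the same values can be read directly on a FreeBSD host (sketch only; assumes golang.org/x/sys/unix):

	v, err := unix.SysctlUint32("net.isr.numthreads")
	if err != nil {
		return err // sysctl unavailable on this system
	}
	fmt.Println("net.isr.numthreads =", v)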
+ +//go:build freebsd + +package collector + +import ( + "errors" + "fmt" + "log/slog" + "unsafe" + + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +var ( + bsdNetstatTcpSendPacketsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "netstat", "tcp_transmit_packets_total"), + "TCP packets sent", + nil, nil, + ) + + bsdNetstatTcpRecvPacketsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "netstat", "tcp_receive_packets_total"), + "TCP packets received", + nil, nil, + ) +) + +type netStatCollector struct { + netStatMetric *prometheus.Desc +} + +func init() { + registerCollector("netstat", defaultEnabled, NewNetStatCollector) +} + +func NewNetStatCollector(logger *slog.Logger) (Collector, error) { + return &netStatCollector{}, nil +} + +func (c *netStatCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.netStatMetric +} + +func (c *netStatCollector) Collect(ch chan<- prometheus.Metric) { + _ = c.Update(ch) +} + +func getData(queryString string) ([]byte, error) { + data, err := unix.SysctlRaw(queryString) + if err != nil { + fmt.Println("Error:", err) + return nil, err + } + + if len(data) < int(unsafe.Sizeof(C.struct_tcpstat{})) { + return nil, errors.New("Data Size mismatch") + } + return data, nil +} + +func (c *netStatCollector) Update(ch chan<- prometheus.Metric) error { + + tcpData, err := getData("net.inet.tcp.stats") + if err != nil { + return err + } + + tcpStats := *(*C.struct_tcpstat)(unsafe.Pointer(&tcpData[0])) + + ch <- prometheus.MustNewConstMetric( + bsdNetstatTcpSendPacketsTotal, + prometheus.CounterValue, + float64(tcpStats.tcps_sndtotal), + ) + + ch <- prometheus.MustNewConstMetric( + bsdNetstatTcpRecvPacketsTotal, + prometheus.CounterValue, + float64(tcpStats.tcps_rcvtotal), + ) + + return nil +} diff --git a/collector/netstat_freebsd_test.go b/collector/netstat_freebsd_test.go new file mode 100644 index 0000000000..2b3e05759e --- /dev/null +++ b/collector/netstat_freebsd_test.go @@ -0,0 +1,77 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build freebsd + +package collector + +import ( + "testing" + "unsafe" + + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +func TestNetStatCollectorDescribe(t *testing.T) { + ch := make(chan *prometheus.Desc, 1) + collector := &netStatCollector{ + netStatMetric: prometheus.NewDesc("dummy_metric", "dummy", nil, nil), + } + collector.Describe(ch) + desc := <-ch + + if want, got := "dummy_metric", desc.String(); want != got { + t.Errorf("want %s, got %s", want, got) + } +} + +func TestGetData(t *testing.T) { + data, err := getData("net.inet.tcp.stats") + if err != nil { + t.Fatal("unexpected error:", err) + } + + if got, want := len(data), int(unsafe.Sizeof(unix.TCPStats{})); got < want { + t.Errorf("data length too small: want >= %d, got %d", want, got) + } +} + +func TestNetStatCollectorUpdate(t *testing.T) { + ch := make(chan prometheus.Metric, len(metrics)) + collector := &netStatCollector{ + netStatMetric: prometheus.NewDesc("netstat_metric", "NetStat Metric", nil, nil), + } + err := collector.Update(ch) + if err != nil { + t.Fatal("unexpected error:", err) + } + + if got, want := len(ch), len(metrics); got != want { + t.Errorf("metric count mismatch: want %d, got %d", want, got) + } + + for range metrics { + <-ch + } +} + +func TestNewNetStatCollector(t *testing.T) { + collector, err := NewNetStatCollector(nil) + if err != nil { + t.Fatal("unexpected error:", err) + } + if collector == nil { + t.Fatal("collector is nil, want non-nil") + } +} diff --git a/collector/netstat_linux.go b/collector/netstat_linux.go index 98447f86cb..5065a19af3 100644 --- a/collector/netstat_linux.go +++ b/collector/netstat_linux.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nonetstat +//go:build !nonetstat package collector @@ -20,14 +20,15 @@ import ( "errors" "fmt" "io" + "log/slog" + "maps" "os" "regexp" "strconv" "strings" - "github.com/go-kit/kit/log" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" ) const ( @@ -35,12 +36,12 @@ const ( ) var ( - netStatFields = kingpin.Flag("collector.netstat.fields", "Regexp of fields to return for netstat collector.").Default("^(.*_(InErrors|InErrs)|Ip_Forwarding|Ip(6|Ext)_(InOctets|OutOctets)|Icmp6?_(InMsgs|OutMsgs)|TcpExt_(Listen.*|Syncookies.*|TCPSynRetrans)|Tcp_(ActiveOpens|InSegs|OutSegs|OutRsts|PassiveOpens|RetransSegs|CurrEstab)|Udp6?_(InDatagrams|OutDatagrams|NoPorts|RcvbufErrors|SndbufErrors))$").String() + netStatFields = kingpin.Flag("collector.netstat.fields", "Regexp of fields to return for netstat collector.").Default("^(.*_(InErrors|InErrs)|Ip_Forwarding|Ip(6|Ext)_(InOctets|OutOctets)|Icmp6?_(InMsgs|OutMsgs)|TcpExt_(Listen.*|Syncookies.*|TCPSynRetrans|TCPTimeouts|TCPOFOQueue|TCPRcvQDrop)|Tcp_(ActiveOpens|InSegs|OutSegs|OutRsts|PassiveOpens|RetransSegs|CurrEstab)|Udp6?_(InDatagrams|OutDatagrams|NoPorts|RcvbufErrors|SndbufErrors))$").String() ) type netStatCollector struct { fieldPattern *regexp.Regexp - logger log.Logger + logger *slog.Logger } func init() { @@ -49,7 +50,7 @@ func init() { // NewNetStatCollector takes and returns // a new Collector exposing network stats. 
-func NewNetStatCollector(logger log.Logger) (Collector, error) { +func NewNetStatCollector(logger *slog.Logger) (Collector, error) { pattern := regexp.MustCompile(*netStatFields) return &netStatCollector{ fieldPattern: pattern, @@ -72,12 +73,8 @@ func (c *netStatCollector) Update(ch chan<- prometheus.Metric) error { } // Merge the results of snmpStats into netStats (collisions are possible, but // we know that the keys are always unique for the given use case). - for k, v := range snmpStats { - netStats[k] = v - } - for k, v := range snmp6Stats { - netStats[k] = v - } + maps.Copy(netStats, snmpStats) + maps.Copy(netStats, snmp6Stats) for protocol, protocolStats := range netStats { for name, value := range protocolStats { key := protocol + "_" + name diff --git a/collector/netstat_linux_test.go b/collector/netstat_linux_test.go index a27382b416..a30f44a2c9 100644 --- a/collector/netstat_linux_test.go +++ b/collector/netstat_linux_test.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nonetstat + package collector import ( diff --git a/collector/network_route_linux.go b/collector/network_route_linux.go new file mode 100644 index 0000000000..d290b202d3 --- /dev/null +++ b/collector/network_route_linux.go @@ -0,0 +1,203 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nonetworkroute + +package collector + +import ( + "fmt" + "log/slog" + "net" + "strconv" + + "github.com/jsimonetti/rtnetlink/v2" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +type networkRouteCollector struct { + routeInfoDesc *prometheus.Desc + routesDesc *prometheus.Desc + logger *slog.Logger +} + +func init() { + registerCollector("network_route", defaultDisabled, NewNetworkRouteCollector) +} + +// NewNetworkRouteCollector returns a new Collector exposing systemd statistics. 
+func NewNetworkRouteCollector(logger *slog.Logger) (Collector, error) { + const subsystem = "network" + + routeInfoDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "route_info"), + "network routing table information", []string{"device", "src", "dest", "gw", "priority", "proto", "weight"}, nil, + ) + routesDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "routes"), + "network routes by interface", []string{"device"}, nil, + ) + + return &networkRouteCollector{ + routeInfoDesc: routeInfoDesc, + routesDesc: routesDesc, + logger: logger, + }, nil +} + +func (n networkRouteCollector) Update(ch chan<- prometheus.Metric) error { + deviceRoutes := make(map[string]int) + + conn, err := rtnetlink.Dial(nil) + if err != nil { + return fmt.Errorf("couldn't connect rtnetlink: %w", err) + } + defer conn.Close() + + links, err := conn.Link.List() + if err != nil { + return fmt.Errorf("couldn't get links: %w", err) + } + + routes, err := conn.Route.List() + if err != nil { + return fmt.Errorf("couldn't get routes: %w", err) + } + + for _, route := range routes { + if route.Type != unix.RTA_DST { + continue + } + if len(route.Attributes.Multipath) != 0 { + for _, nextHop := range route.Attributes.Multipath { + ifName := "" + for _, link := range links { + if link.Index == nextHop.Hop.IfIndex { + ifName = link.Attributes.Name + break + } + } + + labels := []string{ + ifName, // if + networkRouteIPToString(route.Attributes.Src), // src + networkRouteIPWithPrefixToString(route.Attributes.Dst, route.DstLength), // dest + networkRouteIPToString(nextHop.Gateway), // gw + strconv.FormatUint(uint64(route.Attributes.Priority), 10), // priority(metrics) + networkRouteProtocolToString(route.Protocol), // proto + strconv.Itoa(int(nextHop.Hop.Hops) + 1), // weight + } + ch <- prometheus.MustNewConstMetric(n.routeInfoDesc, prometheus.GaugeValue, 1, labels...) + deviceRoutes[ifName]++ + } + } else { + ifName := "" + for _, link := range links { + if link.Index == route.Attributes.OutIface { + ifName = link.Attributes.Name + break + } + } + + labels := []string{ + ifName, // if + networkRouteIPToString(route.Attributes.Src), // src + networkRouteIPWithPrefixToString(route.Attributes.Dst, route.DstLength), // dest + networkRouteIPToString(route.Attributes.Gateway), // gw + strconv.FormatUint(uint64(route.Attributes.Priority), 10), // priority(metrics) + networkRouteProtocolToString(route.Protocol), // proto + "", // weight + } + ch <- prometheus.MustNewConstMetric(n.routeInfoDesc, prometheus.GaugeValue, 1, labels...) 
+ deviceRoutes[ifName]++ + } + } + + for dev, total := range deviceRoutes { + ch <- prometheus.MustNewConstMetric(n.routesDesc, prometheus.GaugeValue, float64(total), dev) + } + + return nil +} + +func networkRouteIPWithPrefixToString(ip net.IP, len uint8) string { + if len == 0 { + return "default" + } + iplen := net.IPv4len + if ip.To4() == nil { + iplen = net.IPv6len + } + network := &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(len), iplen*8), + } + return network.String() +} + +func networkRouteIPToString(ip net.IP) string { + if len(ip) == 0 { + return "" + } + return ip.String() +} + +func networkRouteProtocolToString(protocol uint8) string { + // from linux kernel 'include/uapi/linux/rtnetlink.h' + switch protocol { + case 0: + return "unspec" + case 1: + return "redirect" + case 2: + return "kernel" + case 3: + return "boot" + case 4: + return "static" + case 8: + return "gated" + case 9: + return "ra" + case 10: + return "mrt" + case 11: + return "zebra" + case 12: + return "bird" + case 13: + return "dnrouted" + case 14: + return "xorp" + case 15: + return "ntk" + case 16: + return "dhcp" + case 17: + return "mrouted" + case 42: + return "babel" + case 186: + return "bgp" + case 187: + return "isis" + case 188: + return "ospf" + case 189: + return "rip" + case 192: + return "eigrp" + } + return "unknown" +} diff --git a/collector/nfs_linux.go b/collector/nfs_linux.go index 60803eb00b..7815dd0687 100644 --- a/collector/nfs_linux.go +++ b/collector/nfs_linux.go @@ -11,18 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nonfs +//go:build !nonfs package collector import ( "errors" "fmt" + "log/slog" "os" "reflect" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/nfs" ) @@ -39,7 +38,7 @@ type nfsCollector struct { nfsRPCRetransmissionsDesc *prometheus.Desc nfsRPCAuthenticationRefreshesDesc *prometheus.Desc nfsProceduresDesc *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -47,7 +46,7 @@ func init() { } // NewNfsCollector returns a new Collector exposing NFS statistics. -func NewNfsCollector(logger log.Logger) (Collector, error) { +func NewNfsCollector(logger *slog.Logger) (Collector, error) { fs, err := nfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) @@ -99,7 +98,7 @@ func (c *nfsCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.ClientRPCStats() if err != nil { if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "Not collecting NFS metrics", "err", err) + c.logger.Debug("Not collecting NFS metrics", "err", err) return ErrNoData } return fmt.Errorf("failed to retrieve nfs stats: %w", err) diff --git a/collector/nfsd_linux.go b/collector/nfsd_linux.go index 3dba89954e..866be828f7 100644 --- a/collector/nfsd_linux.go +++ b/collector/nfsd_linux.go @@ -11,17 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
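The nfsd collector below gets the same logging migration as the nfs collector above: go-kit's level.Debug(logger).Log("msg", ...) calls become structured calls on *slog.Logger. Where one of these constructors is exercised directly, the logger can be built from the standard library alone; the discarding variant is what the new netdev tests earlier in this diff use (illustrative sketch):

	logger := slog.New(slog.NewTextHandler(io.Discard, nil)) // silent, for tests
	logger = slog.New(slog.NewTextHandler(os.Stderr, nil))   // plain text logging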
-// +build !nonfsd +//go:build !nonfsd package collector import ( "errors" "fmt" + "log/slog" "os" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/nfs" ) @@ -31,7 +30,7 @@ import ( type nfsdCollector struct { fs nfs.FS requestsDesc *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -43,7 +42,7 @@ const ( ) // NewNFSdCollector returns a new Collector exposing /proc/net/rpc/nfsd statistics. -func NewNFSdCollector(logger log.Logger) (Collector, error) { +func NewNFSdCollector(logger *slog.Logger) (Collector, error) { fs, err := nfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) @@ -65,7 +64,7 @@ func (c *nfsdCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.ServerRPCStats() if err != nil { if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "Not collecting NFSd metrics", "err", err) + c.logger.Debug("Not collecting NFSd metrics", "err", err) return ErrNoData } return fmt.Errorf("failed to retrieve nfsd stats: %w", err) @@ -81,6 +80,8 @@ func (c *nfsdCollector) Update(ch chan<- prometheus.Metric) error { c.updateNFSdRequestsv2Stats(ch, &stats.V2Stats) c.updateNFSdRequestsv3Stats(ch, &stats.V3Stats) c.updateNFSdRequestsv4Stats(ch, &stats.V4Ops) + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(stats.WdelegGetattr), "4", "WdelegGetattr") return nil } @@ -394,6 +395,10 @@ func (c *nfsdCollector) updateNFSdRequestsv4Stats(ch chan<- prometheus.Metric, s float64(s.SecInfo), proto, "SecInfo") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SetAttr), proto, "SetAttr") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.SetClientID), proto, "SetClientID") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.SetClientIDConfirm), proto, "SetClientIDConfirm") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Verify), proto, "Verify") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, diff --git a/collector/ntp.go b/collector/ntp.go index c7d55c9907..e1465f5421 100644 --- a/collector/ntp.go +++ b/collector/ntp.go @@ -11,20 +11,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !nontp +//go:build !nontp package collector import ( "fmt" + "log/slog" "net" "sync" "time" + "github.com/alecthomas/kingpin/v2" "github.com/beevik/ntp" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" ) const ( @@ -34,6 +34,7 @@ const ( var ( ntpServer = kingpin.Flag("collector.ntp.server", "NTP server to use for ntp collector").Default("127.0.0.1").String() + ntpServerPort = kingpin.Flag("collector.ntp.server-port", "UDP port number to connect to on NTP server").Default("123").Int() ntpProtocolVersion = kingpin.Flag("collector.ntp.protocol-version", "NTP protocol version").Default("4").Int() ntpServerIsLocal = kingpin.Flag("collector.ntp.server-is-local", "Certify that collector.ntp.server address is not a public ntp server").Default("false").Bool() ntpIPTTL = kingpin.Flag("collector.ntp.ip-ttl", "IP TTL to use while sending NTP query").Default("1").Int() @@ -48,7 +49,7 @@ var ( type ntpCollector struct { stratum, leap, rtt, offset, reftime, rootDelay, rootDispersion, sanity typedDesc - logger log.Logger + logger *slog.Logger } func init() { @@ -58,8 +59,8 @@ func init() { // NewNtpCollector returns a new Collector exposing sanity of local NTP server. // Default definition of "local" is: // - collector.ntp.server address is a loopback address (or collector.ntp.server-is-mine flag is turned on) -// - the server is reachable with outgoin IP_TTL = 1 -func NewNtpCollector(logger log.Logger) (Collector, error) { +// - the server is reachable with outgoing IP_TTL = 1 +func NewNtpCollector(logger *slog.Logger) (Collector, error) { ipaddr := net.ParseIP(*ntpServer) if !*ntpServerIsLocal && (ipaddr == nil || !ipaddr.IsLoopback()) { return nil, fmt.Errorf("only IP address of local NTP server is valid for --collector.ntp.server") @@ -73,6 +74,11 @@ func NewNtpCollector(logger log.Logger) (Collector, error) { return nil, fmt.Errorf("offset tolerance must be non-negative") } + if *ntpServerPort < 1 || *ntpServerPort > 65535 { + return nil, fmt.Errorf("invalid NTP port number %d; must be between 1 and 65535 inclusive", *ntpServerPort) + } + + logger.Warn("This collector is deprecated and will be removed in the next major version release.") return &ntpCollector{ stratum: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, ntpSubsystem, "stratum"), @@ -123,6 +129,7 @@ func (c *ntpCollector) Update(ch chan<- prometheus.Metric) error { Version: *ntpProtocolVersion, TTL: *ntpIPTTL, Timeout: time.Second, // default `ntpdate` timeout + Port: *ntpServerPort, }) if err != nil { return fmt.Errorf("couldn't get SNTP reply: %w", err) diff --git a/collector/nvme_linux.go b/collector/nvme_linux.go new file mode 100644 index 0000000000..6de1b9368f --- /dev/null +++ b/collector/nvme_linux.go @@ -0,0 +1,72 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nonvme + +package collector + +import ( + "errors" + "fmt" + "log/slog" + "os" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +type nvmeCollector struct { + fs sysfs.FS + logger *slog.Logger +} + +func init() { + registerCollector("nvme", defaultEnabled, NewNVMeCollector) +} + +// NewNVMeCollector returns a new Collector exposing NVMe stats. +func NewNVMeCollector(logger *slog.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + return &nvmeCollector{ + fs: fs, + logger: logger, + }, nil +} + +func (c *nvmeCollector) Update(ch chan<- prometheus.Metric) error { + devices, err := c.fs.NVMeClass() + if err != nil { + if errors.Is(err, os.ErrNotExist) { + c.logger.Debug("nvme statistics not found, skipping") + return ErrNoData + } + return fmt.Errorf("error obtaining NVMe class info: %w", err) + } + + for _, device := range devices { + infoDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, "nvme", "info"), + "Non-numeric data from /sys/class/nvme/, value is always 1.", + []string{"device", "firmware_revision", "model", "serial", "state"}, + nil, + ) + infoValue := 1.0 + ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, device.Name, device.FirmwareRevision, device.Model, device.Serial, device.State) + } + + return nil +} diff --git a/collector/os_release.go b/collector/os_release.go new file mode 100644 index 0000000000..6acf9bed02 --- /dev/null +++ b/collector/os_release.go @@ -0,0 +1,235 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noosrelease && !aix + +package collector + +import ( + "encoding/xml" + "errors" + "io" + "log/slog" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + envparse "github.com/hashicorp/go-envparse" + "github.com/prometheus/client_golang/prometheus" +) + +const ( + etcOSRelease = "/etc/os-release" + usrLibOSRelease = "/usr/lib/os-release" + systemVersionPlist = "/System/Library/CoreServices/SystemVersion.plist" +) + +var ( + versionRegex = regexp.MustCompile(`^[0-9]+\.?[0-9]*`) +) + +type osRelease struct { + Name string + ID string + IDLike string + PrettyName string + Variant string + VariantID string + Version string + VersionID string + VersionCodename string + BuildID string + ImageID string + ImageVersion string + SupportEnd string +} + +type osReleaseCollector struct { + infoDesc *prometheus.Desc + logger *slog.Logger + os *osRelease + osMutex sync.RWMutex + osReleaseFilenames []string // all os-release file names to check + version float64 + versionDesc *prometheus.Desc + supportEnd time.Time + supportEndDesc *prometheus.Desc +} + +type Plist struct { + Dict Dict `xml:"dict"` +} + +type Dict struct { + Key []string `xml:"key"` + String []string `xml:"string"` +} + +func init() { + registerCollector("os", defaultEnabled, NewOSCollector) +} + +// NewOSCollector returns a new Collector exposing os-release information. +func NewOSCollector(logger *slog.Logger) (Collector, error) { + return &osReleaseCollector{ + logger: logger, + infoDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "os", "info"), + "A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, "+ + "name, pretty_name, variant, variant_id, version, version_codename, version_id.", + []string{"build_id", "id", "id_like", "image_id", "image_version", "name", "pretty_name", + "variant", "variant_id", "version", "version_codename", "version_id"}, nil, + ), + osReleaseFilenames: []string{etcOSRelease, usrLibOSRelease, systemVersionPlist}, + versionDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "os", "version"), + "Metric containing the major.minor part of the OS version.", + []string{"id", "id_like", "name"}, nil, + ), + supportEndDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "os", "support_end_timestamp_seconds"), + "Metric containing the end-of-life date timestamp of the OS.", + nil, nil, + ), + }, nil +} + +func parseOSRelease(r io.Reader) (*osRelease, error) { + env, err := envparse.Parse(r) + return &osRelease{ + Name: env["NAME"], + ID: env["ID"], + IDLike: env["ID_LIKE"], + PrettyName: env["PRETTY_NAME"], + Variant: env["VARIANT"], + VariantID: env["VARIANT_ID"], + Version: env["VERSION"], + VersionID: env["VERSION_ID"], + VersionCodename: env["VERSION_CODENAME"], + BuildID: env["BUILD_ID"], + ImageID: env["IMAGE_ID"], + ImageVersion: env["IMAGE_VERSION"], + SupportEnd: env["SUPPORT_END"], + }, err +} + +func (c *osReleaseCollector) UpdateStruct(path string) error { + releaseFile, err := os.Open(path) + if err != nil { + return err + } + defer releaseFile.Close() + + // Acquire a lock to update the osReleaseCollector struct. 
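+ // The parsed os-release values are cached on the collector, so take the write lock before mutating them.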
+ c.osMutex.Lock() + defer c.osMutex.Unlock() + + // SystemVersion.plist is xml file with MacOs version info + if strings.Contains(releaseFile.Name(), "SystemVersion.plist") { + c.os, err = getMacosProductVersion(releaseFile.Name()) + if err != nil { + return err + } + } else { + c.os, err = parseOSRelease(releaseFile) + if err != nil { + return err + } + } + + majorMinor := versionRegex.FindString(c.os.VersionID) + if majorMinor != "" { + c.version, err = strconv.ParseFloat(majorMinor, 64) + if err != nil { + return err + } + } else { + c.version = 0 + } + + if c.os.SupportEnd != "" { + c.supportEnd, err = time.Parse(time.DateOnly, c.os.SupportEnd) + + if err != nil { + return err + } + } + + return nil +} + +func (c *osReleaseCollector) Update(ch chan<- prometheus.Metric) error { + for i, path := range c.osReleaseFilenames { + err := c.UpdateStruct(*rootfsPath + path) + if err == nil { + break + } + if errors.Is(err, os.ErrNotExist) { + if i >= (len(c.osReleaseFilenames) - 1) { + c.logger.Debug("no os-release file found", "files", strings.Join(c.osReleaseFilenames, ",")) + return ErrNoData + } + continue + } + return err + } + + ch <- prometheus.MustNewConstMetric(c.infoDesc, prometheus.GaugeValue, 1.0, + c.os.BuildID, c.os.ID, c.os.IDLike, c.os.ImageID, c.os.ImageVersion, c.os.Name, c.os.PrettyName, + c.os.Variant, c.os.VariantID, c.os.Version, c.os.VersionCodename, c.os.VersionID) + if c.version > 0 { + ch <- prometheus.MustNewConstMetric(c.versionDesc, prometheus.GaugeValue, c.version, + c.os.ID, c.os.IDLike, c.os.Name) + } + + if c.os.SupportEnd != "" { + ch <- prometheus.MustNewConstMetric(c.supportEndDesc, prometheus.GaugeValue, float64(c.supportEnd.Unix())) + } + + return nil +} + +func getMacosProductVersion(filename string) (*osRelease, error) { + f, _ := os.Open(filename) + bytePlist, _ := io.ReadAll(f) + f.Close() + + var plist Plist + err := xml.Unmarshal(bytePlist, &plist) + if err != nil { + return &osRelease{}, err + } + + var osVersionID, osVersionName, osBuildID string + if len(plist.Dict.Key) > 0 { + for index, value := range plist.Dict.Key { + switch value { + case "ProductVersion": + osVersionID = plist.Dict.String[index] + case "ProductName": + osVersionName = plist.Dict.String[index] + case "ProductBuildVersion": + osBuildID = plist.Dict.String[index] + } + } + } + return &osRelease{ + Name: osVersionName, + Version: osVersionID, + VersionID: osVersionID, + BuildID: osBuildID, + }, nil +} diff --git a/collector/os_release_test.go b/collector/os_release_test.go new file mode 100644 index 0000000000..e5d6b501f0 --- /dev/null +++ b/collector/os_release_test.go @@ -0,0 +1,149 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "io" + "log/slog" + "os" + "reflect" + "strings" + "testing" +) + +const debianBullseye string = `PRETTY_NAME="Debian GNU/Linux 11 (bullseye)" +NAME="Debian GNU/Linux" +VERSION_ID="11" +VERSION="11 (bullseye)" +VERSION_CODENAME=bullseye +ID=debian +HOME_URL="https://www.debian.org/" +SUPPORT_URL="https://www.debian.org/support" +BUG_REPORT_URL="https://bugs.debian.org/" +` + +const nixosTapir string = `BUG_REPORT_URL="https://github.com/NixOS/nixpkgs/issues" +BUILD_ID="23.11.20240328.219951b" +DOCUMENTATION_URL="https://nixos.org/learn.html" +HOME_URL="https://nixos.org/" +ID=nixos +LOGO="nix-snowflake" +NAME=NixOS +PRETTY_NAME="NixOS 23.11 (Tapir)" +SUPPORT_END="2024-06-30" +SUPPORT_URL="https://nixos.org/community.html" +VERSION="23.11 (Tapir)" +VERSION_CODENAME=tapir +VERSION_ID="23.11" +` + +func TestParseOSRelease(t *testing.T) { + want := &osRelease{ + Name: "Ubuntu", + ID: "ubuntu", + IDLike: "debian", + PrettyName: "Ubuntu 20.04.2 LTS", + SupportEnd: "", + Version: "20.04.2 LTS (Focal Fossa)", + VersionID: "20.04", + VersionCodename: "focal", + } + + osReleaseFile, err := os.Open("fixtures" + usrLibOSRelease) + if err != nil { + t.Fatal(err) + } + defer osReleaseFile.Close() + + got, err := parseOSRelease(osReleaseFile) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(want, got) { + t.Fatalf("should have %+v osRelease: got %+v", want, got) + } + + want = &osRelease{ + Name: "Debian GNU/Linux", + ID: "debian", + PrettyName: "Debian GNU/Linux 11 (bullseye)", + Version: "11 (bullseye)", + VersionID: "11", + VersionCodename: "bullseye", + } + got, err = parseOSRelease(strings.NewReader(debianBullseye)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(want, got) { + t.Fatalf("should have %+v osRelease: got %+v", want, got) + } +} + +func TestParseOSSupportEnd(t *testing.T) { + want := &osRelease{ + BuildID: "23.11.20240328.219951b", + Name: "NixOS", + ID: "nixos", + IDLike: "", + ImageID: "", + ImageVersion: "", + PrettyName: "NixOS 23.11 (Tapir)", + SupportEnd: "2024-06-30", + Variant: "", + VariantID: "", + Version: "23.11 (Tapir)", + VersionID: "23.11", + VersionCodename: "tapir", + } + + got, err := parseOSRelease(strings.NewReader(nixosTapir)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(want, got) { + t.Fatalf("should have %+v osRelease: got %+v", want, got) + } +} + +func TestUpdateStruct(t *testing.T) { + wantedOS := &osRelease{ + Name: "Ubuntu", + ID: "ubuntu", + IDLike: "debian", + PrettyName: "Ubuntu 20.04.2 LTS", + Version: "20.04.2 LTS (Focal Fossa)", + VersionID: "20.04", + VersionCodename: "focal", + } + wantedVersion := 20.04 + + collector, err := NewOSCollector(slog.New(slog.NewTextHandler(io.Discard, nil))) + if err != nil { + t.Fatal(err) + } + c := collector.(*osReleaseCollector) + + err = c.UpdateStruct("fixtures" + usrLibOSRelease) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(wantedOS, c.os) { + t.Fatalf("should have %+v osRelease: got %+v", wantedOS, c.os) + } + if wantedVersion != c.version { + t.Errorf("Expected '%v' but got '%v'", wantedVersion, c.version) + } +} diff --git a/collector/partition_aix.go b/collector/partition_aix.go new file mode 100644 index 0000000000..a6d9494148 --- /dev/null +++ b/collector/partition_aix.go @@ -0,0 +1,117 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nopartition + +package collector + +import ( + "log/slog" + + "github.com/power-devops/perfstat" + "github.com/prometheus/client_golang/prometheus" +) + +type partitionCollector struct { + logger *slog.Logger + entitledCapacity *prometheus.Desc + memoryMax *prometheus.Desc + memoryOnline *prometheus.Desc + cpuOnline *prometheus.Desc + cpuSys *prometheus.Desc + cpuPool *prometheus.Desc + powerSaveMode *prometheus.Desc + smtThreads *prometheus.Desc +} + +const ( + partitionCollectorSubsystem = "partition" +) + +func init() { + registerCollector("partition", defaultEnabled, NewPartitionCollector) +} + +func NewPartitionCollector(logger *slog.Logger) (Collector, error) { + return &partitionCollector{ + logger: logger, + entitledCapacity: prometheus.NewDesc( + prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "entitled_capacity"), + "Entitled processor capacity of the partition in CPU units (e.g. 1.0 = one core).", + nil, nil, + ), + memoryMax: prometheus.NewDesc( + prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "memory_max"), + "Maximum memory of the partition in bytes.", + nil, nil, + ), + memoryOnline: prometheus.NewDesc( + prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "memory_online"), + "Online memory of the partition in bytes.", + nil, nil, + ), + cpuOnline: prometheus.NewDesc( + prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "cpus_online"), + "Number of online CPUs in the partition.", + nil, nil, + ), + cpuSys: prometheus.NewDesc( + prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "cpus_sys"), + "Number of physical CPUs in the system.", + nil, nil, + ), + cpuPool: prometheus.NewDesc( + prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "cpus_pool"), + "Number of physical CPUs in the pool.", + nil, nil, + ), + powerSaveMode: prometheus.NewDesc( + prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "power_save_mode"), + "Power save mode of the partition (1 for enabled, 0 for disabled).", + nil, nil, + ), + smtThreads: prometheus.NewDesc( + prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "smt_threads"), + "Number of SMT threads per core.", + nil, nil, + ), + }, nil +} + +func (c *partitionCollector) Update(ch chan<- prometheus.Metric) error { + stats, err := perfstat.PartitionStat() + if err != nil { + return err + } + + powerSaveMode := 0.0 + if stats.Conf.PowerSave { + powerSaveMode = 1.0 + } + + ch <- prometheus.MustNewConstMetric(c.entitledCapacity, prometheus.GaugeValue, float64(stats.EntCapacity)/100.0) + + ch <- prometheus.MustNewConstMetric(c.memoryMax, prometheus.GaugeValue, float64(stats.Mem.Max)*1024*1024) + ch <- prometheus.MustNewConstMetric(c.memoryOnline, prometheus.GaugeValue, float64(stats.Mem.Online)*1024*1024) + + ch <- prometheus.MustNewConstMetric(c.cpuOnline, prometheus.GaugeValue, float64(stats.VCpus.Online)) + + ch <- prometheus.MustNewConstMetric(c.cpuSys, prometheus.GaugeValue, float64(stats.NumProcessors.Online)) + + ch <- prometheus.MustNewConstMetric(c.cpuPool, prometheus.GaugeValue, float64(stats.ActiveCpusInPool)) + 
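+ // powerSaveMode was derived above from stats.Conf.PowerSave (1 = enabled, 0 = disabled).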
+ ch <- prometheus.MustNewConstMetric(c.powerSaveMode, prometheus.GaugeValue, powerSaveMode) + ch <- prometheus.MustNewConstMetric(c.smtThreads, prometheus.GaugeValue, float64(stats.SmtThreads)) + + return nil +} diff --git a/collector/paths.go b/collector/paths.go index 5f5a7b44b3..82c941876d 100644 --- a/collector/paths.go +++ b/collector/paths.go @@ -17,15 +17,16 @@ import ( "path/filepath" "strings" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/procfs" - kingpin "gopkg.in/alecthomas/kingpin.v2" ) var ( // The path of the proc filesystem. - procPath = kingpin.Flag("path.procfs", "procfs mountpoint.").Default(procfs.DefaultMountPoint).String() - sysPath = kingpin.Flag("path.sysfs", "sysfs mountpoint.").Default("/sys").String() - rootfsPath = kingpin.Flag("path.rootfs", "rootfs mountpoint.").Default("/").String() + procPath = kingpin.Flag("path.procfs", "procfs mountpoint.").Default(procfs.DefaultMountPoint).String() + sysPath = kingpin.Flag("path.sysfs", "sysfs mountpoint.").Default("/sys").String() + rootfsPath = kingpin.Flag("path.rootfs", "rootfs mountpoint.").Default("/").String() + udevDataPath = kingpin.Flag("path.udev.data", "udev data path.").Default("/run/udev/data").String() ) func procFilePath(name string) string { @@ -40,6 +41,10 @@ func rootfsFilePath(name string) string { return filepath.Join(*rootfsPath, name) } +func udevDataFilePath(name string) string { + return filepath.Join(*udevDataPath, name) +} + func rootfsStripPrefix(path string) string { if *rootfsPath == "/" { return path diff --git a/collector/paths_test.go b/collector/paths_test.go index eca7b3a48d..9426fa4b8a 100644 --- a/collector/paths_test.go +++ b/collector/paths_test.go @@ -16,8 +16,8 @@ package collector import ( "testing" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/procfs" - "gopkg.in/alecthomas/kingpin.v2" ) func TestDefaultProcPath(t *testing.T) { diff --git a/collector/pcidevice_linux.go b/collector/pcidevice_linux.go new file mode 100644 index 0000000000..6f33db938f --- /dev/null +++ b/collector/pcidevice_linux.go @@ -0,0 +1,610 @@ +// Copyright 2017-2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nopcidevice + +package collector + +import ( + "bufio" + "errors" + "fmt" + "log/slog" + "os" + "strings" + + "github.com/alecthomas/kingpin/v2" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +const ( + pcideviceSubsystem = "pcidevice" +) + +var ( + pciIdsPaths = []string{ + "/usr/share/misc/pci.ids", + "/usr/share/hwdata/pci.ids", + } + pciIdsFile = kingpin.Flag("collector.pcidevice.idsfile", "Path to pci.ids file to use for PCI device identification.").String() + pciNames = kingpin.Flag("collector.pcidevice.names", "Enable PCI device name resolution (requires pci.ids file).").Default("false").Bool() + + pcideviceLabelNames = []string{"segment", "bus", "device", "function"} + + pcideviceMaxLinkTSDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "max_link_transfers_per_second"), + "Value of maximum link's transfers per second (T/s)", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } + + pcideviceMaxLinkWidthDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "max_link_width"), + "Value of maximum link's width (number of lanes)", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } + + pcideviceCurrentLinkTSDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "current_link_transfers_per_second"), + "Value of current link's transfers per second (T/s)", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } + pcideviceCurrentLinkWidthDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "current_link_width"), + "Value of current link's width (number of lanes)", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } + + pcidevicePowerStateDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "power_state"), + "PCIe device power state, one of: D0, D1, D2, D3hot, D3cold, unknown or error.", + append(pcideviceLabelNames, "state"), nil, + ), + valueType: prometheus.GaugeValue, + } + + pcideviceD3coldAllowedDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "d3cold_allowed"), + "Whether the PCIe device supports D3cold power state (0/1).", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } + + pcideviceSriovDriversAutoprobeDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "sriov_drivers_autoprobe"), + "Whether SR-IOV drivers autoprobe is enabled for the device (0/1).", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } + + pcideviceSriovNumvfsDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "sriov_numvfs"), + "Number of Virtual Functions (VFs) currently enabled for SR-IOV.", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } + + pcideviceSriovTotalvfsDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "sriov_totalvfs"), + "Total number of Virtual Functions (VFs) supported by the device.", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } + + pcideviceSriovVfTotalMsixDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "sriov_vf_total_msix"), + "Total number of MSI-X vectors for 
Virtual Functions.", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } + + pcideviceNumaNodeDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "numa_node"), + "NUMA node number for the PCI device. -1 indicates unknown or not available.", + pcideviceLabelNames, nil, + ), + valueType: prometheus.GaugeValue, + } +) + +type pcideviceCollector struct { + fs sysfs.FS + infoDesc typedDesc + logger *slog.Logger + pciVendors map[string]string + pciDevices map[string]map[string]string + pciSubsystems map[string]map[string]string + pciClasses map[string]string + pciSubclasses map[string]string + pciProgIfs map[string]string + pciNames bool +} + +func init() { + registerCollector("pcidevice", defaultDisabled, NewPcideviceCollector) +} + +// NewPcideviceCollector returns a new Collector exposing PCI devices stats. +func NewPcideviceCollector(logger *slog.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + // Initialize PCI ID maps + c := &pcideviceCollector{ + fs: fs, + logger: logger, + pciNames: *pciNames, + } + + // Build label names based on whether name resolution is enabled + labelNames := append(pcideviceLabelNames, + []string{"parent_segment", "parent_bus", "parent_device", "parent_function", + "class_id", "vendor_id", "device_id", "subsystem_vendor_id", "subsystem_device_id", "revision"}...) + + if c.pciNames { + c.loadPCIIds() + // Add name labels when name resolution is enabled + labelNames = append(labelNames, "vendor_name", "device_name", "subsystem_vendor_name", "subsystem_device_name", "class_name") + } + + c.infoDesc = typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, pcideviceSubsystem, "info"), + "Non-numeric data from /sys/bus/pci/devices/, value is always 1.", + labelNames, + nil, + ), + valueType: prometheus.GaugeValue, + } + + return c, nil +} + +func (c *pcideviceCollector) Update(ch chan<- prometheus.Metric) error { + devices, err := c.fs.PciDevices() + if err != nil { + if errors.Is(err, os.ErrNotExist) { + c.logger.Debug("PCI device not found, skipping") + return ErrNoData + } + return fmt.Errorf("error obtaining PCI device info: %w", err) + } + + for _, device := range devices { + // The device location is represented in separated format. + values := device.Location.Strings() + if device.ParentLocation != nil { + values = append(values, device.ParentLocation.Strings()...) + } else { + values = append(values, []string{"*", "*", "*", "*"}...) 
+ } + + // Add basic device information + classID := fmt.Sprintf("0x%06x", device.Class) + vendorID := fmt.Sprintf("0x%04x", device.Vendor) + deviceID := fmt.Sprintf("0x%04x", device.Device) + subsysVendorID := fmt.Sprintf("0x%04x", device.SubsystemVendor) + subsysDeviceID := fmt.Sprintf("0x%04x", device.SubsystemDevice) + + values = append(values, classID, vendorID, deviceID, subsysVendorID, subsysDeviceID, fmt.Sprintf("0x%02x", device.Revision)) + + // Add name values if name resolution is enabled + if c.pciNames { + vendorName := c.getPCIVendorName(vendorID) + deviceName := c.getPCIDeviceName(vendorID, deviceID) + subsysVendorName := c.getPCIVendorName(subsysVendorID) + subsysDeviceName := c.getPCISubsystemName(vendorID, deviceID, subsysVendorID, subsysDeviceID) + className := c.getPCIClassName(classID) + + values = append(values, vendorName, deviceName, subsysVendorName, subsysDeviceName, className) + } + + ch <- c.infoDesc.mustNewConstMetric(1.0, values...) + + // MaxLinkSpeed and CurrentLinkSpeed are represented in GT/s + var maxLinkSpeedTS float64 + if device.MaxLinkSpeed != nil { + maxLinkSpeedTS = (*device.MaxLinkSpeed) * 1e9 + } else { + maxLinkSpeedTS = -1 + } + + var currentLinkSpeedTS float64 + if device.CurrentLinkSpeed != nil { + currentLinkSpeedTS = (*device.CurrentLinkSpeed) * 1e9 + } else { + currentLinkSpeedTS = -1 + } + + // Get power state information directly from device object + var currentPowerState string + var hasPowerState bool + if device.PowerState != nil { + currentPowerState = device.PowerState.String() + hasPowerState = true + } + + var d3coldAllowed float64 + if device.D3coldAllowed != nil { + if *device.D3coldAllowed { + d3coldAllowed = 1 + } else { + d3coldAllowed = 0 + } + } + + // Get SR-IOV information directly from device object + var sriovDriversAutoprobe float64 + if device.SriovDriversAutoprobe != nil { + if *device.SriovDriversAutoprobe { + sriovDriversAutoprobe = 1 + } else { + sriovDriversAutoprobe = 0 + } + } + + var sriovNumvfs float64 + if device.SriovNumvfs != nil { + sriovNumvfs = float64(*device.SriovNumvfs) + } + + var sriovTotalvfs float64 + if device.SriovTotalvfs != nil { + sriovTotalvfs = float64(*device.SriovTotalvfs) + } + + var sriovVfTotalMsix float64 + if device.SriovVfTotalMsix != nil { + sriovVfTotalMsix = float64(*device.SriovVfTotalMsix) + } + + // Handle numa_node with nil safety + var numaNode float64 + if device.NumaNode != nil { + numaNode = float64(*device.NumaNode) + } else { + numaNode = -1 + } + + // Handle link width fields with nil safety + var maxLinkWidth float64 + if device.MaxLinkWidth != nil { + maxLinkWidth = float64(*device.MaxLinkWidth) + } else { + maxLinkWidth = -1 + } + + var currentLinkWidth float64 + if device.CurrentLinkWidth != nil { + currentLinkWidth = float64(*device.CurrentLinkWidth) + } else { + currentLinkWidth = -1 + } + + // Emit metrics for all fields except numa_node and power_state + ch <- pcideviceMaxLinkTSDesc.mustNewConstMetric(maxLinkSpeedTS, device.Location.Strings()...) + ch <- pcideviceMaxLinkWidthDesc.mustNewConstMetric(maxLinkWidth, device.Location.Strings()...) + ch <- pcideviceCurrentLinkTSDesc.mustNewConstMetric(currentLinkSpeedTS, device.Location.Strings()...) + ch <- pcideviceCurrentLinkWidthDesc.mustNewConstMetric(currentLinkWidth, device.Location.Strings()...) + ch <- pcideviceD3coldAllowedDesc.mustNewConstMetric(d3coldAllowed, device.Location.Strings()...) + ch <- pcideviceSriovDriversAutoprobeDesc.mustNewConstMetric(sriovDriversAutoprobe, device.Location.Strings()...) 
+ ch <- pcideviceSriovNumvfsDesc.mustNewConstMetric(sriovNumvfs, device.Location.Strings()...) + ch <- pcideviceSriovTotalvfsDesc.mustNewConstMetric(sriovTotalvfs, device.Location.Strings()...) + ch <- pcideviceSriovVfTotalMsixDesc.mustNewConstMetric(sriovVfTotalMsix, device.Location.Strings()...) + + // Emit power state metrics with state labels only if power state is available + if hasPowerState { + powerStates := []string{"D0", "D1", "D2", "D3hot", "D3cold", "unknown", "error"} + deviceLabels := device.Location.Strings() + for _, state := range powerStates { + var value float64 + if state == currentPowerState { + value = 1 + } else { + value = 0 + } + stateLabels := append(deviceLabels, state) + ch <- pcidevicePowerStateDesc.mustNewConstMetric(value, stateLabels...) + } + } + + // Only emit numa_node metric if the value is available (not -1) + if numaNode != -1 { + ch <- pcideviceNumaNodeDesc.mustNewConstMetric(numaNode, device.Location.Strings()...) + } + } + + return nil +} + +// loadPCIIds loads PCI device information from pci.ids file +func (c *pcideviceCollector) loadPCIIds() { + var file *os.File + var err error + + c.pciVendors = make(map[string]string) + c.pciDevices = make(map[string]map[string]string) + c.pciSubsystems = make(map[string]map[string]string) + c.pciClasses = make(map[string]string) + c.pciSubclasses = make(map[string]string) + c.pciProgIfs = make(map[string]string) + + // Use custom pci.ids file if specified + if *pciIdsFile != "" { + file, err = os.Open(*pciIdsFile) + if err != nil { + c.logger.Debug("Failed to open PCI IDs file", "file", *pciIdsFile, "error", err) + return + } + c.logger.Debug("Loading PCI IDs from", "file", *pciIdsFile) + } else { + // Try each possible default path + for _, path := range pciIdsPaths { + file, err = os.Open(path) + if err == nil { + c.logger.Debug("Loading PCI IDs from default path", "path", path) + break + } + } + if err != nil { + c.logger.Debug("Failed to open any default PCI IDs file", "error", err) + return + } + } + defer file.Close() + + scanner := bufio.NewScanner(file) + var currentVendor, currentDevice, currentBaseClass, currentSubclass string + var inClassContext bool + + for scanner.Scan() { + line := scanner.Text() + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + // Handle class lines (starts with 'C') + if strings.HasPrefix(line, "C ") { + parts := strings.SplitN(line, " ", 2) + if len(parts) >= 2 { + classID := strings.TrimSpace(parts[0][1:]) // Remove 'C' prefix + className := strings.TrimSpace(parts[1]) + c.pciClasses[classID] = className + currentBaseClass = classID + inClassContext = true + } + continue + } + + // Handle subclass lines (single tab after class) + if strings.HasPrefix(line, "\t") && !strings.HasPrefix(line, "\t\t") && inClassContext { + line = strings.TrimPrefix(line, "\t") + parts := strings.SplitN(line, " ", 2) + if len(parts) >= 2 && currentBaseClass != "" { + subclassID := strings.TrimSpace(parts[0]) + subclassName := strings.TrimSpace(parts[1]) + // Store as base class + subclass (e.g., "0100" for SCSI storage controller) + fullClassID := currentBaseClass + subclassID + c.pciSubclasses[fullClassID] = subclassName + currentSubclass = fullClassID + } + continue + } + + // Handle programming interface lines (double tab after subclass) + if strings.HasPrefix(line, "\t\t") && !strings.HasPrefix(line, "\t\t\t") && inClassContext { + line = strings.TrimPrefix(line, "\t\t") + parts := strings.SplitN(line, " ", 2) + if len(parts) >= 2 && currentSubclass != "" { + progIfID := 
strings.TrimSpace(parts[0]) + progIfName := strings.TrimSpace(parts[1]) + // Store as base class + subclass + programming interface (e.g., "010802" for NVM Express) + fullClassID := currentSubclass + progIfID + c.pciProgIfs[fullClassID] = progIfName + } + continue + } + + // Handle vendor lines (no leading whitespace, not starting with 'C') + if !strings.HasPrefix(line, "\t") && !strings.HasPrefix(line, "C ") { + parts := strings.SplitN(line, " ", 2) + if len(parts) >= 2 { + currentVendor = strings.TrimSpace(parts[0]) + c.pciVendors[currentVendor] = strings.TrimSpace(parts[1]) + currentDevice = "" + inClassContext = false + } + continue + } + + // Handle device lines (single tab) + if strings.HasPrefix(line, "\t") && !strings.HasPrefix(line, "\t\t") { + line = strings.TrimPrefix(line, "\t") + parts := strings.SplitN(line, " ", 2) + if len(parts) >= 2 && currentVendor != "" { + currentDevice = strings.TrimSpace(parts[0]) + if c.pciDevices[currentVendor] == nil { + c.pciDevices[currentVendor] = make(map[string]string) + } + c.pciDevices[currentVendor][currentDevice] = strings.TrimSpace(parts[1]) + } + continue + } + + // Handle subsystem lines (double tab) + if strings.HasPrefix(line, "\t\t") { + line = strings.TrimPrefix(line, "\t\t") + parts := strings.SplitN(line, " ", 2) + if len(parts) >= 2 && currentVendor != "" && currentDevice != "" { + subsysID := strings.TrimSpace(parts[0]) + subsysName := strings.TrimSpace(parts[1]) + key := fmt.Sprintf("%s:%s", currentVendor, currentDevice) + if c.pciSubsystems[key] == nil { + c.pciSubsystems[key] = make(map[string]string) + } + // Convert subsystem ID from "vendor device" format to "vendor:device" format + subsysParts := strings.Fields(subsysID) + if len(subsysParts) == 2 { + subsysKey := fmt.Sprintf("%s:%s", subsysParts[0], subsysParts[1]) + c.pciSubsystems[key][subsysKey] = subsysName + } + } + } + } + + // Debug summary + totalDevices := 0 + for _, devices := range c.pciDevices { + totalDevices += len(devices) + } + totalSubsystems := 0 + for _, subsystems := range c.pciSubsystems { + totalSubsystems += len(subsystems) + } + + c.logger.Debug("Loaded PCI device data", + "vendors", len(c.pciVendors), + "devices", totalDevices, + "subsystems", totalSubsystems, + "classes", len(c.pciClasses), + "subclasses", len(c.pciSubclasses), + "progIfs", len(c.pciProgIfs), + ) +} + +// getPCIVendorName converts PCI vendor ID to human-readable string using pci.ids +func (c *pcideviceCollector) getPCIVendorName(vendorID string) string { + // Return original ID if name resolution is disabled + if !c.pciNames { + return vendorID + } + + // Remove "0x" prefix if present + vendorID = strings.TrimPrefix(vendorID, "0x") + vendorID = strings.ToLower(vendorID) + + if name, ok := c.pciVendors[vendorID]; ok { + return name + } + return vendorID // Return ID if name not found +} + +// getPCIDeviceName converts PCI device ID to human-readable string using pci.ids +func (c *pcideviceCollector) getPCIDeviceName(vendorID, deviceID string) string { + // Return original ID if name resolution is disabled + if !c.pciNames { + return deviceID + } + + // Remove "0x" prefix if present + vendorID = strings.TrimPrefix(vendorID, "0x") + deviceID = strings.TrimPrefix(deviceID, "0x") + vendorID = strings.ToLower(vendorID) + deviceID = strings.ToLower(deviceID) + + if devices, ok := c.pciDevices[vendorID]; ok { + if name, ok := devices[deviceID]; ok { + return name + } + } + return deviceID // Return ID if name not found +} + +// getPCISubsystemName converts PCI subsystem ID to 
human-readable string using pci.ids +func (c *pcideviceCollector) getPCISubsystemName(vendorID, deviceID, subsysVendorID, subsysDeviceID string) string { + // Return original ID if name resolution is disabled + if !c.pciNames { + return subsysDeviceID + } + + // Normalize all IDs + vendorID = strings.TrimPrefix(vendorID, "0x") + deviceID = strings.TrimPrefix(deviceID, "0x") + subsysVendorID = strings.TrimPrefix(subsysVendorID, "0x") + subsysDeviceID = strings.TrimPrefix(subsysDeviceID, "0x") + + key := fmt.Sprintf("%s:%s", vendorID, deviceID) + subsysKey := fmt.Sprintf("%s:%s", subsysVendorID, subsysDeviceID) + + if subsystems, ok := c.pciSubsystems[key]; ok { + if name, ok := subsystems[subsysKey]; ok { + return name + } + } + return subsysDeviceID +} + +// getPCIClassName converts PCI class ID to human-readable string using pci.ids +func (c *pcideviceCollector) getPCIClassName(classID string) string { + // Return original ID if name resolution is disabled + if !c.pciNames { + return classID + } + + // Remove "0x" prefix if present and normalize + classID = strings.TrimPrefix(classID, "0x") + classID = strings.ToLower(classID) + + // Try to find the programming interface first (6 digits: base class + subclass + programming interface) + if len(classID) >= 6 { + progIf := classID[:6] + if className, exists := c.pciProgIfs[progIf]; exists { + return className + } + } + + // Try to find the subclass (4 digits: base class + subclass) + if len(classID) >= 4 { + subclass := classID[:4] + if className, exists := c.pciSubclasses[subclass]; exists { + return className + } + } + + // If not found, try with just the base class (first 2 digits) + if len(classID) >= 2 { + baseClass := classID[:2] + if className, exists := c.pciClasses[baseClass]; exists { + return className + } + } + + // Return the original class ID if not found + return "Unknown class (" + classID + ")" +} diff --git a/collector/pcidevice_linux_test.go b/collector/pcidevice_linux_test.go new file mode 100644 index 0000000000..aedc7c5f01 --- /dev/null +++ b/collector/pcidevice_linux_test.go @@ -0,0 +1,88 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nopcidevice + +package collector + +import ( + "fmt" + "io" + "log/slog" + "os" + "strings" + "testing" + + "github.com/alecthomas/kingpin/v2" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" +) + +func TestPCICollectorWithNameResolution(t *testing.T) { + // Test the PCI collector with name resolution enabled and compare against expected output + if _, err := kingpin.CommandLine.Parse([]string{ + "--path.sysfs", "fixtures/sys", + "--path.procfs", "fixtures/proc", + "--path.rootfs", "fixtures", + "--collector.pcidevice", + "--collector.pcidevice.names", + // "--collector.pcidevice.idsfile", "/usr/share/misc/pci.ids", + "--collector.pcidevice.idsfile", "fixtures/pci.ids", + }); err != nil { + t.Fatal(err) + } + + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + c, err := NewPcideviceCollector(logger) + if err != nil { + t.Fatal(err) + } + + reg := prometheus.NewRegistry() + reg.MustRegister(&testPCICollector{pc: c}) + + // Read expected output from fixture file + expectedOutput, err := os.ReadFile("fixtures/pcidevice-names-output.txt") + if err != nil { + t.Fatal(err) + } + + err = testutil.GatherAndCompare(reg, strings.NewReader(string(expectedOutput))) + if err != nil { + t.Fatal(err) + } +} + +// testPCICollector wraps the PCI collector for testing +type testPCICollector struct { + pc Collector +} + +func (tc *testPCICollector) Collect(ch chan<- prometheus.Metric) { + sink := make(chan prometheus.Metric) + go func() { + err := tc.pc.Update(sink) + if err != nil { + panic(fmt.Errorf("failed to update collector: %s", err)) + } + close(sink) + }() + + for m := range sink { + ch <- m + } +} + +func (tc *testPCICollector) Describe(ch chan<- *prometheus.Desc) { + // No-op for testing +} diff --git a/collector/perf_linux.go b/collector/perf_linux.go index 3a2f7394db..8b68205a6e 100644 --- a/collector/perf_linux.go +++ b/collector/perf_linux.go @@ -11,22 +11,21 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !noperf +//go:build !noperf package collector import ( "fmt" + "log/slog" "runtime" "strconv" "strings" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/hodgesds/perf-utils" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" - kingpin "gopkg.in/alecthomas/kingpin.v2" ) const ( @@ -36,12 +35,67 @@ const ( var ( perfCPUsFlag = kingpin.Flag("collector.perf.cpus", "List of CPUs from which perf metrics should be collected").Default("").String() perfTracepointFlag = kingpin.Flag("collector.perf.tracepoint", "perf tracepoint that should be collected").Strings() + perfNoHwProfiler = kingpin.Flag("collector.perf.disable-hardware-profilers", "disable perf hardware profilers").Default("false").Bool() + perfHwProfilerFlag = kingpin.Flag("collector.perf.hardware-profilers", "perf hardware profilers that should be collected").Strings() + perfNoSwProfiler = kingpin.Flag("collector.perf.disable-software-profilers", "disable perf software profilers").Default("false").Bool() + perfSwProfilerFlag = kingpin.Flag("collector.perf.software-profilers", "perf software profilers that should be collected").Strings() + perfNoCaProfiler = kingpin.Flag("collector.perf.disable-cache-profilers", "disable perf cache profilers").Default("false").Bool() + perfCaProfilerFlag = kingpin.Flag("collector.perf.cache-profilers", "perf cache profilers that should be collected").Strings() ) func init() { registerCollector(perfSubsystem, defaultDisabled, NewPerfCollector) } +var ( + perfHardwareProfilerMap = map[string]perf.HardwareProfilerType{ + "CpuCycles": perf.CpuCyclesProfiler, + "CpuInstr": perf.CpuInstrProfiler, + "CacheRef": perf.CacheRefProfiler, + "CacheMisses": perf.CacheMissesProfiler, + "BranchInstr": perf.BranchInstrProfiler, + "BranchMisses": perf.BranchMissesProfiler, + "StalledCyclesBackend": perf.StalledCyclesBackendProfiler, + "StalledCyclesFrontend": perf.StalledCyclesFrontendProfiler, + "RefCpuCycles": perf.RefCpuCyclesProfiler, + // "BusCycles": perf.BusCyclesProfiler, + } + perfSoftwareProfilerMap = map[string]perf.SoftwareProfilerType{ + "PageFault": perf.PageFaultProfiler, + "ContextSwitch": perf.ContextSwitchProfiler, + "CpuMigration": perf.CpuMigrationProfiler, + "MinorFault": perf.MinorFaultProfiler, + "MajorFault": perf.MajorFaultProfiler, + // "CpuClock": perf.CpuClockProfiler, + // "TaskClock": perf.TaskClockProfiler, + // "AlignFault": perf.AlignFaultProfiler, + // "EmuFault": perf.EmuFaultProfiler, + } + perfCacheProfilerMap = map[string]perf.CacheProfilerType{ + "L1DataReadHit": perf.L1DataReadHitProfiler, + "L1DataReadMiss": perf.L1DataReadMissProfiler, + "L1DataWriteHit": perf.L1DataWriteHitProfiler, + "L1InstrReadMiss": perf.L1InstrReadMissProfiler, + "LLReadHit": perf.LLReadHitProfiler, + "LLReadMiss": perf.LLReadMissProfiler, + "LLWriteHit": perf.LLWriteHitProfiler, + "LLWriteMiss": perf.LLWriteMissProfiler, + "InstrTLBReadHit": perf.InstrTLBReadHitProfiler, + "InstrTLBReadMiss": perf.InstrTLBReadMissProfiler, + "BPUReadHit": perf.BPUReadHitProfiler, + "BPUReadMiss": perf.BPUReadMissProfiler, + // "L1InstrReadHit": perf.L1InstrReadHitProfiler, + "DataTLBReadHit": perf.DataTLBReadHitProfiler, + "DataTLBReadMiss": perf.DataTLBReadMissProfiler, + "DataTLBWriteHit": perf.DataTLBWriteHitProfiler, + "DataTLBWriteMiss": perf.DataTLBWriteMissProfiler, + // "NodeCacheReadHit": perf.NodeCacheReadHitProfiler, + // "NodeCacheReadMiss": perf.NodeCacheReadMissProfiler, + // "NodeCacheWriteHit": 
perf.NodeCacheWriteHitProfiler, + // "NodeCacheWriteMiss": perf.NodeCacheWriteMissProfiler, + } +) + // perfTracepointFlagToTracepoints returns the set of configured tracepoints. func perfTracepointFlagToTracepoints(tracepointsFlag []string) ([]*perfTracepoint, error) { tracepoints := make([]*perfTracepoint, len(tracepointsFlag)) @@ -49,7 +103,7 @@ func perfTracepointFlagToTracepoints(tracepointsFlag []string) ([]*perfTracepoin for i, tracepoint := range tracepointsFlag { split := strings.Split(tracepoint, ":") if len(split) != 2 { - return nil, fmt.Errorf("Invalid tracepoint config %v", tracepoint) + return nil, fmt.Errorf("invalid tracepoint config %v", tracepoint) } tracepoints[i] = &perfTracepoint{ subsystem: split[0], @@ -63,7 +117,7 @@ func perfTracepointFlagToTracepoints(tracepointsFlag []string) ([]*perfTracepoin func perfCPUFlagToCPUs(cpuFlag string) ([]int, error) { var err error cpus := []int{} - for _, subset := range strings.Split(cpuFlag, ",") { + for subset := range strings.SplitSeq(cpuFlag, ",") { // First parse a single CPU. if !strings.Contains(subset, "-") { cpu, err := strconv.Atoi(subset) @@ -133,7 +187,7 @@ type perfCollector struct { perfSwProfilers map[int]*perf.SoftwareProfiler perfCacheProfilers map[int]*perf.CacheProfiler desc map[string]*prometheus.Desc - logger log.Logger + logger *slog.Logger tracepointCollector *perfTracepointCollector } @@ -143,7 +197,7 @@ type perfTracepointCollector struct { // collection order is the sorted configured collection order of the profiler. collectionOrder []string - logger log.Logger + logger *slog.Logger profilers map[int]perf.GroupProfiler } @@ -159,14 +213,15 @@ func (c *perfTracepointCollector) update(ch chan<- prometheus.Metric) error { // updateCPU is used to update metrics per CPU profiler. func (c *perfTracepointCollector) updateCPU(cpu int, ch chan<- prometheus.Metric) error { - cpuStr := fmt.Sprintf("%d", cpu) profiler := c.profilers[cpu] - p, err := profiler.Profile() - if err != nil { - level.Error(c.logger).Log("msg", "Failed to collect tracepoint profile", "err", err) + p := &perf.GroupProfileValue{} + if err := profiler.Profile(p); err != nil { + c.logger.Error("Failed to collect tracepoint profile", "err", err) return err } + cpuid := strconv.Itoa(cpu) + for i, value := range p.Values { // Get the Desc from the ordered group value. descKey := c.collectionOrder[i] @@ -175,7 +230,7 @@ func (c *perfTracepointCollector) updateCPU(cpu int, ch chan<- prometheus.Metric c.descs[descKeySlice[0]][descKeySlice[1]], prometheus.CounterValue, float64(value), - cpuStr, + cpuid, ) } return nil @@ -183,7 +238,7 @@ func (c *perfTracepointCollector) updateCPU(cpu int, ch chan<- prometheus.Metric // newPerfTracepointCollector returns a configured perfTracepointCollector. func newPerfTracepointCollector( - logger log.Logger, + logger *slog.Logger, tracepointsFlag []string, cpus []int, ) (*perfTracepointCollector, error) { @@ -244,7 +299,7 @@ func newPerfTracepointCollector( // NewPerfCollector returns a new perf based collector, it creates a profiler // per CPU. 
-func NewPerfCollector(logger log.Logger) (Collector, error) { +func NewPerfCollector(logger *slog.Logger) (Collector, error) { collector := &perfCollector{ perfHwProfilers: map[int]*perf.HardwareProfiler{}, perfSwProfilers: map[int]*perf.SoftwareProfiler{}, @@ -280,30 +335,82 @@ func NewPerfCollector(logger log.Logger) (Collector, error) { collector.tracepointCollector = tracepointCollector } + // Configure perf profilers + hardwareProfilers := perf.AllHardwareProfilers + if *perfHwProfilerFlag != nil && len(*perfHwProfilerFlag) > 0 { + // hardwareProfilers = 0 + for _, hf := range *perfHwProfilerFlag { + if v, ok := perfHardwareProfilerMap[hf]; ok { + hardwareProfilers |= v + } + } + } + softwareProfilers := perf.AllSoftwareProfilers + if *perfSwProfilerFlag != nil && len(*perfSwProfilerFlag) > 0 { + // softwareProfilers = 0 + for _, sf := range *perfSwProfilerFlag { + if v, ok := perfSoftwareProfilerMap[sf]; ok { + softwareProfilers |= v + } + } + } + cacheProfilers := perf.L1DataReadHitProfiler | perf.L1DataReadMissProfiler | perf.L1DataWriteHitProfiler | perf.L1InstrReadMissProfiler | perf.InstrTLBReadHitProfiler | perf.InstrTLBReadMissProfiler | perf.DataTLBReadHitProfiler | perf.DataTLBReadMissProfiler | perf.DataTLBWriteHitProfiler | perf.DataTLBWriteMissProfiler | perf.LLReadHitProfiler | perf.LLReadMissProfiler | perf.LLWriteHitProfiler | perf.LLWriteMissProfiler | perf.BPUReadHitProfiler | perf.BPUReadMissProfiler + if *perfCaProfilerFlag != nil && len(*perfCaProfilerFlag) > 0 { + cacheProfilers = 0 + for _, cf := range *perfCaProfilerFlag { + if v, ok := perfCacheProfilerMap[cf]; ok { + cacheProfilers |= v + } + } + } + // Configure all profilers for the specified CPUs. for _, cpu := range cpus { // Use -1 to profile all processes on the CPU, see: // man perf_event_open - hwProf := perf.NewHardwareProfiler(-1, cpu) - if err := hwProf.Start(); err != nil { - return nil, err + if !*perfNoHwProfiler { + hwProf, err := perf.NewHardwareProfiler( + -1, + cpu, + hardwareProfilers, + ) + if err != nil && !hwProf.HasProfilers() { + return nil, err + } + if err := hwProf.Start(); err != nil { + return nil, err + } + collector.perfHwProfilers[cpu] = &hwProf + collector.hwProfilerCPUMap[&hwProf] = cpu } - collector.perfHwProfilers[cpu] = &hwProf - collector.hwProfilerCPUMap[&hwProf] = cpu - swProf := perf.NewSoftwareProfiler(-1, cpu) - if err := swProf.Start(); err != nil { - return nil, err + if !*perfNoSwProfiler { + swProf, err := perf.NewSoftwareProfiler(-1, cpu, softwareProfilers) + if err != nil && !swProf.HasProfilers() { + return nil, err + } + if err := swProf.Start(); err != nil { + return nil, err + } + collector.perfSwProfilers[cpu] = &swProf + collector.swProfilerCPUMap[&swProf] = cpu } - collector.perfSwProfilers[cpu] = &swProf - collector.swProfilerCPUMap[&swProf] = cpu - cacheProf := perf.NewCacheProfiler(-1, cpu) - if err := cacheProf.Start(); err != nil { - return nil, err + if !*perfNoCaProfiler { + cacheProf, err := perf.NewCacheProfiler( + -1, + cpu, + cacheProfilers, + ) + if err != nil && !cacheProf.HasProfilers() { + return nil, err + } + if err := cacheProf.Start(); err != nil { + return nil, err + } + collector.perfCacheProfilers[cpu] = &cacheProf + collector.cacheProfilerCPUMap[&cacheProf] = cpu } - collector.perfCacheProfilers[cpu] = &cacheProf - collector.cacheProfilerCPUMap[&cacheProf] = cpu } collector.desc = map[string]*prometheus.Desc{ @@ -377,6 +484,26 @@ func NewPerfCollector(logger log.Logger) (Collector, error) { []string{"cpu"}, nil, ), + 
"stalled_cycles_backend_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "stalled_cycles_backend_total", + ), + "Number of stalled backend CPU cycles", + []string{"cpu"}, + nil, + ), + "stalled_cycles_frontend_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "stalled_cycles_frontend_total", + ), + "Number of stalled frontend CPU cycles", + []string{"cpu"}, + nil, + ), "page_faults_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, @@ -487,6 +614,46 @@ func NewPerfCollector(logger log.Logger) (Collector, error) { []string{"cpu"}, nil, ), + "cache_tlb_data_read_hits_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_tlb_data_read_hits_total", + ), + "Number of data TLB read hits", + []string{"cpu"}, + nil, + ), + "cache_tlb_data_read_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_tlb_data_read_misses_total", + ), + "Number of data TLB read misses", + []string{"cpu"}, + nil, + ), + "cache_tlb_data_write_hits_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_tlb_data_write_hits_total", + ), + "Number of data TLB write hits", + []string{"cpu"}, + nil, + ), + "cache_tlb_data_write_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_tlb_data_write_misses_total", + ), + "Number of data TLB write misses", + []string{"cpu"}, + nil, + ), "cache_ll_read_hits_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, @@ -574,21 +741,18 @@ func (c *perfCollector) Update(ch chan<- prometheus.Metric) error { func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { for _, profiler := range c.perfHwProfilers { - cpuid := c.hwProfilerCPUMap[profiler] - cpuStr := strconv.Itoa(cpuid) - hwProfile, err := (*profiler).Profile() - if err != nil { + hwProfile := &perf.HardwareProfile{} + if err := (*profiler).Profile(hwProfile); err != nil { return err } - if hwProfile == nil { - continue - } + + cpuid := strconv.Itoa(c.hwProfilerCPUMap[profiler]) if hwProfile.CPUCycles != nil { ch <- prometheus.MustNewConstMetric( c.desc["cpucycles_total"], prometheus.CounterValue, float64(*hwProfile.CPUCycles), - cpuStr, + cpuid, ) } @@ -596,7 +760,7 @@ func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["instructions_total"], prometheus.CounterValue, float64(*hwProfile.Instructions), - cpuStr, + cpuid, ) } @@ -604,7 +768,7 @@ func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["branch_instructions_total"], prometheus.CounterValue, float64(*hwProfile.BranchInstr), - cpuStr, + cpuid, ) } @@ -612,7 +776,7 @@ func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["branch_misses_total"], prometheus.CounterValue, float64(*hwProfile.BranchMisses), - cpuStr, + cpuid, ) } @@ -620,7 +784,7 @@ func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_refs_total"], prometheus.CounterValue, float64(*hwProfile.CacheRefs), - cpuStr, + cpuid, ) } @@ -628,7 +792,7 @@ func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_misses_total"], prometheus.CounterValue, 
float64(*hwProfile.CacheMisses), - cpuStr, + cpuid, ) } @@ -636,7 +800,23 @@ func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["ref_cpucycles_total"], prometheus.CounterValue, float64(*hwProfile.RefCPUCycles), - cpuStr, + cpuid, + ) + } + + if hwProfile.StalledCyclesBackend != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["stalled_cycles_backend_total"], + prometheus.CounterValue, float64(*hwProfile.StalledCyclesBackend), + cpuid, + ) + } + + if hwProfile.StalledCyclesFrontend != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["stalled_cycles_frontend_total"], + prometheus.CounterValue, float64(*hwProfile.StalledCyclesFrontend), + cpuid, ) } } @@ -646,21 +826,18 @@ func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { func (c *perfCollector) updateSoftwareStats(ch chan<- prometheus.Metric) error { for _, profiler := range c.perfSwProfilers { - cpuid := c.swProfilerCPUMap[profiler] - cpuStr := fmt.Sprintf("%d", cpuid) - swProfile, err := (*profiler).Profile() - if err != nil { + swProfile := &perf.SoftwareProfile{} + if err := (*profiler).Profile(swProfile); err != nil { return err } - if swProfile == nil { - continue - } + + cpuid := strconv.Itoa(c.swProfilerCPUMap[profiler]) if swProfile.PageFaults != nil { ch <- prometheus.MustNewConstMetric( c.desc["page_faults_total"], prometheus.CounterValue, float64(*swProfile.PageFaults), - cpuStr, + cpuid, ) } @@ -668,7 +845,7 @@ func (c *perfCollector) updateSoftwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["context_switches_total"], prometheus.CounterValue, float64(*swProfile.ContextSwitches), - cpuStr, + cpuid, ) } @@ -676,7 +853,7 @@ func (c *perfCollector) updateSoftwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cpu_migrations_total"], prometheus.CounterValue, float64(*swProfile.CPUMigrations), - cpuStr, + cpuid, ) } @@ -684,7 +861,7 @@ func (c *perfCollector) updateSoftwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["minor_faults_total"], prometheus.CounterValue, float64(*swProfile.MinorPageFaults), - cpuStr, + cpuid, ) } @@ -692,7 +869,7 @@ func (c *perfCollector) updateSoftwareStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["major_faults_total"], prometheus.CounterValue, float64(*swProfile.MajorPageFaults), - cpuStr, + cpuid, ) } } @@ -702,21 +879,18 @@ func (c *perfCollector) updateSoftwareStats(ch chan<- prometheus.Metric) error { func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { for _, profiler := range c.perfCacheProfilers { - cpuid := c.cacheProfilerCPUMap[profiler] - cpuStr := fmt.Sprintf("%d", cpuid) - cacheProfile, err := (*profiler).Profile() - if err != nil { + cacheProfile := &perf.CacheProfile{} + if err := (*profiler).Profile(cacheProfile); err != nil { return err } - if cacheProfile == nil { - continue - } + + cpuid := strconv.Itoa(c.cacheProfilerCPUMap[profiler]) if cacheProfile.L1DataReadHit != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_l1d_read_hits_total"], prometheus.CounterValue, float64(*cacheProfile.L1DataReadHit), - cpuStr, + cpuid, ) } @@ -724,7 +898,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_l1d_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.L1DataReadMiss), - cpuStr, + cpuid, ) } 
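The updated hardware, software, and cache paths above all follow the same pattern: every counter in the profile struct is a pointer, and a per-CPU metric is emitted only when the profiler actually reported a value, with the CPU id rendered via strconv.Itoa as the metric's "cpu" label. A minimal sketch of that pattern, using an illustrative descriptor rather than one of the collector's own:

    package main

    import (
        "fmt"
        "strconv"

        "github.com/prometheus/client_golang/prometheus"
    )

    // emitOptionalCounter sends a per-CPU counter only when the profiler
    // reported a value; a nil pointer means the event was not measured.
    func emitOptionalCounter(ch chan<- prometheus.Metric, desc *prometheus.Desc, value *uint64, cpu int) {
        if value == nil {
            return
        }
        ch <- prometheus.MustNewConstMetric(
            desc, prometheus.CounterValue, float64(*value),
            strconv.Itoa(cpu), // the "cpu" label used by the perf descriptors
        )
    }

    func main() {
        // Illustrative descriptor, not one registered by the collector.
        desc := prometheus.NewDesc("node_perf_example_total", "Example counter.", []string{"cpu"}, nil)
        ch := make(chan prometheus.Metric, 2)
        v := uint64(42)
        emitOptionalCounter(ch, desc, &v, 0)  // emitted
        emitOptionalCounter(ch, desc, nil, 1) // skipped: counter unavailable on this CPU
        fmt.Println(len(ch)) // 1
    }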
@@ -732,7 +906,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_l1d_write_hits_total"], prometheus.CounterValue, float64(*cacheProfile.L1DataWriteHit), - cpuStr, + cpuid, ) } @@ -740,7 +914,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_l1_instr_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.L1InstrReadMiss), - cpuStr, + cpuid, ) } @@ -748,7 +922,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_tlb_instr_read_hits_total"], prometheus.CounterValue, float64(*cacheProfile.InstrTLBReadHit), - cpuStr, + cpuid, ) } @@ -756,7 +930,39 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_tlb_instr_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.InstrTLBReadMiss), - cpuStr, + cpuid, + ) + } + + if cacheProfile.DataTLBReadHit != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_tlb_data_read_hits_total"], + prometheus.CounterValue, float64(*cacheProfile.DataTLBReadHit), + cpuid, + ) + } + + if cacheProfile.DataTLBReadMiss != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_tlb_data_read_misses_total"], + prometheus.CounterValue, float64(*cacheProfile.DataTLBReadMiss), + cpuid, + ) + } + + if cacheProfile.DataTLBWriteHit != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_tlb_data_write_hits_total"], + prometheus.CounterValue, float64(*cacheProfile.DataTLBWriteHit), + cpuid, + ) + } + + if cacheProfile.DataTLBWriteMiss != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_tlb_data_write_misses_total"], + prometheus.CounterValue, float64(*cacheProfile.DataTLBWriteMiss), + cpuid, ) } @@ -764,7 +970,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_ll_read_hits_total"], prometheus.CounterValue, float64(*cacheProfile.LastLevelReadHit), - cpuStr, + cpuid, ) } @@ -772,7 +978,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_ll_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.LastLevelReadMiss), - cpuStr, + cpuid, ) } @@ -780,7 +986,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_ll_write_hits_total"], prometheus.CounterValue, float64(*cacheProfile.LastLevelWriteHit), - cpuStr, + cpuid, ) } @@ -788,7 +994,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_ll_write_misses_total"], prometheus.CounterValue, float64(*cacheProfile.LastLevelWriteMiss), - cpuStr, + cpuid, ) } @@ -796,7 +1002,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_bpu_read_hits_total"], prometheus.CounterValue, float64(*cacheProfile.BPUReadHit), - cpuStr, + cpuid, ) } @@ -804,7 +1010,7 @@ func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric( c.desc["cache_bpu_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.BPUReadMiss), - cpuStr, + cpuid, ) } } diff --git a/collector/perf_linux_test.go 
b/collector/perf_linux_test.go index b384a656b6..3176e7d2fb 100644 --- a/collector/perf_linux_test.go +++ b/collector/perf_linux_test.go @@ -11,31 +11,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noprocesses +//go:build !noperf package collector import ( - "io/ioutil" + "io" + "log/slog" + "os" "runtime" "strconv" "strings" "testing" - "github.com/go-kit/kit/log" - "github.com/prometheus/client_golang/prometheus" ) func canTestPerf(t *testing.T) { - paranoidBytes, err := ioutil.ReadFile("/proc/sys/kernel/perf_event_paranoid") + paranoidBytes, err := os.ReadFile("/proc/sys/kernel/perf_event_paranoid") if err != nil { t.Skip("Procfs not mounted, skipping perf tests") } - paranoidStr := strings.Replace(string(paranoidBytes), "\n", "", -1) + paranoidStr := strings.ReplaceAll(string(paranoidBytes), "\n", "") paranoid, err := strconv.Atoi(paranoidStr) if err != nil { - t.Fatalf("Expected perf_event_paranoid to be an int, got: %s", paranoidStr) + t.Fatalf("expected perf_event_paranoid to be an int, got: %s", paranoidStr) } if paranoid >= 1 { t.Skip("Skipping perf tests, set perf_event_paranoid to 0") @@ -44,7 +44,7 @@ func canTestPerf(t *testing.T) { func TestPerfCollector(t *testing.T) { canTestPerf(t) - collector, err := NewPerfCollector(log.NewNopLogger()) + collector, err := NewPerfCollector(slog.New(slog.NewTextHandler(io.Discard, nil))) if err != nil { t.Fatal(err) } @@ -53,7 +53,9 @@ func TestPerfCollector(t *testing.T) { metrics := make(chan prometheus.Metric) defer close(metrics) go func() { + i := 0 for range metrics { + i++ } }() if err := collector.Update(metrics); err != nil { @@ -67,41 +69,41 @@ func TestPerfCollectorStride(t *testing.T) { tests := []struct { name string flag string - exCpus []int + exCPUs []int }{ { - name: "valid single cpu", + name: "valid single CPU", flag: "1", - exCpus: []int{1}, + exCPUs: []int{1}, }, { - name: "valid range cpus", + name: "valid range CPUs", flag: "1-5", - exCpus: []int{1, 2, 3, 4, 5}, + exCPUs: []int{1, 2, 3, 4, 5}, }, { name: "valid stride", flag: "1-8:2", - exCpus: []int{1, 3, 5, 7}, + exCPUs: []int{1, 3, 5, 7}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ncpu := runtime.NumCPU() - for _, cpu := range test.exCpus { + for _, cpu := range test.exCPUs { if cpu > ncpu { t.Skipf("Skipping test because runtime.NumCPU < %d", cpu) } } perfCPUsFlag = &test.flag - collector, err := NewPerfCollector(log.NewNopLogger()) + collector, err := NewPerfCollector(slog.New(slog.NewTextHandler(io.Discard, nil))) if err != nil { t.Fatal(err) } c := collector.(*perfCollector) - for _, cpu := range test.exCpus { + for _, cpu := range test.exCPUs { if _, ok := c.perfHwProfilers[cpu]; !ok { t.Fatalf("Expected CPU %v in hardware profilers", cpu) } @@ -124,12 +126,12 @@ func TestPerfCPUFlagToCPUs(t *testing.T) { errStr string }{ { - name: "valid single cpu", + name: "valid single CPU", flag: "1", exCpus: []int{1}, }, { - name: "valid range cpus", + name: "valid range CPUs", flag: "1-5", exCpus: []int{1, 2, 3, 4, 5}, }, @@ -171,7 +173,7 @@ func TestPerfCPUFlagToCPUs(t *testing.T) { } if len(cpus) != len(test.exCpus) { t.Fatalf( - "expected cpus %v, got %v", + "expected CPUs %v, got %v", test.exCpus, cpus, ) @@ -179,7 +181,7 @@ func TestPerfCPUFlagToCPUs(t *testing.T) { for i := range cpus { if test.exCpus[i] != cpus[i] { t.Fatalf( - "expected cpus %v, got %v", + "expected CPUs %v, got %v", test.exCpus[i], cpus[i], ) diff --git a/collector/powersupplyclass.go 
b/collector/powersupplyclass.go index adebf0e6b1..33142617f3 100644 --- a/collector/powersupplyclass.go +++ b/collector/powersupplyclass.go @@ -11,21 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nopowersupplyclass -// +build linux +//go:build !nopowersupplyclass && (linux || darwin) package collector import ( - "errors" - "fmt" - "os" + "log/slog" "regexp" - "github.com/go-kit/kit/log" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/procfs/sysfs" - "gopkg.in/alecthomas/kingpin.v2" ) var ( @@ -36,14 +31,14 @@ type powerSupplyClassCollector struct { subsystem string ignoredPattern *regexp.Regexp metricDescs map[string]*prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { registerCollector("powersupplyclass", defaultEnabled, NewPowerSupplyClassCollector) } -func NewPowerSupplyClassCollector(logger log.Logger) (Collector, error) { +func NewPowerSupplyClassCollector(logger *slog.Logger) (Collector, error) { pattern := regexp.MustCompile(*powerSupplyClassIgnoredPowerSupplies) return &powerSupplyClassCollector{ subsystem: "power_supply", @@ -52,151 +47,3 @@ func NewPowerSupplyClassCollector(logger log.Logger) (Collector, error) { logger: logger, }, nil } - -func (c *powerSupplyClassCollector) Update(ch chan<- prometheus.Metric) error { - powerSupplyClass, err := getPowerSupplyClassInfo(c.ignoredPattern) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return ErrNoData - } - return fmt.Errorf("could not get power_supply class info: %w", err) - } - for _, powerSupply := range powerSupplyClass { - - for name, value := range map[string]*int64{ - "authentic": powerSupply.Authentic, - "calibrate": powerSupply.Calibrate, - "capacity": powerSupply.Capacity, - "capacity_alert_max": powerSupply.CapacityAlertMax, - "capacity_alert_min": powerSupply.CapacityAlertMin, - "cyclecount": powerSupply.CycleCount, - "online": powerSupply.Online, - "present": powerSupply.Present, - "time_to_empty_seconds": powerSupply.TimeToEmptyNow, - "time_to_full_seconds": powerSupply.TimeToFullNow, - } { - if value != nil { - pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value), powerSupply.Name, prometheus.GaugeValue) - } - } - - for name, value := range map[string]*int64{ - "current_boot": powerSupply.CurrentBoot, - "current_max": powerSupply.CurrentMax, - "current_ampere": powerSupply.CurrentNow, - "energy_empty": powerSupply.EnergyEmpty, - "energy_empty_design": powerSupply.EnergyEmptyDesign, - "energy_full": powerSupply.EnergyFull, - "energy_full_design": powerSupply.EnergyFullDesign, - "energy_watthour": powerSupply.EnergyNow, - "voltage_boot": powerSupply.VoltageBoot, - "voltage_max": powerSupply.VoltageMax, - "voltage_max_design": powerSupply.VoltageMaxDesign, - "voltage_min": powerSupply.VoltageMin, - "voltage_min_design": powerSupply.VoltageMinDesign, - "voltage_volt": powerSupply.VoltageNow, - "voltage_ocv": powerSupply.VoltageOCV, - "charge_control_limit": powerSupply.ChargeControlLimit, - "charge_control_limit_max": powerSupply.ChargeControlLimitMax, - "charge_counter": powerSupply.ChargeCounter, - "charge_empty": powerSupply.ChargeEmpty, - "charge_empty_design": powerSupply.ChargeEmptyDesign, - "charge_full": powerSupply.ChargeFull, - "charge_full_design": powerSupply.ChargeFullDesign, - "charge_ampere": powerSupply.ChargeNow, - "charge_term_current": powerSupply.ChargeTermCurrent, - "constant_charge_current": 
powerSupply.ConstantChargeCurrent, - "constant_charge_current_max": powerSupply.ConstantChargeCurrentMax, - "constant_charge_voltage": powerSupply.ConstantChargeVoltage, - "constant_charge_voltage_max": powerSupply.ConstantChargeVoltageMax, - "precharge_current": powerSupply.PrechargeCurrent, - "input_current_limit": powerSupply.InputCurrentLimit, - "power_watt": powerSupply.PowerNow, - } { - if value != nil { - pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value)/1e6, powerSupply.Name, prometheus.GaugeValue) - } - } - - for name, value := range map[string]*int64{ - "temp_celsius": powerSupply.Temp, - "temp_alert_max_celsius": powerSupply.TempAlertMax, - "temp_alert_min_celsius": powerSupply.TempAlertMin, - "temp_ambient_celsius": powerSupply.TempAmbient, - "temp_ambient_max_celsius": powerSupply.TempAmbientMax, - "temp_ambient_min_celsius": powerSupply.TempAmbientMin, - "temp_max_celsius": powerSupply.TempMax, - "temp_min_celsius": powerSupply.TempMin, - } { - if value != nil { - pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value)/10.0, powerSupply.Name, prometheus.GaugeValue) - } - } - - var ( - keys []string - values []string - ) - for name, value := range map[string]string{ - "power_supply": powerSupply.Name, - "capacity_level": powerSupply.CapacityLevel, - "charge_type": powerSupply.ChargeType, - "health": powerSupply.Health, - "manufacturer": powerSupply.Manufacturer, - "model_name": powerSupply.ModelName, - "serial_number": powerSupply.SerialNumber, - "status": powerSupply.Status, - "technology": powerSupply.Technology, - "type": powerSupply.Type, - "usb_type": powerSupply.UsbType, - "scope": powerSupply.Scope, - } { - if value != "" { - keys = append(keys, name) - values = append(values, value) - } - } - - fieldDesc := prometheus.NewDesc( - prometheus.BuildFQName(namespace, c.subsystem, "info"), - "info of /sys/class/power_supply/.", - keys, - nil, - ) - ch <- prometheus.MustNewConstMetric(fieldDesc, prometheus.GaugeValue, 1.0, values...) - - } - - return nil -} - -func pushPowerSupplyMetric(ch chan<- prometheus.Metric, subsystem string, name string, value float64, powerSupplyName string, valueType prometheus.ValueType) { - fieldDesc := prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, name), - fmt.Sprintf("%s value of /sys/class/power_supply/.", name), - []string{"power_supply"}, - nil, - ) - - ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, value, powerSupplyName) -} - -func getPowerSupplyClassInfo(ignore *regexp.Regexp) (sysfs.PowerSupplyClass, error) { - fs, err := sysfs.NewFS(*sysPath) - if err != nil { - return nil, err - } - powerSupplyClass, err := fs.PowerSupplyClass() - - if err != nil { - return powerSupplyClass, fmt.Errorf("error obtaining power_supply class info: %w", err) - } - - for device := range powerSupplyClass { - if ignore.MatchString(device) { - delete(powerSupplyClass, device) - } - } - - return powerSupplyClass, nil -} diff --git a/collector/powersupplyclass_darwin.go b/collector/powersupplyclass_darwin.go new file mode 100644 index 0000000000..be3d778ce5 --- /dev/null +++ b/collector/powersupplyclass_darwin.go @@ -0,0 +1,419 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nopowersupplyclass + +package collector + +/* +#cgo LDFLAGS: -framework IOKit -framework CoreFoundation +#include +#include +#include +#include +#include + +// values collected from IOKit Power Source APIs +// Functions documentation available at +// https://developer.apple.com/documentation/iokit/iopowersources_h +// CFDictionary keys definition +// https://developer.apple.com/documentation/iokit/iopskeys_h/defines +struct macos_powersupply { + char *Name; + char *PowerSourceState; + char *Type; + char *TransportType; + char *BatteryHealth; + char *HardwareSerialNumber; + + int *PowerSourceID; + int *CurrentCapacity; + int *MaxCapacity; + int *DesignCapacity; + int *NominalCapacity; + + int *TimeToEmpty; + int *TimeToFullCharge; + + int *Voltage; + int *Current; + + int *Temperature; + + // boolean values + int *IsCharged; + int *IsCharging; + int *InternalFailure; + int *IsPresent; +}; + +int *CFDictionaryGetInt(CFDictionaryRef theDict, const void *key) { + CFNumberRef tmp; + int *value; + + tmp = CFDictionaryGetValue(theDict, key); + + if (tmp == NULL) + return NULL; + + value = (int*)malloc(sizeof(int)); + if (CFNumberGetValue(tmp, kCFNumberIntType, value)) { + return value; + } + + free(value); + return NULL; +} + +int *CFDictionaryGetBoolean(CFDictionaryRef theDict, const void *key) { + CFBooleanRef tmp; + int *value; + + tmp = CFDictionaryGetValue(theDict, key); + + if (tmp == NULL) + return NULL; + + value = (int*)malloc(sizeof(int)); + if (CFBooleanGetValue(tmp)) { + *value = 1; + } else { + *value = 0; + } + + return value; +} + +char *CFDictionaryGetSring(CFDictionaryRef theDict, const void *key) { + CFStringRef tmp; + CFIndex size; + char *value; + + tmp = CFDictionaryGetValue(theDict, key); + + if (tmp == NULL) + return NULL; + + size = CFStringGetLength(tmp) + 1; + value = (char*)malloc(size); + + if(CFStringGetCString(tmp, value, size, kCFStringEncodingUTF8)) { + return value; + } + + free(value); + return NULL; +} + +struct macos_powersupply* getPowerSupplyInfo(CFDictionaryRef powerSourceInformation) { + struct macos_powersupply *ret; + + if (powerSourceInformation == NULL) + return NULL; + + ret = (struct macos_powersupply*)malloc(sizeof(struct macos_powersupply)); + + ret->PowerSourceID = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSPowerSourceIDKey)); + ret->CurrentCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSCurrentCapacityKey)); + ret->MaxCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSMaxCapacityKey)); + ret->DesignCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSDesignCapacityKey)); + ret->NominalCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSNominalCapacityKey)); + ret->TimeToEmpty = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSTimeToEmptyKey)); + ret->TimeToFullCharge = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSTimeToFullChargeKey)); + ret->Voltage = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSVoltageKey)); + ret->Current = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSCurrentKey)); + ret->Temperature = 
CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSTemperatureKey)); + + ret->Name = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSNameKey)); + ret->PowerSourceState = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSPowerSourceStateKey)); + ret->Type = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSTypeKey)); + ret->TransportType = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSTransportTypeKey)); + ret->BatteryHealth = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSBatteryHealthKey)); + ret->HardwareSerialNumber = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSHardwareSerialNumberKey)); + + ret->IsCharged = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSIsChargedKey)); + ret->IsCharging = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSIsChargingKey)); + ret->InternalFailure = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSInternalFailureKey)); + ret->IsPresent = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSIsPresentKey)); + + return ret; +} + + + +void releasePowerSupply(struct macos_powersupply *ps) { + free(ps->Name); + free(ps->PowerSourceState); + free(ps->Type); + free(ps->TransportType); + free(ps->BatteryHealth); + free(ps->HardwareSerialNumber); + + free(ps->PowerSourceID); + free(ps->CurrentCapacity); + free(ps->MaxCapacity); + free(ps->DesignCapacity); + free(ps->NominalCapacity); + free(ps->TimeToEmpty); + free(ps->TimeToFullCharge); + free(ps->Voltage); + free(ps->Current); + free(ps->Temperature); + + free(ps->IsCharged); + free(ps->IsCharging); + free(ps->InternalFailure); + free(ps->IsPresent); + + free(ps); +} +*/ +import "C" + +import ( + "fmt" + "strconv" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *powerSupplyClassCollector) Update(ch chan<- prometheus.Metric) error { + psList, err := getPowerSourceList() + if err != nil { + return fmt.Errorf("couldn't get IOPPowerSourcesList: %w", err) + } + + for _, info := range psList { + labels := getPowerSourceDescriptorLabels(info) + powerSupplyName := labels["power_supply"] + + if c.ignoredPattern.MatchString(powerSupplyName) { + continue + } + + for name, value := range getPowerSourceDescriptorMap(info) { + if value == nil { + continue + } + + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, name), + fmt.Sprintf("IOKit Power Source information field %s for .", name), + []string{"power_supply"}, nil, + ), + prometheus.GaugeValue, *value, powerSupplyName, + ) + } + + pushEnumMetric( + ch, + getPowerSourceDescriptorState(info), + "power_source_state", + c.subsystem, + powerSupplyName, + ) + + pushEnumMetric( + ch, + getPowerSourceDescriptorBatteryHealth(info), + "battery_health", + c.subsystem, + powerSupplyName, + ) + + var ( + keys []string + values []string + ) + for name, value := range labels { + if value != "" { + keys = append(keys, name) + values = append(values, value) + } + } + fieldDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "info"), + "IOKit Power Source information for .", + keys, + nil, + ) + ch <- prometheus.MustNewConstMetric(fieldDesc, prometheus.GaugeValue, 1.0, values...) + + C.releasePowerSupply(info) + } + + return nil +} + +// getPowerSourceList fetches information from IOKit APIs +// +// Data is provided as opaque CoreFoundation references +// C.getPowerSupplyInfo will convert those objects in something +// easily manageable in Go. 
+// https://developer.apple.com/documentation/iokit/iopowersources_h +func getPowerSourceList() ([]*C.struct_macos_powersupply, error) { + infos, err := C.IOPSCopyPowerSourcesInfo() + if err != nil { + return nil, err + } + defer C.CFRelease(infos) + + psList, err := C.IOPSCopyPowerSourcesList(infos) + if err != nil { + return nil, err + } + + if psList == C.CFArrayRef(0) { + return nil, nil + } + defer C.CFRelease(C.CFTypeRef(psList)) + + size, err := C.CFArrayGetCount(psList) + if err != nil { + return nil, err + } + + ret := make([]*C.struct_macos_powersupply, size) + for i := C.CFIndex(0); i < size; i++ { + ps, err := C.CFArrayGetValueAtIndex(psList, i) + if err != nil { + return nil, err + } + + dict, err := C.IOPSGetPowerSourceDescription(infos, (C.CFTypeRef)(ps)) + if err != nil { + return nil, err + } + + info, err := C.getPowerSupplyInfo(dict) + if err != nil { + return nil, err + } + + ret[int(i)] = info + } + + return ret, nil +} + +func getPowerSourceDescriptorMap(info *C.struct_macos_powersupply) map[string]*float64 { + return map[string]*float64{ + "current_capacity": convertValue(info.CurrentCapacity), + "max_capacity": convertValue(info.MaxCapacity), + "design_capacity": convertValue(info.DesignCapacity), + "nominal_capacity": convertValue(info.NominalCapacity), + "time_to_empty_seconds": minutesToSeconds(info.TimeToEmpty), + "time_to_full_seconds": minutesToSeconds(info.TimeToFullCharge), + "voltage_volt": scaleValue(info.Voltage, 1e3), + "current_ampere": scaleValue(info.Current, 1e3), + "temp_celsius": convertValue(info.Temperature), + "present": convertValue(info.IsPresent), + "charging": convertValue(info.IsCharging), + "charged": convertValue(info.IsCharged), + "internal_failure": convertValue(info.InternalFailure), + } +} + +func getPowerSourceDescriptorLabels(info *C.struct_macos_powersupply) map[string]string { + return map[string]string{ + "id": strconv.FormatInt(int64(*info.PowerSourceID), 10), + "power_supply": C.GoString(info.Name), + "type": C.GoString(info.Type), + "transport_type": C.GoString(info.TransportType), + "serial_number": C.GoString(info.HardwareSerialNumber), + } +} + +func getPowerSourceDescriptorState(info *C.struct_macos_powersupply) map[string]float64 { + stateMap := map[string]float64{ + "Off Line": 0, + "AC Power": 0, + "Battery Power": 0, + } + + // This field is always present + // https://developer.apple.com/documentation/iokit/kiopspowersourcestatekey + stateMap[C.GoString(info.PowerSourceState)] = 1 + + return stateMap +} + +func getPowerSourceDescriptorBatteryHealth(info *C.struct_macos_powersupply) map[string]float64 { + // This field is optional + // https://developer.apple.com/documentation/iokit/kiopsBatteryHealthkey + if info.BatteryHealth == nil { + return nil + } + + stateMap := map[string]float64{ + "Good": 0, + "Fair": 0, + "Poor": 0, + } + + stateMap[C.GoString(info.BatteryHealth)] = 1 + + return stateMap +} + +func convertValue(value *C.int) *float64 { + if value == nil { + return nil + } + + ret := new(float64) + *ret = (float64)(*value) + return ret +} + +func scaleValue(value *C.int, scale float64) *float64 { + ret := convertValue(value) + if ret == nil { + return nil + } + + *ret /= scale + + return ret +} + +// minutesToSeconds converts *C.int minutes into *float64 seconds. +// +// Only positive values will be scaled to seconds, because negative ones +// have special meanings. I.e. 
-1 indicates "Still Calculating the Time" +func minutesToSeconds(minutes *C.int) *float64 { + ret := convertValue(minutes) + if ret == nil { + return nil + } + + if *ret > 0 { + *ret *= 60 + } + + return ret +} + +func pushEnumMetric(ch chan<- prometheus.Metric, values map[string]float64, name, subsystem, powerSupply string) { + for state, value := range values { + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, name), + fmt.Sprintf("IOKit Power Source information field %s for .", name), + []string{"power_supply", "state"}, nil, + ), + prometheus.GaugeValue, value, powerSupply, state, + ) + } +} diff --git a/collector/powersupplyclass_linux.go b/collector/powersupplyclass_linux.go new file mode 100644 index 0000000000..b4fbf3510a --- /dev/null +++ b/collector/powersupplyclass_linux.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nopowersupplyclass + +package collector + +import ( + "errors" + "fmt" + "os" + "regexp" + "strings" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +func (c *powerSupplyClassCollector) Update(ch chan<- prometheus.Metric) error { + powerSupplyClass, err := getPowerSupplyClassInfo(c.ignoredPattern) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return ErrNoData + } + return fmt.Errorf("could not get power_supply class info: %w", err) + } + for _, powerSupply := range powerSupplyClass { + + for name, value := range map[string]*int64{ + "authentic": powerSupply.Authentic, + "calibrate": powerSupply.Calibrate, + "capacity": powerSupply.Capacity, + "capacity_alert_max": powerSupply.CapacityAlertMax, + "capacity_alert_min": powerSupply.CapacityAlertMin, + "cyclecount": powerSupply.CycleCount, + "online": powerSupply.Online, + "present": powerSupply.Present, + "time_to_empty_seconds": powerSupply.TimeToEmptyNow, + "time_to_full_seconds": powerSupply.TimeToFullNow, + } { + if value != nil { + pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value), powerSupply.Name, prometheus.GaugeValue) + } + } + + for name, value := range map[string]*int64{ + "current_boot": powerSupply.CurrentBoot, + "current_max": powerSupply.CurrentMax, + "current_ampere": powerSupply.CurrentNow, + "energy_empty": powerSupply.EnergyEmpty, + "energy_empty_design": powerSupply.EnergyEmptyDesign, + "energy_full": powerSupply.EnergyFull, + "energy_full_design": powerSupply.EnergyFullDesign, + "energy_watthour": powerSupply.EnergyNow, + "voltage_boot": powerSupply.VoltageBoot, + "voltage_max": powerSupply.VoltageMax, + "voltage_max_design": powerSupply.VoltageMaxDesign, + "voltage_min": powerSupply.VoltageMin, + "voltage_min_design": powerSupply.VoltageMinDesign, + "voltage_volt": powerSupply.VoltageNow, + "voltage_ocv": powerSupply.VoltageOCV, + "charge_control_limit": powerSupply.ChargeControlLimit, + "charge_control_limit_max": powerSupply.ChargeControlLimitMax, + "charge_counter": 
powerSupply.ChargeCounter, + "charge_empty": powerSupply.ChargeEmpty, + "charge_empty_design": powerSupply.ChargeEmptyDesign, + "charge_full": powerSupply.ChargeFull, + "charge_full_design": powerSupply.ChargeFullDesign, + "charge_ampere": powerSupply.ChargeNow, + "charge_term_current": powerSupply.ChargeTermCurrent, + "constant_charge_current": powerSupply.ConstantChargeCurrent, + "constant_charge_current_max": powerSupply.ConstantChargeCurrentMax, + "constant_charge_voltage": powerSupply.ConstantChargeVoltage, + "constant_charge_voltage_max": powerSupply.ConstantChargeVoltageMax, + "precharge_current": powerSupply.PrechargeCurrent, + "input_current_limit": powerSupply.InputCurrentLimit, + "power_watt": powerSupply.PowerNow, + } { + if value != nil { + pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value)/1e6, powerSupply.Name, prometheus.GaugeValue) + } + } + + for name, value := range map[string]*int64{ + "temp_celsius": powerSupply.Temp, + "temp_alert_max_celsius": powerSupply.TempAlertMax, + "temp_alert_min_celsius": powerSupply.TempAlertMin, + "temp_ambient_celsius": powerSupply.TempAmbient, + "temp_ambient_max_celsius": powerSupply.TempAmbientMax, + "temp_ambient_min_celsius": powerSupply.TempAmbientMin, + "temp_max_celsius": powerSupply.TempMax, + "temp_min_celsius": powerSupply.TempMin, + } { + if value != nil { + pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value)/10.0, powerSupply.Name, prometheus.GaugeValue) + } + } + + var ( + keys []string + values []string + ) + for name, value := range map[string]string{ + "power_supply": powerSupply.Name, + "capacity_level": powerSupply.CapacityLevel, + "charge_type": powerSupply.ChargeType, + "health": powerSupply.Health, + "manufacturer": powerSupply.Manufacturer, + "model_name": powerSupply.ModelName, + "serial_number": powerSupply.SerialNumber, + "status": powerSupply.Status, + "technology": powerSupply.Technology, + "type": powerSupply.Type, + "usb_type": powerSupply.UsbType, + "scope": powerSupply.Scope, + } { + if value != "" { + keys = append(keys, name) + values = append(values, strings.ToValidUTF8(value, "�")) + } + } + + fieldDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "info"), + "info of /sys/class/power_supply/.", + keys, + nil, + ) + ch <- prometheus.MustNewConstMetric(fieldDesc, prometheus.GaugeValue, 1.0, values...) 
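// The *_info metric above follows the usual Prometheus info-metric pattern:
// a constant gauge of 1 whose labels carry the power supply's non-numeric
// attributes, and only attributes with a non-empty value become label pairs.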
+ + } + + return nil +} + +func pushPowerSupplyMetric(ch chan<- prometheus.Metric, subsystem string, name string, value float64, powerSupplyName string, valueType prometheus.ValueType) { + fieldDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, name), + fmt.Sprintf("%s value of /sys/class/power_supply/.", name), + []string{"power_supply"}, + nil, + ) + + ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, value, powerSupplyName) +} + +func getPowerSupplyClassInfo(ignore *regexp.Regexp) (sysfs.PowerSupplyClass, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, err + } + powerSupplyClass, err := fs.PowerSupplyClass() + + if err != nil { + return powerSupplyClass, fmt.Errorf("error obtaining power_supply class info: %w", err) + } + + for device := range powerSupplyClass { + if ignore.MatchString(device) { + delete(powerSupplyClass, device) + } + } + + return powerSupplyClass, nil +} diff --git a/collector/pressure_linux.go b/collector/pressure_linux.go index 8f0b8db6f2..659c4b3768 100644 --- a/collector/pressure_linux.go +++ b/collector/pressure_linux.go @@ -11,21 +11,30 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nopressure +//go:build !nopressure package collector import ( + "errors" "fmt" + "log/slog" + "os" + "syscall" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) +const ( + psiResourceCPU = "cpu" + psiResourceIO = "io" + psiResourceMemory = "memory" + psiResourceIRQ = "irq" +) + var ( - psiResources = []string{"cpu", "io", "memory"} + psiResources = []string{psiResourceCPU, psiResourceIO, psiResourceMemory, psiResourceIRQ} ) type pressureStatsCollector struct { @@ -34,10 +43,11 @@ type pressureStatsCollector struct { ioFull *prometheus.Desc mem *prometheus.Desc memFull *prometheus.Desc + irqFull *prometheus.Desc fs procfs.FS - logger log.Logger + logger *slog.Logger } func init() { @@ -45,7 +55,7 @@ func init() { } // NewPressureStatsCollector returns a Collector exposing pressure stall information -func NewPressureStatsCollector(logger log.Logger) (Collector, error) { +func NewPressureStatsCollector(logger *slog.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) @@ -77,6 +87,11 @@ func NewPressureStatsCollector(logger log.Logger) (Collector, error) { "Total time in seconds no process could make progress due to memory congestion", nil, nil, ), + irqFull: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "pressure", "irq_stalled_seconds_total"), + "Total time in seconds no process could make progress due to IRQ congestion", + nil, nil, + ), fs: fs, logger: logger, }, nil @@ -84,25 +99,56 @@ func NewPressureStatsCollector(logger log.Logger) (Collector, error) { // Update calls procfs.NewPSIStatsForResource for the different resources and updates the values func (c *pressureStatsCollector) Update(ch chan<- prometheus.Metric) error { + foundResources := 0 for _, res := range psiResources { - level.Debug(c.logger).Log("msg", "collecting statistics for resource", "resource", res) + c.logger.Debug("collecting statistics for resource", "resource", res) vals, err := c.fs.PSIStatsForResource(res) if err != nil { - level.Debug(c.logger).Log("msg", "pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel") - return nil + if 
errors.Is(err, os.ErrNotExist) && res != psiResourceIRQ { + c.logger.Debug("pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel", "resource", res) + continue + } + if errors.Is(err, os.ErrNotExist) && res == psiResourceIRQ { + c.logger.Debug("IRQ pressure information is unavailable, you need a Linux kernel >= 6.1 and/or CONFIG_PSI enabled for your kernel", "resource", res) + continue + } + if errors.Is(err, syscall.ENOTSUP) { + c.logger.Debug("pressure information is disabled, add psi=1 kernel command line to enable it") + return ErrNoData + } + return fmt.Errorf("failed to retrieve pressure stats: %w", err) + } + // IRQ pressure does not have 'some' data. + // See https://github.com/torvalds/linux/blob/v6.9/include/linux/psi_types.h#L65 + if vals.Some == nil && res != psiResourceIRQ { + c.logger.Debug("pressure information returned no 'some' data") + return ErrNoData + } + if vals.Full == nil && res != psiResourceCPU { + c.logger.Debug("pressure information returned no 'full' data") + return ErrNoData } switch res { - case "cpu": + case psiResourceCPU: ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0) - case "io": + case psiResourceIO: ch <- prometheus.MustNewConstMetric(c.io, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0) ch <- prometheus.MustNewConstMetric(c.ioFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0) - case "memory": + case psiResourceMemory: ch <- prometheus.MustNewConstMetric(c.mem, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0) ch <- prometheus.MustNewConstMetric(c.memFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0) + case psiResourceIRQ: + ch <- prometheus.MustNewConstMetric(c.irqFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0) default: - level.Debug(c.logger).Log("msg", "did not account for resource", "resource", res) + c.logger.Debug("did not account for resource", "resource", res) + continue } + foundResources++ + } + + if foundResources == 0 { + c.logger.Debug("pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel") + return ErrNoData } return nil diff --git a/collector/processes_linux.go b/collector/processes_linux.go index 3d4e95d242..eff5cf06b5 100644 --- a/collector/processes_linux.go +++ b/collector/processes_linux.go @@ -11,29 +11,33 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noprocesses +//go:build !noprocesses package collector import ( "errors" "fmt" + "log/slog" "os" + "path" + "strconv" + "strings" + "syscall" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type processCollector struct { - fs procfs.FS - threadAlloc *prometheus.Desc - threadLimit *prometheus.Desc - procsState *prometheus.Desc - pidUsed *prometheus.Desc - pidMax *prometheus.Desc - logger log.Logger + fs procfs.FS + threadAlloc *prometheus.Desc + threadLimit *prometheus.Desc + threadsState *prometheus.Desc + procsState *prometheus.Desc + pidUsed *prometheus.Desc + pidMax *prometheus.Desc + logger *slog.Logger } func init() { @@ -41,7 +45,7 @@ func init() { } // NewProcessStatCollector returns a new Collector exposing process data read from the proc filesystem. 
-func NewProcessStatCollector(logger log.Logger) (Collector, error) { +func NewProcessStatCollector(logger *slog.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) @@ -59,6 +63,11 @@ func NewProcessStatCollector(logger log.Logger) (Collector, error) { "Limit of threads in the system", nil, nil, ), + threadsState: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "threads_state"), + "Number of threads in each state.", + []string{"thread_state"}, nil, + ), procsState: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "state"), "Number of processes in each state.", @@ -74,7 +83,7 @@ func NewProcessStatCollector(logger log.Logger) (Collector, error) { }, nil } func (c *processCollector) Update(ch chan<- prometheus.Metric) error { - pids, states, threads, err := c.getAllocatedThreads() + pids, states, threads, threadStates, err := c.getAllocatedThreads() if err != nil { return fmt.Errorf("unable to retrieve number of allocated threads: %w", err) } @@ -90,9 +99,13 @@ func (c *processCollector) Update(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric(c.procsState, prometheus.GaugeValue, float64(states[state]), state) } + for state := range threadStates { + ch <- prometheus.MustNewConstMetric(c.threadsState, prometheus.GaugeValue, float64(threadStates[state]), state) + } + pidM, err := readUintFromFile(procFilePath("sys/kernel/pid_max")) if err != nil { - return fmt.Errorf("unable to retrieve limit number of maximum pids alloved: %w", err) + return fmt.Errorf("unable to retrieve limit number of maximum pids allowed: %w", err) } ch <- prometheus.MustNewConstMetric(c.pidUsed, prometheus.GaugeValue, float64(pids)) ch <- prometheus.MustNewConstMetric(c.pidMax, prometheus.GaugeValue, float64(pidM)) @@ -100,28 +113,80 @@ func (c *processCollector) Update(ch chan<- prometheus.Metric) error { return nil } -func (c *processCollector) getAllocatedThreads() (int, map[string]int32, int, error) { +func (c *processCollector) getAllocatedThreads() (int, map[string]int32, int, map[string]int32, error) { p, err := c.fs.AllProcs() if err != nil { - return 0, nil, 0, err + return 0, nil, 0, nil, fmt.Errorf("unable to list all processes: %w", err) } pids := 0 thread := 0 procStates := make(map[string]int32) + threadStates := make(map[string]int32) + for _, pid := range p { stat, err := pid.Stat() - // PIDs can vanish between getting the list and getting stats. - if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "file not found when retrieving stats for pid", "pid", pid, "err", err) - continue - } if err != nil { - level.Debug(c.logger).Log("msg", "error reading stat for pid", "pid", pid, "err", err) - return 0, nil, 0, err + // PIDs can vanish between getting the list and getting stats. 
+ if c.isIgnoredError(err) { + c.logger.Debug("file not found when retrieving stats for pid", "pid", pid.PID, "err", err) + continue + } + c.logger.Debug("error reading stat for pid", "pid", pid.PID, "err", err) + return 0, nil, 0, nil, fmt.Errorf("error reading stat for pid %d: %w", pid.PID, err) } pids++ procStates[stat.State]++ thread += stat.NumThreads + err = c.getThreadStates(pid.PID, stat, threadStates) + if err != nil { + return 0, nil, 0, nil, err + } + } + return pids, procStates, thread, threadStates, nil +} + +func (c *processCollector) getThreadStates(pid int, pidStat procfs.ProcStat, threadStates map[string]int32) error { + fs, err := procfs.NewFS(procFilePath(path.Join(strconv.Itoa(pid), "task"))) + if err != nil { + if c.isIgnoredError(err) { + c.logger.Debug("file not found when retrieving tasks for pid", "pid", pid, "err", err) + return nil + } + c.logger.Debug("error reading tasks for pid", "pid", pid, "err", err) + return fmt.Errorf("error reading task for pid %d: %w", pid, err) + } + + t, err := fs.AllProcs() + if err != nil { + if c.isIgnoredError(err) { + c.logger.Debug("file not found when retrieving tasks for pid", "pid", pid, "err", err) + return nil + } + return fmt.Errorf("unable to list all threads for pid: %d %w", pid, err) + } + + for _, thread := range t { + if pid == thread.PID { + threadStates[pidStat.State]++ + continue + } + threadStat, err := thread.Stat() + if err != nil { + if c.isIgnoredError(err) { + c.logger.Debug("file not found when retrieving stats for thread", "pid", pid, "threadId", thread.PID, "err", err) + continue + } + c.logger.Debug("error reading stat for thread", "pid", pid, "threadId", thread.PID, "err", err) + return fmt.Errorf("error reading stat for pid:%d thread:%d err:%w", pid, thread.PID, err) + } + threadStates[threadStat.State]++ + } + return nil +} + +func (c *processCollector) isIgnoredError(err error) bool { + if errors.Is(err, os.ErrNotExist) || strings.Contains(err.Error(), syscall.ESRCH.Error()) { + return true } - return pids, procStates, thread, nil + return false } diff --git a/collector/processes_linux_test.go b/collector/processes_linux_test.go index cb01fbb7ee..4b20ef8d63 100644 --- a/collector/processes_linux_test.go +++ b/collector/processes_linux_test.go @@ -11,16 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
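The new getThreadStates helper above treats /proc/<pid>/task as its own procfs mount and walks it with AllProcs so each thread's state can be tallied. A self-contained sketch of the same idea for a single process, assuming the standard /proc layout rather than the collector's configurable --path.procfs:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strconv"

        "github.com/prometheus/procfs"
    )

    // threadStates counts thread states (R, S, D, ...) for one PID by opening
    // /proc/<pid>/task as a procfs filesystem and reading each thread's stat.
    func threadStates(pid int) (map[string]int, error) {
        fs, err := procfs.NewFS(filepath.Join("/proc", strconv.Itoa(pid), "task"))
        if err != nil {
            return nil, err
        }
        threads, err := fs.AllProcs()
        if err != nil {
            return nil, err
        }
        states := map[string]int{}
        for _, t := range threads {
            stat, err := t.Stat()
            if err != nil {
                continue // threads can exit between listing and reading
            }
            states[stat.State]++
        }
        return states, nil
    }

    func main() {
        states, err := threadStates(os.Getpid())
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println(states) // e.g. map[R:1 S:12]
    }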
-// +build !noprocesses +//go:build !noprocesses package collector import ( - "github.com/go-kit/kit/log" + "io" + "log/slog" "testing" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/procfs" - kingpin "gopkg.in/alecthomas/kingpin.v2" ) func TestReadProcessStatus(t *testing.T) { @@ -32,8 +33,8 @@ func TestReadProcessStatus(t *testing.T) { if err != nil { t.Errorf("failed to open procfs: %v", err) } - c := processCollector{fs: fs, logger: log.NewNopLogger()} - pids, states, threads, err := c.getAllocatedThreads() + c := processCollector{fs: fs, logger: slog.New(slog.NewTextHandler(io.Discard, nil))} + pids, states, threads, _, err := c.getAllocatedThreads() if err != nil { t.Fatalf("Cannot retrieve data from procfs getAllocatedThreads function: %v ", err) } @@ -46,7 +47,7 @@ func TestReadProcessStatus(t *testing.T) { } maxPid, err := readUintFromFile(procFilePath("sys/kernel/pid_max")) if err != nil { - t.Fatalf("Unable to retrieve limit number of maximum pids alloved %v\n", err) + t.Fatalf("Unable to retrieve limit number of maximum pids allowed %v\n", err) } if uint64(pids) > maxPid || pids == 0 { t.Fatalf("Total running pids cannot be greater than %d or equals to 0", maxPid) diff --git a/collector/qdisc_linux.go b/collector/qdisc_linux.go index daba1999c6..9062da122c 100644 --- a/collector/qdisc_linux.go +++ b/collector/qdisc_linux.go @@ -11,32 +11,40 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noqdisc +//go:build !noqdisc package collector import ( "encoding/json" - "io/ioutil" + "fmt" + "log/slog" + "os" "path/filepath" + "github.com/alecthomas/kingpin/v2" "github.com/ema/qdisc" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" ) type qdiscStatCollector struct { - bytes typedDesc - packets typedDesc - drops typedDesc - requeues typedDesc - overlimits typedDesc - logger log.Logger + logger *slog.Logger + deviceFilter deviceFilter + bytes typedDesc + packets typedDesc + drops typedDesc + requeues typedDesc + overlimits typedDesc + qlength typedDesc + backlog typedDesc } var ( - collectorQdisc = kingpin.Flag("collector.qdisc.fixtures", "test fixtures to use for qdisc collector end-to-end testing").Default("").String() + collectorQdisc = kingpin.Flag("collector.qdisc.fixtures", "test fixtures to use for qdisc collector end-to-end testing").Default("").String() + collectorQdiscDeviceInclude = kingpin.Flag("collector.qdisc.device-include", "Regexp of qdisc devices to include (mutually exclusive to device-exclude).").String() + oldCollectorQdiskDeviceInclude = kingpin.Flag("collector.qdisk.device-include", "DEPRECATED: Use collector.qdisc.device-include").Hidden().String() + collectorQdiscDeviceExclude = kingpin.Flag("collector.qdisc.device-exclude", "Regexp of qdisc devices to exclude (mutually exclusive to device-include).").String() + oldCollectorQdiskDeviceExclude = kingpin.Flag("collector.qdisk.device-exclude", "DEPRECATED: Use collector.qdisc.device-exclude").Hidden().String() ) func init() { @@ -44,7 +52,29 @@ func init() { } // NewQdiscStatCollector returns a new Collector exposing queuing discipline statistics. 
-func NewQdiscStatCollector(logger log.Logger) (Collector, error) { +func NewQdiscStatCollector(logger *slog.Logger) (Collector, error) { + if *oldCollectorQdiskDeviceInclude != "" { + if *collectorQdiscDeviceInclude == "" { + logger.Warn("--collector.qdisk.device-include is DEPRECATED and will be removed in 2.0.0, use --collector.qdisc.device-include") + *collectorQdiscDeviceInclude = *oldCollectorQdiskDeviceInclude + } else { + return nil, fmt.Errorf("--collector.qdisk.device-include and --collector.qdisc.device-include are mutually exclusive") + } + } + + if *oldCollectorQdiskDeviceExclude != "" { + if *collectorQdiscDeviceExclude == "" { + logger.Warn("--collector.qdisk.device-exclude is DEPRECATED and will be removed in 2.0.0, use --collector.qdisc.device-exclude") + *collectorQdiscDeviceExclude = *oldCollectorQdiskDeviceExclude + } else { + return nil, fmt.Errorf("--collector.qdisk.device-exclude and --collector.qdisc.device-exclude are mutually exclusive") + } + } + + if *collectorQdiscDeviceExclude != "" && *collectorQdiscDeviceInclude != "" { + return nil, fmt.Errorf("collector.qdisc.device-include and collector.qdisc.device-exclude are mutually exclusive") + } + return &qdiscStatCollector{ bytes: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "qdisc", "bytes_total"), @@ -71,14 +101,25 @@ func NewQdiscStatCollector(logger log.Logger) (Collector, error) { "Number of overlimit packets.", []string{"device", "kind"}, nil, ), prometheus.CounterValue}, - logger: logger, + qlength: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, "qdisc", "current_queue_length"), + "Number of packets currently in queue to be sent.", + []string{"device", "kind"}, nil, + ), prometheus.GaugeValue}, + backlog: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, "qdisc", "backlog"), + "Number of bytes currently in queue to be sent.", + []string{"device", "kind"}, nil, + ), prometheus.GaugeValue}, + logger: logger, + deviceFilter: newDeviceFilter(*collectorQdiscDeviceExclude, *collectorQdiscDeviceInclude), }, nil } func testQdiscGet(fixtures string) ([]qdisc.QdiscInfo, error) { var res []qdisc.QdiscInfo - b, err := ioutil.ReadFile(filepath.Join(fixtures, "results.json")) + b, err := os.ReadFile(filepath.Join(fixtures, "results.json")) if err != nil { return res, err } @@ -109,11 +150,17 @@ func (c *qdiscStatCollector) Update(ch chan<- prometheus.Metric) error { continue } + if c.deviceFilter.ignored(msg.IfaceName) { + continue + } + ch <- c.bytes.mustNewConstMetric(float64(msg.Bytes), msg.IfaceName, msg.Kind) ch <- c.packets.mustNewConstMetric(float64(msg.Packets), msg.IfaceName, msg.Kind) ch <- c.drops.mustNewConstMetric(float64(msg.Drops), msg.IfaceName, msg.Kind) ch <- c.requeues.mustNewConstMetric(float64(msg.Requeues), msg.IfaceName, msg.Kind) ch <- c.overlimits.mustNewConstMetric(float64(msg.Overlimits), msg.IfaceName, msg.Kind) + ch <- c.qlength.mustNewConstMetric(float64(msg.Qlen), msg.IfaceName, msg.Kind) + ch <- c.backlog.mustNewConstMetric(float64(msg.Backlog), msg.IfaceName, msg.Kind) } return nil diff --git a/collector/rapl_linux.go b/collector/rapl_linux.go index 25498c9423..92c402d539 100644 --- a/collector/rapl_linux.go +++ b/collector/rapl_linux.go @@ -11,36 +11,57 @@ // See the License for the specific language governing permissions and // limitations under the License. 
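The qdisc collector above now honours the shared device include/exclude semantics: a device is skipped when it matches the exclude regexp, or when an include regexp is set and the device does not match it, and the two flags are rejected as mutually exclusive. The real code reuses the package's newDeviceFilter helper; the stand-alone type below only sketches the same behaviour:

    package main

    import (
        "fmt"
        "regexp"
    )

    // deviceFilter sketches the include/exclude semantics used by the qdisc
    // collector: an exclude match always wins, otherwise a non-empty include
    // pattern must match for the device to be kept.
    type deviceFilter struct {
        exclude *regexp.Regexp
        include *regexp.Regexp
    }

    func newFilter(exclude, include string) deviceFilter {
        var f deviceFilter
        if exclude != "" {
            f.exclude = regexp.MustCompile(exclude)
        }
        if include != "" {
            f.include = regexp.MustCompile(include)
        }
        return f
    }

    func (f deviceFilter) ignored(name string) bool {
        return (f.exclude != nil && f.exclude.MatchString(name)) ||
            (f.include != nil && !f.include.MatchString(name))
    }

    func main() {
        f := newFilter("", "^eth[0-9]+$")
        fmt.Println(f.ignored("eth0"), f.ignored("lo")) // false true
    }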
-// +build !norapl +//go:build !norapl package collector import ( + "errors" + "fmt" + "log/slog" + "os" "strconv" - "github.com/go-kit/kit/log" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) +const raplCollectorSubsystem = "rapl" + type raplCollector struct { - fs sysfs.FS + fs sysfs.FS + logger *slog.Logger + + joulesMetricDesc *prometheus.Desc } func init() { - registerCollector("rapl", defaultEnabled, NewRaplCollector) + registerCollector(raplCollectorSubsystem, defaultEnabled, NewRaplCollector) } +var ( + raplZoneLabel = kingpin.Flag("collector.rapl.enable-zone-label", "Enables service unit metric unit_start_time_seconds").Bool() +) + // NewRaplCollector returns a new Collector exposing RAPL metrics. -func NewRaplCollector(logger log.Logger) (Collector, error) { +func NewRaplCollector(logger *slog.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, err } + joulesMetricDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, raplCollectorSubsystem, "joules_total"), + "Current RAPL value in joules", + []string{"index", "path", "rapl_zone"}, nil, + ) + collector := raplCollector{ - fs: fs, + fs: fs, + logger: logger, + joulesMetricDesc: joulesMetricDesc, } return &collector, nil } @@ -50,28 +71,68 @@ func (c *raplCollector) Update(ch chan<- prometheus.Metric) error { // nil zones are fine when platform doesn't have powercap files present. zones, err := sysfs.GetRaplZones(c.fs) if err != nil { - return nil + if errors.Is(err, os.ErrNotExist) { + c.logger.Debug("Platform doesn't have powercap files present", "err", err) + return ErrNoData + } + if errors.Is(err, os.ErrPermission) { + c.logger.Debug("Can't access powercap files", "err", err) + return ErrNoData + } + return fmt.Errorf("failed to retrieve rapl stats: %w", err) } for _, rz := range zones { - newMicrojoules, err := rz.GetEnergyMicrojoules() + microJoules, err := rz.GetEnergyMicrojoules() if err != nil { + if errors.Is(err, os.ErrPermission) { + c.logger.Debug("Can't access energy_uj file", "zone", rz, "err", err) + return ErrNoData + } return err } - index := strconv.Itoa(rz.Index) - - descriptor := prometheus.NewDesc( - prometheus.BuildFQName(namespace, "rapl", rz.Name+"_joules_total"), - "Current RAPL "+rz.Name+" value in joules", - []string{"index"}, nil, - ) - - ch <- prometheus.MustNewConstMetric( - descriptor, - prometheus.CounterValue, - float64(newMicrojoules)/1000000.0, - index, - ) + + joules := float64(microJoules) / 1000000.0 + + if *raplZoneLabel { + ch <- c.joulesMetricWithZoneLabel(rz, joules) + } else { + ch <- c.joulesMetric(rz, joules) + } } return nil } + +func (c *raplCollector) joulesMetric(z sysfs.RaplZone, v float64) prometheus.Metric { + index := strconv.Itoa(z.Index) + descriptor := prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + raplCollectorSubsystem, + fmt.Sprintf("%s_joules_total", SanitizeMetricName(z.Name)), + ), + fmt.Sprintf("Current RAPL %s value in joules", z.Name), + []string{"index", "path"}, nil, + ) + + return prometheus.MustNewConstMetric( + descriptor, + prometheus.CounterValue, + v, + index, + z.Path, + ) +} + +func (c *raplCollector) joulesMetricWithZoneLabel(z sysfs.RaplZone, v float64) prometheus.Metric { + index := strconv.Itoa(z.Index) + + return prometheus.MustNewConstMetric( + c.joulesMetricDesc, + prometheus.CounterValue, + v, + index, + z.Path, + z.Name, + ) +} diff --git a/collector/runit.go b/collector/runit.go index 9d889046a3..9fabd3ffc5 
100644 --- a/collector/runit.go +++ b/collector/runit.go @@ -11,16 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !norunit +//go:build !norunit package collector import ( - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "log/slog" + + "github.com/alecthomas/kingpin/v2" + "github.com/prometheus-community/go-runit/runit" "github.com/prometheus/client_golang/prometheus" - "github.com/soundcloud/go-runit/runit" - "gopkg.in/alecthomas/kingpin.v2" ) var runitServiceDir = kingpin.Flag("collector.runit.servicedir", "Path to runit service directory.").Default("/etc/service").String() @@ -30,7 +30,7 @@ type runitCollector struct { stateDesired typedDesc stateNormal typedDesc stateTimestamp typedDesc - logger log.Logger + logger *slog.Logger } func init() { @@ -38,13 +38,15 @@ func init() { } // NewRunitCollector returns a new Collector exposing runit statistics. -func NewRunitCollector(logger log.Logger) (Collector, error) { +func NewRunitCollector(logger *slog.Logger) (Collector, error) { var ( subsystem = "service" constLabels = prometheus.Labels{"supervisor": "runit"} labelNames = []string{"service"} ) + logger.Warn("This collector is deprecated and will be removed in the next major version release.") + return &runitCollector{ state: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "state"), @@ -79,11 +81,11 @@ func (c *runitCollector) Update(ch chan<- prometheus.Metric) error { for _, service := range services { status, err := service.Status() if err != nil { - level.Debug(c.logger).Log("msg", "Couldn't get status", "service", service.Name, "err", err) + c.logger.Debug("Couldn't get status", "service", service.Name, "err", err) continue } - level.Debug(c.logger).Log("msg", "duration", "service", service.Name, "status", status.State, "pid", status.Pid, "duration_seconds", status.Duration) + c.logger.Debug("duration", "service", service.Name, "status", status.State, "pid", status.Pid, "duration_seconds", status.Duration) ch <- c.state.mustNewConstMetric(float64(status.State), service.Name) ch <- c.stateDesired.mustNewConstMetric(float64(status.Want), service.Name) ch <- c.stateTimestamp.mustNewConstMetric(float64(status.Timestamp.Unix()), service.Name) diff --git a/collector/schedstat_linux.go b/collector/schedstat_linux.go index 9f29a7e160..14882bdab8 100644 --- a/collector/schedstat_linux.go +++ b/collector/schedstat_linux.go @@ -11,17 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
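With the RAPL changes above, each powercap zone is exported either under its own metric name (the default, with the zone name folded into the name) or, when --collector.rapl.enable-zone-label is set, as a single node_rapl_joules_total series carrying a rapl_zone label. The sketch below only illustrates the two shapes; the sanitizer stands in for the package's SanitizeMetricName helper and the label values are placeholders:

    package main

    import (
        "fmt"
        "regexp"
    )

    // invalidChars approximates the character set SanitizeMetricName rejects:
    // anything outside the metric-name alphabet becomes an underscore.
    var invalidChars = regexp.MustCompile(`[^a-zA-Z0-9_:]`)

    func sanitize(name string) string {
        return invalidChars.ReplaceAllString(name, "_")
    }

    func main() {
        zone, joules := "package-0", 1234.56

        // Default layout: one metric name per zone.
        fmt.Printf("node_rapl_%s_joules_total{index=\"0\",path=\"/sys/...\"} %g\n", sanitize(zone), joules)

        // With --collector.rapl.enable-zone-label: one metric, zone as a label.
        fmt.Printf("node_rapl_joules_total{index=\"0\",path=\"/sys/...\",rapl_zone=\"%s\"} %g\n", zone, joules)
    }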
-// +build !noshedstat +//go:build !noshedstat package collector import ( "errors" "fmt" + "log/slog" "os" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) @@ -52,7 +51,7 @@ var ( ) // NewSchedstatCollector returns a new Collector exposing task scheduler statistics -func NewSchedstatCollector(logger log.Logger) (Collector, error) { +func NewSchedstatCollector(logger *slog.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) @@ -63,7 +62,7 @@ func NewSchedstatCollector(logger log.Logger) (Collector, error) { type schedstatCollector struct { fs procfs.FS - logger log.Logger + logger *slog.Logger } func init() { @@ -74,7 +73,7 @@ func (c *schedstatCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.Schedstat() if err != nil { if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "schedstat file does not exist") + c.logger.Debug("schedstat file does not exist") return ErrNoData } return err diff --git a/collector/selinux_linux.go b/collector/selinux_linux.go new file mode 100644 index 0000000000..b0e7fac6e6 --- /dev/null +++ b/collector/selinux_linux.go @@ -0,0 +1,78 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noselinux + +package collector + +import ( + "log/slog" + + "github.com/opencontainers/selinux/go-selinux" + "github.com/prometheus/client_golang/prometheus" +) + +type selinuxCollector struct { + configMode *prometheus.Desc + currentMode *prometheus.Desc + enabled *prometheus.Desc + logger *slog.Logger +} + +func init() { + registerCollector("selinux", defaultEnabled, NewSelinuxCollector) +} + +// NewSelinuxCollector returns a new Collector exposing SELinux statistics. 
+func NewSelinuxCollector(logger *slog.Logger) (Collector, error) { + const subsystem = "selinux" + + return &selinuxCollector{ + configMode: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "config_mode"), + "Configured SELinux enforcement mode", + nil, nil, + ), + currentMode: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "current_mode"), + "Current SELinux enforcement mode", + nil, nil, + ), + enabled: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "enabled"), + "SELinux is enabled, 1 is true, 0 is false", + nil, nil, + ), + logger: logger, + }, nil +} + +func (c *selinuxCollector) Update(ch chan<- prometheus.Metric) error { + if !selinux.GetEnabled() { + ch <- prometheus.MustNewConstMetric( + c.enabled, prometheus.GaugeValue, 0) + + return nil + } + + ch <- prometheus.MustNewConstMetric( + c.enabled, prometheus.GaugeValue, 1) + + ch <- prometheus.MustNewConstMetric( + c.configMode, prometheus.GaugeValue, float64(selinux.DefaultEnforceMode())) + + ch <- prometheus.MustNewConstMetric( + c.currentMode, prometheus.GaugeValue, float64(selinux.EnforceMode())) + + return nil +} diff --git a/collector/slabinfo_linux.go b/collector/slabinfo_linux.go new file mode 100644 index 0000000000..48bbf2abfe --- /dev/null +++ b/collector/slabinfo_linux.go @@ -0,0 +1,131 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noslabinfo + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/alecthomas/kingpin/v2" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +var ( + slabNameInclude = kingpin.Flag("collector.slabinfo.slabs-include", "Regexp of slabs to include in slabinfo collector.").Default(".*").String() + slabNameExclude = kingpin.Flag("collector.slabinfo.slabs-exclude", "Regexp of slabs to exclude in slabinfo collector.").Default("").String() +) + +type slabinfoCollector struct { + fs procfs.FS + logger *slog.Logger + subsystem string + labels []string + slabNameFilter deviceFilter +} + +func init() { + registerCollector("slabinfo", defaultDisabled, NewSlabinfoCollector) +} + +func NewSlabinfoCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + return &slabinfoCollector{logger: logger, + fs: fs, + subsystem: "slabinfo", + labels: []string{"slab"}, + slabNameFilter: newDeviceFilter(*slabNameExclude, *slabNameInclude), + }, nil +} + +func (c *slabinfoCollector) Update(ch chan<- prometheus.Metric) error { + slabinfo, err := c.fs.SlabInfo() + if err != nil { + return fmt.Errorf("couldn't get %s: %w", c.subsystem, err) + } + + for _, slab := range slabinfo.Slabs { + if c.slabNameFilter.ignored(slab.Name) { + continue + } + ch <- c.activeObjects(slab.Name, slab.ObjActive) + ch <- c.objects(slab.Name, slab.ObjNum) + ch <- c.objectSizeBytes(slab.Name, slab.ObjSize) + ch <- c.objectsPerSlab(slab.Name, slab.ObjPerSlab) + ch <- c.pagesPerSlab(slab.Name, slab.PagesPerSlab) + } + + return nil +} + +func (c *slabinfoCollector) activeObjects(label string, val int64) prometheus.Metric { + desc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "active_objects"), + "The number of objects that are currently active (i.e., in use).", + c.labels, nil) + + return prometheus.MustNewConstMetric( + desc, prometheus.GaugeValue, float64(val), label, + ) +} + +func (c *slabinfoCollector) objects(label string, val int64) prometheus.Metric { + desc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "objects"), + "The total number of allocated objects (i.e., objects that are both in use and not in use).", + c.labels, nil) + + return prometheus.MustNewConstMetric( + desc, prometheus.GaugeValue, float64(val), label, + ) +} + +func (c *slabinfoCollector) objectSizeBytes(label string, val int64) prometheus.Metric { + desc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "object_size_bytes"), + "The size of objects in this slab, in bytes.", + c.labels, nil) + + return prometheus.MustNewConstMetric( + desc, prometheus.GaugeValue, float64(val), label, + ) +} + +func (c *slabinfoCollector) objectsPerSlab(label string, val int64) prometheus.Metric { + desc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "objects_per_slab"), + "The number of objects stored in each slab.", + c.labels, nil) + + return prometheus.MustNewConstMetric( + desc, prometheus.GaugeValue, float64(val), label, + ) +} + +func (c *slabinfoCollector) pagesPerSlab(label string, val int64) prometheus.Metric { + desc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "pages_per_slab"), + "The number of pages allocated for each slab.", + c.labels, nil) + + return prometheus.MustNewConstMetric( + desc, prometheus.GaugeValue, float64(val), label, + ) +} diff --git 
a/collector/sockstat_linux.go b/collector/sockstat_linux.go index 8f5a99fab1..0ec782bcd1 100644 --- a/collector/sockstat_linux.go +++ b/collector/sockstat_linux.go @@ -11,17 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nosockstat +//go:build !nosockstat package collector import ( "errors" "fmt" + "log/slog" "os" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) @@ -34,7 +33,7 @@ const ( var pageSize = os.Getpagesize() type sockStatCollector struct { - logger log.Logger + logger *slog.Logger } func init() { @@ -42,7 +41,7 @@ func init() { } // NewSockStatCollector returns a new Collector exposing socket stats. -func NewSockStatCollector(logger log.Logger) (Collector, error) { +func NewSockStatCollector(logger *slog.Logger) (Collector, error) { return &sockStatCollector{logger}, nil } @@ -57,7 +56,7 @@ func (c *sockStatCollector) Update(ch chan<- prometheus.Metric) error { switch { case err == nil: case errors.Is(err, os.ErrNotExist): - level.Debug(c.logger).Log("msg", "IPv4 sockstat statistics not found, skipping") + c.logger.Debug("IPv4 sockstat statistics not found, skipping") default: return fmt.Errorf("failed to get IPv4 sockstat data: %w", err) } @@ -66,7 +65,7 @@ func (c *sockStatCollector) Update(ch chan<- prometheus.Metric) error { switch { case err == nil: case errors.Is(err, os.ErrNotExist): - level.Debug(c.logger).Log("msg", "IPv6 sockstat statistics not found, skipping") + c.logger.Debug("IPv6 sockstat statistics not found, skipping") default: return fmt.Errorf("failed to get IPv6 sockstat data: %w", err) } diff --git a/collector/softirqs_common.go b/collector/softirqs_common.go new file mode 100644 index 0000000000..2022a4b5b7 --- /dev/null +++ b/collector/softirqs_common.go @@ -0,0 +1,50 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && !nosoftirqs + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +type softirqsCollector struct { + fs procfs.FS + desc typedDesc + logger *slog.Logger +} + +func init() { + registerCollector("softirqs", defaultDisabled, NewSoftirqsCollector) +} + +// NewSoftirqsCollector returns a new Collector exposing softirq stats. 
+func NewSoftirqsCollector(logger *slog.Logger) (Collector, error) { + desc := typedDesc{prometheus.NewDesc( + namespace+"_softirqs_functions_total", + "Softirq counts per CPU.", + softirqLabelNames, nil, + ), prometheus.CounterValue} + + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + return &softirqsCollector{fs, desc, logger}, nil +} diff --git a/collector/softirqs_linux.go b/collector/softirqs_linux.go new file mode 100644 index 0000000000..0c64751049 --- /dev/null +++ b/collector/softirqs_linux.go @@ -0,0 +1,67 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nosoftirqs + +package collector + +import ( + "fmt" + "strconv" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + softirqLabelNames = []string{"cpu", "type"} +) + +func (c *softirqsCollector) Update(ch chan<- prometheus.Metric) (err error) { + softirqs, err := c.fs.Softirqs() + if err != nil { + return fmt.Errorf("couldn't get softirqs: %w", err) + } + + for cpuNo, value := range softirqs.Hi { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "HI") + } + for cpuNo, value := range softirqs.Timer { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "TIMER") + } + for cpuNo, value := range softirqs.NetTx { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "NET_TX") + } + for cpuNo, value := range softirqs.NetRx { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "NET_RX") + } + for cpuNo, value := range softirqs.Block { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "BLOCK") + } + for cpuNo, value := range softirqs.IRQPoll { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "IRQ_POLL") + } + for cpuNo, value := range softirqs.Tasklet { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "TASKLET") + } + for cpuNo, value := range softirqs.Sched { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "SCHED") + } + for cpuNo, value := range softirqs.HRTimer { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "HRTIMER") + } + for cpuNo, value := range softirqs.RCU { + ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "RCU") + } + + return err +} diff --git a/collector/softnet_linux.go b/collector/softnet_linux.go index befec8ed7b..2644bbf9a4 100644 --- a/collector/softnet_linux.go +++ b/collector/softnet_linux.go @@ -11,25 +11,29 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !nosoftnet +//go:build !nosoftnet package collector import ( "fmt" + "log/slog" "strconv" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type softnetCollector struct { - fs procfs.FS - processed *prometheus.Desc - dropped *prometheus.Desc - timeSqueezed *prometheus.Desc - logger log.Logger + fs procfs.FS + processed *prometheus.Desc + dropped *prometheus.Desc + timeSqueezed *prometheus.Desc + cpuCollision *prometheus.Desc + receivedRps *prometheus.Desc + flowLimitCount *prometheus.Desc + softnetBacklogLen *prometheus.Desc + logger *slog.Logger } const ( @@ -41,7 +45,7 @@ func init() { } // NewSoftnetCollector returns a new Collector exposing softnet metrics. -func NewSoftnetCollector(logger log.Logger) (Collector, error) { +func NewSoftnetCollector(logger *slog.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) @@ -64,19 +68,41 @@ func NewSoftnetCollector(logger log.Logger) (Collector, error) { "Number of times processing packets ran out of quota", []string{"cpu"}, nil, ), + cpuCollision: prometheus.NewDesc( + prometheus.BuildFQName(namespace, softnetSubsystem, "cpu_collision_total"), + "Number of collision occur while obtaining device lock while transmitting", + []string{"cpu"}, nil, + ), + receivedRps: prometheus.NewDesc( + prometheus.BuildFQName(namespace, softnetSubsystem, "received_rps_total"), + "Number of times cpu woken up received_rps", + []string{"cpu"}, nil, + ), + flowLimitCount: prometheus.NewDesc( + prometheus.BuildFQName(namespace, softnetSubsystem, "flow_limit_count_total"), + "Number of times flow limit has been reached", + []string{"cpu"}, nil, + ), + softnetBacklogLen: prometheus.NewDesc( + prometheus.BuildFQName(namespace, softnetSubsystem, "backlog_len"), + "Softnet backlog status", + []string{"cpu"}, nil, + ), logger: logger, }, nil } // Update gets parsed softnet statistics using procfs. func (c *softnetCollector) Update(ch chan<- prometheus.Metric) error { + var cpu string + stats, err := c.fs.NetSoftnetStat() if err != nil { return fmt.Errorf("could not get softnet statistics: %w", err) } - for cpuNumber, cpuStats := range stats { - cpu := strconv.Itoa(cpuNumber) + for _, cpuStats := range stats { + cpu = strconv.FormatUint(uint64(cpuStats.Index), 10) ch <- prometheus.MustNewConstMetric( c.processed, @@ -96,6 +122,30 @@ func (c *softnetCollector) Update(ch chan<- prometheus.Metric) error { float64(cpuStats.TimeSqueezed), cpu, ) + ch <- prometheus.MustNewConstMetric( + c.cpuCollision, + prometheus.CounterValue, + float64(cpuStats.CPUCollision), + cpu, + ) + ch <- prometheus.MustNewConstMetric( + c.receivedRps, + prometheus.CounterValue, + float64(cpuStats.ReceivedRps), + cpu, + ) + ch <- prometheus.MustNewConstMetric( + c.flowLimitCount, + prometheus.CounterValue, + float64(cpuStats.FlowLimitCount), + cpu, + ) + ch <- prometheus.MustNewConstMetric( + c.softnetBacklogLen, + prometheus.GaugeValue, + float64(cpuStats.SoftnetBacklogLen), + cpu, + ) } return nil diff --git a/collector/stat_linux.go b/collector/stat_linux.go index 667d9621c8..199390620e 100644 --- a/collector/stat_linux.go +++ b/collector/stat_linux.go @@ -11,14 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !nostat +//go:build !nostat package collector import ( "fmt" + "log/slog" - "github.com/go-kit/kit/log" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) @@ -31,15 +32,18 @@ type statCollector struct { btime *prometheus.Desc procsRunning *prometheus.Desc procsBlocked *prometheus.Desc - logger log.Logger + softIRQ *prometheus.Desc + logger *slog.Logger } +var statSoftirqFlag = kingpin.Flag("collector.stat.softirq", "Export softirq calls per vector").Default("false").Bool() + func init() { registerCollector("stat", defaultEnabled, NewStatCollector) } // NewStatCollector returns a new Collector exposing kernel/system statistics. -func NewStatCollector(logger log.Logger) (Collector, error) { +func NewStatCollector(logger *slog.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) @@ -76,6 +80,11 @@ func NewStatCollector(logger log.Logger) (Collector, error) { "Number of processes blocked waiting for I/O to complete.", nil, nil, ), + softIRQ: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "softirqs_total"), + "Number of softirq calls.", + []string{"vector"}, nil, + ), logger: logger, }, nil } @@ -96,5 +105,27 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric(c.procsRunning, prometheus.GaugeValue, float64(stats.ProcessesRunning)) ch <- prometheus.MustNewConstMetric(c.procsBlocked, prometheus.GaugeValue, float64(stats.ProcessesBlocked)) + if *statSoftirqFlag { + si := stats.SoftIRQ + + for _, vec := range []struct { + name string + value uint64 + }{ + {name: "hi", value: si.Hi}, + {name: "timer", value: si.Timer}, + {name: "net_tx", value: si.NetTx}, + {name: "net_rx", value: si.NetRx}, + {name: "block", value: si.Block}, + {name: "block_iopoll", value: si.BlockIoPoll}, + {name: "tasklet", value: si.Tasklet}, + {name: "sched", value: si.Sched}, + {name: "hrtimer", value: si.Hrtimer}, + {name: "rcu", value: si.Rcu}, + } { + ch <- prometheus.MustNewConstMetric(c.softIRQ, prometheus.CounterValue, float64(vec.value), vec.name) + } + } + return nil } diff --git a/collector/supervisord.go b/collector/supervisord.go index 6a6b90910c..b3c4c3f68f 100644 --- a/collector/supervisord.go +++ b/collector/supervisord.go @@ -11,27 +11,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nosupervisord +//go:build !nosupervisord package collector import ( "context" "fmt" + "log/slog" "net" "net/http" "net/url" "time" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/mattn/go-xmlrpc" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" ) var ( - supervisordURL = kingpin.Flag("collector.supervisord.url", "XML RPC endpoint.").Default("http://localhost:9001/RPC2").String() + supervisordURL = kingpin.Flag("collector.supervisord.url", "XML RPC endpoint.").Default("http://localhost:9001/RPC2").Envar("SUPERVISORD_URL").String() xrpc *xmlrpc.Client ) @@ -40,7 +39,7 @@ type supervisordCollector struct { stateDesc *prometheus.Desc exitStatusDesc *prometheus.Desc startTimeDesc *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -48,7 +47,7 @@ func init() { } // NewSupervisordCollector returns a new Collector exposing supervisord statistics. 
-func NewSupervisordCollector(logger log.Logger) (Collector, error) { +func NewSupervisordCollector(logger *slog.Logger) (Collector, error) { var ( subsystem = "supervisord" labelNames = []string{"name", "group"} @@ -68,6 +67,8 @@ func NewSupervisordCollector(logger log.Logger) (Collector, error) { xrpc = xmlrpc.NewClient(*supervisordURL) } + logger.Warn("This collector is deprecated and will be removed in the next major version release.") + return &supervisordCollector{ upDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "up"), @@ -171,7 +172,7 @@ func (c *supervisordCollector) Update(ch chan<- prometheus.Metric) error { } else { ch <- prometheus.MustNewConstMetric(c.upDesc, prometheus.GaugeValue, 0, labels...) } - level.Debug(c.logger).Log("msg", "process info", "group", info.Group, "name", info.Name, "state", info.StateName, "pid", info.PID) + c.logger.Debug("process info", "group", info.Group, "name", info.Name, "state", info.StateName, "pid", info.PID) } return nil diff --git a/collector/swap_linux.go b/collector/swap_linux.go new file mode 100644 index 0000000000..9a275e1614 --- /dev/null +++ b/collector/swap_linux.go @@ -0,0 +1,129 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noswap + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +const ( + swapSubsystem = "swap" +) + +var swapLabelNames = []string{"device", "swap_type"} + +type swapCollector struct { + fs procfs.FS + logger *slog.Logger +} + +func init() { + registerCollector("swap", defaultDisabled, NewSwapCollector) +} + +// NewSwapCollector returns a new Collector exposing swap device statistics. 
+func NewSwapCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + return &swapCollector{ + fs: fs, + logger: logger, + }, nil +} + +type SwapsEntry struct { + Device string + Type string + Priority int + Size int + Used int +} + +func (c *swapCollector) getSwapInfo() ([]SwapsEntry, error) { + swaps, err := c.fs.Swaps() + if err != nil { + return nil, fmt.Errorf("couldn't get proc/swap information: %w", err) + } + + metrics := make([]SwapsEntry, 0, len(swaps)) + + for _, swap := range swaps { + metrics = append(metrics, SwapsEntry{Device: swap.Filename, Type: swap.Type, + Priority: swap.Priority, Size: swap.Size, Used: swap.Used}) + } + + return metrics, nil +} + +func (c *swapCollector) Update(ch chan<- prometheus.Metric) error { + swaps, err := c.getSwapInfo() + if err != nil { + return fmt.Errorf("couldn't get swap information: %w", err) + } + + for _, swap := range swaps { + swapLabelValues := []string{swap.Device, swap.Type} + + // Export swap size in bytes + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, swapSubsystem, "size_bytes"), + "Swap device size in bytes.", + []string{"device", "swap_type"}, nil, + ), + prometheus.GaugeValue, + // Size is provided in kbytes (not bytes), translate to bytes + // see https://github.com/torvalds/linux/blob/fd94619c43360eb44d28bd3ef326a4f85c600a07/mm/swapfile.c#L3079-L3080 + float64(swap.Size*1024), + swapLabelValues..., + ) + + // Export swap used in bytes + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, swapSubsystem, "used_bytes"), + "Swap device used in bytes.", + swapLabelNames, nil, + ), + prometheus.GaugeValue, + // Swap used is also provided in kbytes, translate to bytes + float64(swap.Used*1024), + swapLabelValues..., + ) + + // Export swap priority + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, swapSubsystem, "priority"), + "Swap device priority.", + swapLabelNames, nil, + ), + prometheus.GaugeValue, + float64(swap.Priority), + swapLabelValues..., + ) + + } + + return nil +} diff --git a/collector/swap_linux_test.go b/collector/swap_linux_test.go new file mode 100644 index 0000000000..f4e49c5050 --- /dev/null +++ b/collector/swap_linux_test.go @@ -0,0 +1,57 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noswap + +package collector + +import ( + "io" + "log/slog" + "testing" +) + +func TestSwap(t *testing.T) { + *procPath = "fixtures/proc" + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + + collector, err := NewSwapCollector(logger) + if err != nil { + panic(err) + } + + swapInfo, err := collector.(*swapCollector).getSwapInfo() + if err != nil { + panic(err) + } + + if want, got := "/dev/zram0", swapInfo[0].Device; want != got { + t.Errorf("want swap device %s, got %s", want, got) + } + + if want, got := "partition", swapInfo[0].Type; want != got { + t.Errorf("want swap type %s, got %s", want, got) + } + + if want, got := 100, swapInfo[0].Priority; want != got { + t.Errorf("want swap priority %d, got %d", want, got) + } + + if want, got := 8388604, swapInfo[0].Size; want != got { + t.Errorf("want swap size %d, got %d", want, got) + } + + if want, got := 76, swapInfo[0].Used; want != got { + t.Errorf("want swap used %d, got %d", want, got) + } +} diff --git a/collector/sysctl_bsd.go b/collector/sysctl_bsd.go index a671bc2cdd..e3c8dc5cf4 100644 --- a/collector/sysctl_bsd.go +++ b/collector/sysctl_bsd.go @@ -11,8 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build freebsd dragonfly openbsd netbsd darwin -// +build cgo +//go:build (freebsd || dragonfly || openbsd || netbsd || darwin) && cgo package collector @@ -35,7 +34,6 @@ const ( // Default to uint32. bsdSysctlTypeUint32 bsdSysctlType = iota bsdSysctlTypeUint64 - bsdSysctlTypeStructTimeval bsdSysctlTypeCLong ) @@ -74,8 +72,6 @@ func (b bsdSysctl) Value() (float64, error) { case bsdSysctlTypeUint64: tmp64, err = unix.SysctlUint64(b.mib) tmpf64 = float64(tmp64) - case bsdSysctlTypeStructTimeval: - tmpf64, err = b.getStructTimeval() case bsdSysctlTypeCLong: tmpf64, err = b.getCLong() } @@ -91,28 +87,6 @@ func (b bsdSysctl) Value() (float64, error) { return tmpf64, nil } -func (b bsdSysctl) getStructTimeval() (float64, error) { - raw, err := unix.SysctlRaw(b.mib) - if err != nil { - return 0, err - } - - if len(raw) != int(unsafe.Sizeof(unix.Timeval{})) { - // Shouldn't get here. - return 0, fmt.Errorf( - "length of bytes received from sysctl (%d) does not match expected bytes (%d)", - len(raw), - unsafe.Sizeof(unix.Timeval{}), - ) - } - - tv := *(*unix.Timeval)(unsafe.Pointer(&raw[0])) - - // This conversion maintains the usec precision. Using the time - // package did not. - return (float64(tv.Sec) + (float64(tv.Usec) / float64(1000*1000))), nil -} - func (b bsdSysctl) getCLong() (float64, error) { raw, err := unix.SysctlRaw(b.mib) if err != nil { diff --git a/collector/sysctl_linux.go b/collector/sysctl_linux.go new file mode 100644 index 0000000000..0915b4e6f7 --- /dev/null +++ b/collector/sysctl_linux.go @@ -0,0 +1,218 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package collector + +import ( + "fmt" + "log/slog" + "strconv" + "strings" + + "github.com/alecthomas/kingpin/v2" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +var ( + sysctlInclude = kingpin.Flag("collector.sysctl.include", "Select sysctl metrics to include").Strings() + sysctlIncludeInfo = kingpin.Flag("collector.sysctl.include-info", "Select sysctl metrics to include as info metrics").Strings() + + sysctlInfoDesc = prometheus.NewDesc(prometheus.BuildFQName(namespace, "sysctl", "info"), "sysctl info", []string{"name", "value", "index"}, nil) +) + +type sysctlCollector struct { + fs procfs.FS + logger *slog.Logger + sysctls []*sysctl +} + +func init() { + registerCollector("sysctl", defaultDisabled, NewSysctlCollector) +} + +func NewSysctlCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + c := &sysctlCollector{ + logger: logger, + fs: fs, + sysctls: []*sysctl{}, + } + + for _, include := range *sysctlInclude { + sysctl, err := newSysctl(include, true) + if err != nil { + return nil, err + } + c.sysctls = append(c.sysctls, sysctl) + } + + for _, include := range *sysctlIncludeInfo { + sysctl, err := newSysctl(include, false) + if err != nil { + return nil, err + } + c.sysctls = append(c.sysctls, sysctl) + } + return c, nil +} + +func (c *sysctlCollector) Update(ch chan<- prometheus.Metric) error { + for _, sysctl := range c.sysctls { + metrics, err := c.newMetrics(sysctl) + if err != nil { + return err + } + + for _, metric := range metrics { + ch <- metric + } + } + return nil +} + +func (c *sysctlCollector) newMetrics(s *sysctl) ([]prometheus.Metric, error) { + var ( + values any + length int + err error + ) + + if s.numeric { + values, err = c.fs.SysctlInts(s.name) + if err != nil { + return nil, fmt.Errorf("error obtaining sysctl info: %w", err) + } + length = len(values.([]int)) + } else { + values, err = c.fs.SysctlStrings(s.name) + if err != nil { + return nil, fmt.Errorf("error obtaining sysctl info: %w", err) + } + length = len(values.([]string)) + } + + switch length { + case 0: + return nil, fmt.Errorf("sysctl %s has no values", s.name) + case 1: + if len(s.keys) > 0 { + return nil, fmt.Errorf("sysctl %s has only one value, but expected %v", s.name, s.keys) + } + return []prometheus.Metric{s.newConstMetric(values)}, nil + + default: + + if len(s.keys) == 0 { + return s.newIndexedMetrics(values), nil + } + + if length != len(s.keys) { + return nil, fmt.Errorf("sysctl %s has %d keys but only %d defined in flag", s.name, length, len(s.keys)) + } + + return s.newMappedMetrics(values) + } +} + +type sysctl struct { + numeric bool + name string + keys []string +} + +func newSysctl(include string, numeric bool) (*sysctl, error) { + parts := strings.SplitN(include, ":", 2) + s := &sysctl{ + numeric: numeric, + name: parts[0], + } + if len(parts) == 2 { + s.keys = strings.Split(parts[1], ",") + s.name = parts[0] + } + return s, nil +} + +func (s *sysctl) metricName() string { + return SanitizeMetricName(s.name) +} + +func (s *sysctl) newConstMetric(v any) prometheus.Metric { + if s.numeric { + return prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, "sysctl", s.metricName()), + fmt.Sprintf("sysctl %s", s.name), + nil, nil), + prometheus.UntypedValue, + float64(v.([]int)[0])) + } + return prometheus.MustNewConstMetric( + sysctlInfoDesc, + prometheus.GaugeValue, + 1.0, + s.name, + 
v.([]string)[0], + "0", + ) +} + +func (s *sysctl) newIndexedMetrics(v any) []prometheus.Metric { + desc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, "sysctl", s.metricName()), + fmt.Sprintf("sysctl %s", s.name), + []string{"index"}, nil, + ) + switch values := v.(type) { + case []int: + metrics := make([]prometheus.Metric, len(values)) + for i, n := range values { + metrics[i] = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(n), strconv.Itoa(i)) + } + return metrics + case []string: + metrics := make([]prometheus.Metric, len(values)) + for i, str := range values { + metrics[i] = prometheus.MustNewConstMetric(sysctlInfoDesc, prometheus.GaugeValue, 1.0, s.name, str, strconv.Itoa(i)) + } + return metrics + default: + panic(fmt.Sprintf("unexpected type %T", values)) + } +} + +func (s *sysctl) newMappedMetrics(v any) ([]prometheus.Metric, error) { + switch values := v.(type) { + case []int: + metrics := make([]prometheus.Metric, len(values)) + for i, n := range values { + key := s.keys[i] + desc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, "sysctl", s.metricName()+"_"+key), + fmt.Sprintf("sysctl %s, field %d", s.name, i), + nil, + nil, + ) + metrics[i] = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(n)) + } + return metrics, nil + case []string: + return nil, fmt.Errorf("mapped sysctl string values not supported") + default: + return nil, fmt.Errorf("unexpected type %T", values) + } +} diff --git a/collector/sysctl_openbsd_amd64.go b/collector/sysctl_openbsd_amd64.go new file mode 100644 index 0000000000..2b2988934e --- /dev/null +++ b/collector/sysctl_openbsd_amd64.go @@ -0,0 +1,87 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +func int8ToString(a []int8) string { + buf := make([]byte, len(a)) + for i, v := range a { + if byte(v) == 0 { + buf = buf[:i] + break + } + buf[i] = byte(v) + } + return string(buf) +} + +// unix._C_int +type _C_int int32 + +var _zero uintptr + +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case unix.EAGAIN: + return syscall.EAGAIN + case unix.EINVAL: + return syscall.EINVAL + case unix.ENOENT: + return syscall.ENOENT + } + return e +} + +func _sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + for { + _, _, e1 := unix.Syscall6(unix.SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + if err != unix.EINTR { + return + } + } + return +} + +func sysctl(mib []_C_int) ([]byte, error) { + n := uintptr(0) + if err := _sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + buf := make([]byte, n) + if err := _sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return nil, err + } + return buf[:n], nil +} diff --git a/collector/systemd_linux.go b/collector/systemd_linux.go index b374b3744c..51b82c0186 100644 --- a/collector/systemd_linux.go +++ b/collector/systemd_linux.go @@ -11,13 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nosystemd +//go:build !nosystemd package collector import ( + "context" "errors" "fmt" + "log/slog" "math" "regexp" "strconv" @@ -25,11 +27,9 @@ import ( "sync" "time" - "github.com/coreos/go-systemd/dbus" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" + "github.com/coreos/go-systemd/v22/dbus" "github.com/prometheus/client_golang/prometheus" - kingpin "gopkg.in/alecthomas/kingpin.v2" ) const ( @@ -40,14 +40,24 @@ const ( ) var ( - unitInclude = kingpin.Flag("collector.systemd.unit-include", "Regexp of systemd units to include. Units must both match include and not match exclude to be included.").Default(".+").String() - oldUnitInclude = kingpin.Flag("collector.systemd.unit-whitelist", "DEPRECATED: Use --collector.systemd.unit-include").Hidden().String() - unitExclude = kingpin.Flag("collector.systemd.unit-exclude", "Regexp of systemd units to exclude. Units must both match include and not match exclude to be included.").Default(".+\\.(automount|device|mount|scope|slice)").String() - oldUnitExclude = kingpin.Flag("collector.systemd.unit-blacklist", "DEPRECATED: Use collector.systemd.unit-exclude").Hidden().String() + systemdUnitIncludeSet bool + systemdUnitInclude = kingpin.Flag("collector.systemd.unit-include", "Regexp of systemd units to include. Units must both match include and not match exclude to be included.").Default(".+").PreAction(func(c *kingpin.ParseContext) error { + systemdUnitIncludeSet = true + return nil + }).String() + oldSystemdUnitInclude = kingpin.Flag("collector.systemd.unit-whitelist", "DEPRECATED: Use --collector.systemd.unit-include").Hidden().String() + systemdUnitExcludeSet bool + systemdUnitExclude = kingpin.Flag("collector.systemd.unit-exclude", "Regexp of systemd units to exclude. 
Units must both match include and not match exclude to be included.").Default(".+\\.(automount|device|mount|scope|slice)").PreAction(func(c *kingpin.ParseContext) error { + systemdUnitExcludeSet = true + return nil + }).String() + oldSystemdUnitExclude = kingpin.Flag("collector.systemd.unit-blacklist", "DEPRECATED: Use collector.systemd.unit-exclude").Hidden().String() systemdPrivate = kingpin.Flag("collector.systemd.private", "Establish a private, direct connection to systemd without dbus (Strongly discouraged since it requires root. For testing purposes only).").Hidden().Bool() enableTaskMetrics = kingpin.Flag("collector.systemd.enable-task-metrics", "Enables service unit tasks metrics unit_tasks_current and unit_tasks_max").Bool() enableRestartsMetrics = kingpin.Flag("collector.systemd.enable-restarts-metrics", "Enables service unit metric service_restart_total").Bool() enableStartTimeMetrics = kingpin.Flag("collector.systemd.enable-start-time-metrics", "Enables service unit metric unit_start_time_seconds").Bool() + + systemdVersionRE = regexp.MustCompile(`[0-9]{3,}(\.[0-9]+)?`) ) type systemdCollector struct { @@ -63,10 +73,11 @@ type systemdCollector struct { socketCurrentConnectionsDesc *prometheus.Desc socketRefusedConnectionsDesc *prometheus.Desc systemdVersionDesc *prometheus.Desc - systemdVersion int - unitIncludePattern *regexp.Regexp - unitExcludePattern *regexp.Regexp - logger log.Logger + virtualizationDesc *prometheus.Desc + // Use regexps for more flexibility than device_filter.go allows + systemdUnitIncludePattern *regexp.Regexp + systemdUnitExcludePattern *regexp.Regexp + logger *slog.Logger } var unitStatesName = []string{"active", "activating", "deactivating", "inactive", "failed"} @@ -76,7 +87,7 @@ func init() { } // NewSystemdCollector returns a new Collector exposing systemd statistics. 
-func NewSystemdCollector(logger log.Logger) (Collector, error) { +func NewSystemdCollector(logger *slog.Logger) (Collector, error) { const subsystem = "systemd" unitDesc := prometheus.NewDesc( @@ -120,34 +131,31 @@ func NewSystemdCollector(logger log.Logger) (Collector, error) { "Total number of refused socket connections", []string{"name"}, nil) systemdVersionDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "version"), - "Detected systemd version", []string{}, nil) - - if *oldUnitExclude != "" { - if *unitExclude == "" { - level.Warn(logger).Log("msg", "--collector.systemd.unit-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-exclude") - *unitExclude = *oldUnitExclude + "Detected systemd version", []string{"version"}, nil) + virtualizationDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "virtualization_info"), + "Detected virtualization technology", []string{"virtualization_type"}, nil) + + if *oldSystemdUnitExclude != "" { + if !systemdUnitExcludeSet { + logger.Warn("--collector.systemd.unit-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-exclude") + *systemdUnitExclude = *oldSystemdUnitExclude } else { return nil, errors.New("--collector.systemd.unit-blacklist and --collector.systemd.unit-exclude are mutually exclusive") } } - if *oldUnitInclude != "" { - if *unitInclude == "" { - level.Warn(logger).Log("msg", "--collector.systemd.unit-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-include") - *unitInclude = *oldUnitInclude + if *oldSystemdUnitInclude != "" { + if !systemdUnitIncludeSet { + logger.Warn("--collector.systemd.unit-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-include") + *systemdUnitInclude = *oldSystemdUnitInclude } else { return nil, errors.New("--collector.systemd.unit-whitelist and --collector.systemd.unit-include are mutually exclusive") } } - level.Info(logger).Log("msg", "Parsed flag --collector.systemd.unit-include", "flag", *unitInclude) - unitIncludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitInclude)) - level.Info(logger).Log("msg", "Parsed flag --collector.systemd.unit-exclude", "flag", *unitExclude) - unitExcludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitExclude)) - - systemdVersion := getSystemdVersion(logger) - if systemdVersion < minSystemdVersionSystemState { - level.Warn(logger).Log("msg", "Detected systemd version is lower than minimum", "current", systemdVersion, "minimum", minSystemdVersionSystemState) - level.Warn(logger).Log("msg", "Some systemd state and timer metrics will not be available") - } + logger.Info("Parsed flag --collector.systemd.unit-include", "flag", *systemdUnitInclude) + systemdUnitIncludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *systemdUnitInclude)) + logger.Info("Parsed flag --collector.systemd.unit-exclude", "flag", *systemdUnitExclude) + systemdUnitExcludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *systemdUnitExclude)) return &systemdCollector{ unitDesc: unitDesc, @@ -162,9 +170,9 @@ func NewSystemdCollector(logger log.Logger) (Collector, error) { socketCurrentConnectionsDesc: socketCurrentConnectionsDesc, socketRefusedConnectionsDesc: socketRefusedConnectionsDesc, systemdVersionDesc: systemdVersionDesc, - systemdVersion: systemdVersion, - unitIncludePattern: unitIncludePattern, - unitExcludePattern: unitExcludePattern, + virtualizationDesc: virtualizationDesc, + 
systemdUnitIncludePattern: systemdUnitIncludePattern, + systemdUnitExcludePattern: systemdUnitExcludePattern, logger: logger, }, nil } @@ -179,20 +187,39 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error { } defer conn.Close() + systemdVersion, systemdVersionFull := c.getSystemdVersion(conn) + if systemdVersion < minSystemdVersionSystemState { + c.logger.Debug("Detected systemd version is lower than minimum, some systemd state and timer metrics will not be available", "current", systemdVersion, "minimum", minSystemdVersionSystemState) + } + ch <- prometheus.MustNewConstMetric( + c.systemdVersionDesc, + prometheus.GaugeValue, + systemdVersion, + systemdVersionFull, + ) + + systemdVirtualization := c.getSystemdVirtualization(conn) + ch <- prometheus.MustNewConstMetric( + c.virtualizationDesc, + prometheus.GaugeValue, + 1.0, + systemdVirtualization, + ) + allUnits, err := c.getAllUnits(conn) if err != nil { return fmt.Errorf("couldn't get units: %w", err) } - level.Debug(c.logger).Log("msg", "getAllUnits took", "duration_seconds", time.Since(begin).Seconds()) + c.logger.Debug("getAllUnits took", "duration_seconds", time.Since(begin).Seconds()) begin = time.Now() summary := summarizeUnits(allUnits) c.collectSummaryMetrics(ch, summary) - level.Debug(c.logger).Log("msg", "collectSummaryMetrics took", "duration_seconds", time.Since(begin).Seconds()) + c.logger.Debug("collectSummaryMetrics took", "duration_seconds", time.Since(begin).Seconds()) begin = time.Now() - units := filterUnits(allUnits, c.unitIncludePattern, c.unitExcludePattern, c.logger) - level.Debug(c.logger).Log("msg", "filterUnits took", "duration_seconds", time.Since(begin).Seconds()) + units := filterUnits(allUnits, c.systemdUnitIncludePattern, c.systemdUnitExcludePattern, c.logger) + c.logger.Debug("filterUnits took", "duration_seconds", time.Since(begin).Seconds()) var wg sync.WaitGroup defer wg.Wait() @@ -200,18 +227,18 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error { wg.Add(1) go func() { defer wg.Done() - begin = time.Now() + begin := time.Now() c.collectUnitStatusMetrics(conn, ch, units) - level.Debug(c.logger).Log("msg", "collectUnitStatusMetrics took", "duration_seconds", time.Since(begin).Seconds()) + c.logger.Debug("collectUnitStatusMetrics took", "duration_seconds", time.Since(begin).Seconds()) }() if *enableStartTimeMetrics { wg.Add(1) go func() { defer wg.Done() - begin = time.Now() + begin := time.Now() c.collectUnitStartTimeMetrics(conn, ch, units) - level.Debug(c.logger).Log("msg", "collectUnitStartTimeMetrics took", "duration_seconds", time.Since(begin).Seconds()) + c.logger.Debug("collectUnitStartTimeMetrics took", "duration_seconds", time.Since(begin).Seconds()) }() } @@ -219,39 +246,36 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error { wg.Add(1) go func() { defer wg.Done() - begin = time.Now() + begin := time.Now() c.collectUnitTasksMetrics(conn, ch, units) - level.Debug(c.logger).Log("msg", "collectUnitTasksMetrics took", "duration_seconds", time.Since(begin).Seconds()) + c.logger.Debug("collectUnitTasksMetrics took", "duration_seconds", time.Since(begin).Seconds()) }() } - if c.systemdVersion >= minSystemdVersionSystemState { + if systemdVersion >= minSystemdVersionSystemState { wg.Add(1) go func() { defer wg.Done() - begin = time.Now() + begin := time.Now() c.collectTimers(conn, ch, units) - level.Debug(c.logger).Log("msg", "collectTimers took", "duration_seconds", time.Since(begin).Seconds()) + c.logger.Debug("collectTimers took", 
"duration_seconds", time.Since(begin).Seconds()) }() } wg.Add(1) go func() { defer wg.Done() - begin = time.Now() + begin := time.Now() c.collectSockets(conn, ch, units) - level.Debug(c.logger).Log("msg", "collectSockets took", "duration_seconds", time.Since(begin).Seconds()) + c.logger.Debug("collectSockets took", "duration_seconds", time.Since(begin).Seconds()) }() - if c.systemdVersion >= minSystemdVersionSystemState { - begin = time.Now() + if systemdVersion >= minSystemdVersionSystemState { + begin := time.Now() err = c.collectSystemState(conn, ch) - level.Debug(c.logger).Log("msg", "collectSystemState took", "duration_seconds", time.Since(begin).Seconds()) + c.logger.Debug("collectSystemState took", "duration_seconds", time.Since(begin).Seconds()) } - ch <- prometheus.MustNewConstMetric( - c.systemdVersionDesc, prometheus.GaugeValue, float64(c.systemdVersion)) - return err } @@ -259,16 +283,16 @@ func (c *systemdCollector) collectUnitStatusMetrics(conn *dbus.Conn, ch chan<- p for _, unit := range units { serviceType := "" if strings.HasSuffix(unit.Name, ".service") { - serviceTypeProperty, err := conn.GetUnitTypeProperty(unit.Name, "Service", "Type") + serviceTypeProperty, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Service", "Type") if err != nil { - level.Debug(c.logger).Log("msg", "couldn't get unit type", "unit", unit.Name, "err", err) + c.logger.Debug("couldn't get unit type", "unit", unit.Name, "err", err) } else { serviceType = serviceTypeProperty.Value.Value().(string) } } else if strings.HasSuffix(unit.Name, ".mount") { - serviceTypeProperty, err := conn.GetUnitTypeProperty(unit.Name, "Mount", "Type") + serviceTypeProperty, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Mount", "Type") if err != nil { - level.Debug(c.logger).Log("msg", "couldn't get unit type", "unit", unit.Name, "err", err) + c.logger.Debug("couldn't get unit type", "unit", unit.Name, "err", err) } else { serviceType = serviceTypeProperty.Value.Value().(string) } @@ -284,9 +308,9 @@ func (c *systemdCollector) collectUnitStatusMetrics(conn *dbus.Conn, ch chan<- p } if *enableRestartsMetrics && strings.HasSuffix(unit.Name, ".service") { // NRestarts wasn't added until systemd 235. - restartsCount, err := conn.GetUnitTypeProperty(unit.Name, "Service", "NRestarts") + restartsCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Service", "NRestarts") if err != nil { - level.Debug(c.logger).Log("msg", "couldn't get unit NRestarts", "unit", unit.Name, "err", err) + c.logger.Debug("couldn't get unit NRestarts", "unit", unit.Name, "err", err) } else { ch <- prometheus.MustNewConstMetric( c.nRestartsDesc, prometheus.CounterValue, @@ -302,18 +326,18 @@ func (c *systemdCollector) collectSockets(conn *dbus.Conn, ch chan<- prometheus. 
continue } - acceptedConnectionCount, err := conn.GetUnitTypeProperty(unit.Name, "Socket", "NAccepted") + acceptedConnectionCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Socket", "NAccepted") if err != nil { - level.Debug(c.logger).Log("msg", "couldn't get unit NAccepted", "unit", unit.Name, "err", err) + c.logger.Debug("couldn't get unit NAccepted", "unit", unit.Name, "err", err) continue } ch <- prometheus.MustNewConstMetric( c.socketAcceptedConnectionsDesc, prometheus.CounterValue, float64(acceptedConnectionCount.Value.Value().(uint32)), unit.Name) - currentConnectionCount, err := conn.GetUnitTypeProperty(unit.Name, "Socket", "NConnections") + currentConnectionCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Socket", "NConnections") if err != nil { - level.Debug(c.logger).Log("msg", "couldn't get unit NConnections", "unit", unit.Name, "err", err) + c.logger.Debug("couldn't get unit NConnections", "unit", unit.Name, "err", err) continue } ch <- prometheus.MustNewConstMetric( @@ -321,10 +345,8 @@ func (c *systemdCollector) collectSockets(conn *dbus.Conn, ch chan<- prometheus. float64(currentConnectionCount.Value.Value().(uint32)), unit.Name) // NRefused wasn't added until systemd 239. - refusedConnectionCount, err := conn.GetUnitTypeProperty(unit.Name, "Socket", "NRefused") - if err != nil { - //log.Debugf("couldn't get unit '%s' NRefused: %s", unit.Name, err) - } else { + refusedConnectionCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Socket", "NRefused") + if err == nil { ch <- prometheus.MustNewConstMetric( c.socketRefusedConnectionsDesc, prometheus.GaugeValue, float64(refusedConnectionCount.Value.Value().(uint32)), unit.Name) @@ -339,9 +361,9 @@ func (c *systemdCollector) collectUnitStartTimeMetrics(conn *dbus.Conn, ch chan< if unit.ActiveState != "active" { startTimeUsec = 0 } else { - timestampValue, err := conn.GetUnitProperty(unit.Name, "ActiveEnterTimestamp") + timestampValue, err := conn.GetUnitPropertyContext(context.TODO(), unit.Name, "ActiveEnterTimestamp") if err != nil { - level.Debug(c.logger).Log("msg", "couldn't get unit StartTimeUsec", "unit", unit.Name, "err", err) + c.logger.Debug("couldn't get unit StartTimeUsec", "unit", unit.Name, "err", err) continue } startTimeUsec = timestampValue.Value.Value().(uint64) @@ -357,9 +379,9 @@ func (c *systemdCollector) collectUnitTasksMetrics(conn *dbus.Conn, ch chan<- pr var val uint64 for _, unit := range units { if strings.HasSuffix(unit.Name, ".service") { - tasksCurrentCount, err := conn.GetUnitTypeProperty(unit.Name, "Service", "TasksCurrent") + tasksCurrentCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Service", "TasksCurrent") if err != nil { - level.Debug(c.logger).Log("msg", "couldn't get unit TasksCurrent", "unit", unit.Name, "err", err) + c.logger.Debug("couldn't get unit TasksCurrent", "unit", unit.Name, "err", err) } else { val = tasksCurrentCount.Value.Value().(uint64) // Don't set if tasksCurrent if dbus reports MaxUint64. 
@@ -369,9 +391,9 @@ func (c *systemdCollector) collectUnitTasksMetrics(conn *dbus.Conn, ch chan<- pr float64(val), unit.Name) } } - tasksMaxCount, err := conn.GetUnitTypeProperty(unit.Name, "Service", "TasksMax") + tasksMaxCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Service", "TasksMax") if err != nil { - level.Debug(c.logger).Log("msg", "couldn't get unit TasksMax", "unit", unit.Name, "err", err) + c.logger.Debug("couldn't get unit TasksMax", "unit", unit.Name, "err", err) } else { val = tasksMaxCount.Value.Value().(uint64) // Don't set if tasksMax if dbus reports MaxUint64. @@ -391,9 +413,9 @@ func (c *systemdCollector) collectTimers(conn *dbus.Conn, ch chan<- prometheus.M continue } - lastTriggerValue, err := conn.GetUnitTypeProperty(unit.Name, "Timer", "LastTriggerUSec") + lastTriggerValue, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Timer", "LastTriggerUSec") if err != nil { - level.Debug(c.logger).Log("msg", "couldn't get unit LastTriggerUSec", "unit", unit.Name, "err", err) + c.logger.Debug("couldn't get unit LastTriggerUSec", "unit", unit.Name, "err", err) continue } @@ -425,9 +447,9 @@ func (c *systemdCollector) collectSystemState(conn *dbus.Conn, ch chan<- prometh func newSystemdDbusConn() (*dbus.Conn, error) { if *systemdPrivate { - return dbus.NewSystemdConnection() + return dbus.NewSystemdConnectionContext(context.TODO()) } - return dbus.New() + return dbus.NewWithContext(context.TODO()) } type unit struct { @@ -435,7 +457,7 @@ type unit struct { } func (c *systemdCollector) getAllUnits(conn *dbus.Conn) ([]unit, error) { - allUnits, err := conn.ListUnits() + allUnits, err := conn.ListUnitsContext(context.TODO()) if err != nil { return nil, err } @@ -465,38 +487,49 @@ func summarizeUnits(units []unit) map[string]float64 { return summarized } -func filterUnits(units []unit, includePattern, excludePattern *regexp.Regexp, logger log.Logger) []unit { +func filterUnits(units []unit, includePattern, excludePattern *regexp.Regexp, logger *slog.Logger) []unit { filtered := make([]unit, 0, len(units)) for _, unit := range units { if includePattern.MatchString(unit.Name) && !excludePattern.MatchString(unit.Name) && unit.LoadState == "loaded" { - level.Debug(logger).Log("msg", "Adding unit", "unit", unit.Name) + logger.Debug("Adding unit", "unit", unit.Name) filtered = append(filtered, unit) } else { - level.Debug(logger).Log("msg", "Ignoring unit", "unit", unit.Name) + logger.Debug("Ignoring unit", "unit", unit.Name) } } return filtered } -func getSystemdVersion(logger log.Logger) int { - conn, err := newSystemdDbusConn() +func (c *systemdCollector) getSystemdVersion(conn *dbus.Conn) (float64, string) { + version, err := conn.GetManagerProperty("Version") if err != nil { - level.Warn(logger).Log("msg", "Unable to get systemd dbus connection, defaulting systemd version to 0", "err", err) - return 0 + c.logger.Debug("Unable to get systemd version property, defaulting to 0") + return 0, "" } - defer conn.Close() - version, err := conn.GetManagerProperty("Version") + version = strings.TrimPrefix(strings.TrimSuffix(version, `"`), `"`) + c.logger.Debug("Got systemd version", "version", version) + parsedVersion := systemdVersionRE.FindString(version) + v, err := strconv.ParseFloat(parsedVersion, 64) if err != nil { - level.Warn(logger).Log("msg", "Unable to get systemd version property, defaulting to 0") - return 0 + c.logger.Debug("Got invalid systemd version", "version", version) + return 0, "" } - re := 
regexp.MustCompile(`[0-9][0-9][0-9]`) - version = re.FindString(version) - v, err := strconv.Atoi(version) + return v, version +} + +func (c *systemdCollector) getSystemdVirtualization(conn *dbus.Conn) string { + virt, err := conn.GetManagerProperty("Virtualization") if err != nil { - level.Warn(logger).Log("msg", "Got invalid systemd version", "version", version) - return 0 + c.logger.Debug("Could not get Virtualization property", "err", err) + return "unknown" } - return v + + virtStr := strings.Trim(virt, `"`) + if virtStr == "" { + // If no virtualization type is returned, assume it's bare metal. + return "none" + } + + return virtStr } diff --git a/collector/systemd_linux_test.go b/collector/systemd_linux_test.go index 93137f2263..98668ed6be 100644 --- a/collector/systemd_linux_test.go +++ b/collector/systemd_linux_test.go @@ -11,14 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nosystemd + package collector import ( - "github.com/go-kit/kit/log" + "io" + "log/slog" "regexp" "testing" - "github.com/coreos/go-systemd/dbus" + "github.com/coreos/go-systemd/v22/dbus" ) // Creates mock UnitLists @@ -91,7 +94,7 @@ func TestSystemdIgnoreFilter(t *testing.T) { fixtures := getUnitListFixtures() includePattern := regexp.MustCompile("^foo$") excludePattern := regexp.MustCompile("^bar$") - filtered := filterUnits(fixtures[0], includePattern, excludePattern, log.NewNopLogger()) + filtered := filterUnits(fixtures[0], includePattern, excludePattern, slog.New(slog.NewTextHandler(io.Discard, nil))) for _, unit := range filtered { if excludePattern.MatchString(unit.Name) || !includePattern.MatchString(unit.Name) { t.Error(unit.Name, "should not be in the filtered list") @@ -99,14 +102,14 @@ func TestSystemdIgnoreFilter(t *testing.T) { } } func TestSystemdIgnoreFilterDefaultKeepsAll(t *testing.T) { - logger := log.NewNopLogger() + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) c, err := NewSystemdCollector(logger) if err != nil { t.Fatal(err) } fixtures := getUnitListFixtures() collector := c.(*systemdCollector) - filtered := filterUnits(fixtures[0], collector.unitIncludePattern, collector.unitExcludePattern, logger) + filtered := filterUnits(fixtures[0], collector.systemdUnitIncludePattern, collector.systemdUnitExcludePattern, logger) // Adjust fixtures by 3 "not-found" units. if len(filtered) != len(fixtures[0])-3 { t.Error("Default filters removed units") @@ -118,11 +121,12 @@ func TestSystemdSummary(t *testing.T) { summary := summarizeUnits(fixtures[0]) for _, state := range unitStatesName { - if state == "inactive" { + switch state { + case "inactive": testSummaryHelper(t, state, summary[state], 3.0) - } else if state == "active" { + case "active": testSummaryHelper(t, state, summary[state], 1.0) - } else { + default: testSummaryHelper(t, state, summary[state], 0.0) } } diff --git a/collector/tapestats_linux.go b/collector/tapestats_linux.go new file mode 100644 index 0000000000..e195ce1c51 --- /dev/null +++ b/collector/tapestats_linux.go @@ -0,0 +1,150 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !notapestats + +package collector + +import ( + "fmt" + "log/slog" + "os" + "regexp" + + "github.com/alecthomas/kingpin/v2" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +var ( + ignoredTapeDevices = kingpin.Flag("collector.tapestats.ignored-devices", "Regexp of devices to ignore for tapestats.").Default("^$").String() +) + +type tapestatsCollector struct { + ignoredDevicesPattern *regexp.Regexp + ioNow *prometheus.Desc + ioTimeSeconds *prometheus.Desc + othersCompletedTotal *prometheus.Desc + readByteTotal *prometheus.Desc + readsCompletedTotal *prometheus.Desc + readTimeSeconds *prometheus.Desc + writtenByteTotal *prometheus.Desc + writesCompletedTotal *prometheus.Desc + writeTimeSeconds *prometheus.Desc + residualTotal *prometheus.Desc + fs sysfs.FS + logger *slog.Logger +} + +func init() { + registerCollector("tapestats", defaultEnabled, NewTapestatsCollector) +} + +// NewTapestatsCollector returns a new Collector exposing tape device stats. +// Docs from https://www.kernel.org/doc/html/latest/scsi/st.html#sysfs-and-statistics-for-tape-devices +func NewTapestatsCollector(logger *slog.Logger) (Collector, error) { + var tapeLabelNames = []string{"device"} + + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + tapeSubsystem := "tape" + + return &tapestatsCollector{ + ignoredDevicesPattern: regexp.MustCompile(*ignoredTapeDevices), + + ioNow: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "io_now"), + "The number of I/Os currently outstanding to this device.", + tapeLabelNames, nil, + ), + ioTimeSeconds: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "io_time_seconds_total"), + "The amount of time spent waiting for all I/O to complete (including read and write). This includes tape movement commands such as seeking between file or set marks and implicit tape movement such as when rewind on close tape devices are used.", + tapeLabelNames, nil, + ), + othersCompletedTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "io_others_total"), + "The number of I/Os issued to the tape drive other than read or write commands. 
The time taken to complete these commands uses the following calculation io_time_seconds_total-read_time_seconds_total-write_time_seconds_total", + tapeLabelNames, nil, + ), + readByteTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "read_bytes_total"), + "The number of bytes read from the tape drive.", + tapeLabelNames, nil, + ), + readsCompletedTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "reads_completed_total"), + "The number of read requests issued to the tape drive.", + tapeLabelNames, nil, + ), + readTimeSeconds: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "read_time_seconds_total"), + "The amount of time spent waiting for read requests to complete.", + tapeLabelNames, nil, + ), + writtenByteTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "written_bytes_total"), + "The number of bytes written to the tape drive.", + tapeLabelNames, nil, + ), + writesCompletedTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "writes_completed_total"), + "The number of write requests issued to the tape drive.", + tapeLabelNames, nil, + ), + writeTimeSeconds: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "write_time_seconds_total"), + "The amount of time spent waiting for write requests to complete.", + tapeLabelNames, nil, + ), + residualTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "residual_total"), + "The number of times during a read or write we found the residual amount to be non-zero. This should mean that a program is issuing a read larger than the block size on tape. For writes, it means not all data made it to tape.", + tapeLabelNames, nil, + ), + logger: logger, + fs: fs, + }, nil +} + +func (c *tapestatsCollector) Update(ch chan<- prometheus.Metric) error { + tapes, err := c.fs.SCSITapeClass() + if err != nil { + if os.IsNotExist(err) { + c.logger.Debug("scsi_tape stats not found, skipping") + return ErrNoData + } + return fmt.Errorf("error obtaining SCSITape class info: %s", err) + } + + for _, tape := range tapes { + if c.ignoredDevicesPattern.MatchString(tape.Name) { + c.logger.Debug("Ignoring device", "device", tape.Name) + continue + } + ch <- prometheus.MustNewConstMetric(c.ioNow, prometheus.GaugeValue, float64(tape.Counters.InFlight), tape.Name) + ch <- prometheus.MustNewConstMetric(c.ioTimeSeconds, prometheus.CounterValue, float64(tape.Counters.IoNs)*0.000000001, tape.Name) + ch <- prometheus.MustNewConstMetric(c.othersCompletedTotal, prometheus.CounterValue, float64(tape.Counters.OtherCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.readByteTotal, prometheus.CounterValue, float64(tape.Counters.ReadByteCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.readsCompletedTotal, prometheus.CounterValue, float64(tape.Counters.ReadCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.readTimeSeconds, prometheus.CounterValue, float64(tape.Counters.ReadNs)*0.000000001, tape.Name) + ch <- prometheus.MustNewConstMetric(c.residualTotal, prometheus.CounterValue, float64(tape.Counters.ResidCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.writtenByteTotal, prometheus.CounterValue, float64(tape.Counters.WriteByteCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.writesCompletedTotal, prometheus.CounterValue, float64(tape.Counters.WriteCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.writeTimeSeconds, prometheus.CounterValue, float64(tape.Counters.WriteNs)*0.000000001, 
tape.Name) + } + return nil +} diff --git a/collector/tcpstat_linux.go b/collector/tcpstat_linux.go index af5ae981ce..555e46e91f 100644 --- a/collector/tcpstat_linux.go +++ b/collector/tcpstat_linux.go @@ -11,19 +11,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !notcpstat +//go:build !notcpstat package collector import ( "fmt" - "io" - "io/ioutil" + "log/slog" "os" - "strconv" - "strings" + "syscall" + "unsafe" - "github.com/go-kit/kit/log" + "github.com/mdlayher/netlink" "github.com/prometheus/client_golang/prometheus" ) @@ -60,7 +59,7 @@ const ( type tcpStatCollector struct { desc typedDesc - logger log.Logger + logger *slog.Logger } func init() { @@ -68,7 +67,7 @@ func init() { } // NewTCPStatCollector returns a new Collector exposing network stats. -func NewTCPStatCollector(logger log.Logger) (Collector, error) { +func NewTCPStatCollector(logger *slog.Logger) (Collector, error) { return &tcpStatCollector{ desc: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "tcp", "connection_states"), @@ -79,16 +78,64 @@ func NewTCPStatCollector(logger log.Logger) (Collector, error) { }, nil } +// InetDiagSockID (inet_diag_sockid) contains the socket identity. +// https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L13 +type InetDiagSockID struct { + SourcePort [2]byte + DestPort [2]byte + SourceIP [4][4]byte + DestIP [4][4]byte + Interface uint32 + Cookie [2]uint32 +} + +// InetDiagReqV2 (inet_diag_req_v2) is used to request diagnostic data. +// https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L37 +type InetDiagReqV2 struct { + Family uint8 + Protocol uint8 + Ext uint8 + Pad uint8 + States uint32 + ID InetDiagSockID +} + +const sizeOfDiagRequest = 0x38 + +func (req *InetDiagReqV2) Serialize() []byte { + return (*(*[sizeOfDiagRequest]byte)(unsafe.Pointer(req)))[:] +} + +func (req *InetDiagReqV2) Len() int { + return sizeOfDiagRequest +} + +type InetDiagMsg struct { + Family uint8 + State uint8 + Timer uint8 + Retrans uint8 + ID InetDiagSockID + Expires uint32 + RQueue uint32 + WQueue uint32 + UID uint32 + Inode uint32 +} + +func parseInetDiagMsg(b []byte) *InetDiagMsg { + return (*InetDiagMsg)(unsafe.Pointer(&b[0])) +} + func (c *tcpStatCollector) Update(ch chan<- prometheus.Metric) error { - tcpStats, err := getTCPStats(procFilePath("net/tcp")) + tcpStats, err := getTCPStats(syscall.AF_INET) if err != nil { return fmt.Errorf("couldn't get tcpstats: %w", err) } // if enabled ipv6 system - tcp6File := procFilePath("net/tcp6") - if _, hasIPv6 := os.Stat(tcp6File); hasIPv6 == nil { - tcp6Stats, err := getTCPStats(tcp6File) + if _, hasIPv6 := os.Stat(procFilePath("net/tcp6")); hasIPv6 == nil { + tcp6Stats, err := getTCPStats(syscall.AF_INET6) if err != nil { return fmt.Errorf("couldn't get tcp6stats: %w", err) } @@ -101,59 +148,51 @@ func (c *tcpStatCollector) Update(ch chan<- prometheus.Metric) error { for st, value := range tcpStats { ch <- c.desc.mustNewConstMetric(value, st.String()) } + return nil } -func getTCPStats(statsFile string) (map[tcpConnectionState]float64, error) { - file, err := os.Open(statsFile) +func getTCPStats(family uint8) (map[tcpConnectionState]float64, error) { + const TCPFAll = 0xFFF + const InetDiagInfo = 2 + const SockDiagByFamily = 20 + + conn, err := netlink.Dial(syscall.NETLINK_INET_DIAG, nil) if err != nil { - return nil, err + return nil, fmt.Errorf("couldn't connect netlink: %w", err) + } + defer conn.Close() + + msg := 
netlink.Message{ + Header: netlink.Header{ + Type: SockDiagByFamily, + Flags: syscall.NLM_F_REQUEST | syscall.NLM_F_DUMP, + }, + Data: (&InetDiagReqV2{ + Family: family, + Protocol: syscall.IPPROTO_TCP, + States: TCPFAll, + Ext: 0 | 1<<(InetDiagInfo-1), + }).Serialize(), } - defer file.Close() - - return parseTCPStats(file) -} -func parseTCPStats(r io.Reader) (map[tcpConnectionState]float64, error) { - tcpStats := map[tcpConnectionState]float64{} - contents, err := ioutil.ReadAll(r) + messages, err := conn.Execute(msg) if err != nil { return nil, err } - for _, line := range strings.Split(string(contents), "\n")[1:] { - parts := strings.Fields(line) - if len(parts) == 0 { - continue - } - if len(parts) < 5 { - return nil, fmt.Errorf("invalid TCP stats line: %q", line) - } - - qu := strings.Split(parts[4], ":") - if len(qu) < 2 { - return nil, fmt.Errorf("cannot parse tx_queues and rx_queues: %q", line) - } - - tx, err := strconv.ParseUint(qu[0], 16, 64) - if err != nil { - return nil, err - } - tcpStats[tcpConnectionState(tcpTxQueuedBytes)] += float64(tx) - - rx, err := strconv.ParseUint(qu[1], 16, 64) - if err != nil { - return nil, err - } - tcpStats[tcpConnectionState(tcpRxQueuedBytes)] += float64(rx) + return parseTCPStats(messages) +} - st, err := strconv.ParseInt(parts[3], 16, 8) - if err != nil { - return nil, err - } +func parseTCPStats(msgs []netlink.Message) (map[tcpConnectionState]float64, error) { + tcpStats := map[tcpConnectionState]float64{} - tcpStats[tcpConnectionState(st)]++ + for _, m := range msgs { + msg := parseInetDiagMsg(m.Data) + tcpStats[tcpTxQueuedBytes] += float64(msg.WQueue) + tcpStats[tcpRxQueuedBytes] += float64(msg.RQueue) + tcpStats[tcpConnectionState(msg.State)]++ } return tcpStats, nil diff --git a/collector/tcpstat_linux_test.go b/collector/tcpstat_linux_test.go index b609b84679..8a56af9ccd 100644 --- a/collector/tcpstat_linux_test.go +++ b/collector/tcpstat_linux_test.go @@ -11,69 +11,61 @@ // See the License for the specific language governing permissions and // limitations under the License. 
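// A self-contained sketch of the sock_diag(7) dump that the rewritten tcpstat
// collector performs with github.com/mdlayher/netlink. Struct layouts mirror
// include/uapi/linux/inet_diag.h; this illustrates the approach under those
// assumptions and is not the collector code itself.
package main

import (
	"fmt"
	"log"
	"syscall"
	"unsafe"

	"github.com/mdlayher/netlink"
)

type inetDiagSockID struct {
	SourcePort [2]byte
	DestPort   [2]byte
	SourceIP   [4][4]byte
	DestIP     [4][4]byte
	Interface  uint32
	Cookie     [2]uint32
}

type inetDiagReqV2 struct {
	Family   uint8
	Protocol uint8
	Ext      uint8
	Pad      uint8
	States   uint32
	ID       inetDiagSockID
}

type inetDiagMsg struct {
	Family  uint8
	State   uint8
	Timer   uint8
	Retrans uint8
	ID      inetDiagSockID
	Expires uint32
	RQueue  uint32
	WQueue  uint32
	UID     uint32
	Inode   uint32
}

func main() {
	conn, err := netlink.Dial(syscall.NETLINK_INET_DIAG, nil)
	if err != nil {
		log.Fatalf("dial netlink: %v", err)
	}
	defer conn.Close()

	req := inetDiagReqV2{
		Family:   syscall.AF_INET,
		Protocol: syscall.IPPROTO_TCP,
		States:   0xFFF, // TCPF_ALL: ask for sockets in every TCP state.
	}
	msg := netlink.Message{
		Header: netlink.Header{
			Type:  20, // SOCK_DIAG_BY_FAMILY
			Flags: syscall.NLM_F_REQUEST | syscall.NLM_F_DUMP,
		},
		// Reinterpret the request struct as its raw byte layout, as Serialize does above.
		Data: (*(*[unsafe.Sizeof(req)]byte)(unsafe.Pointer(&req)))[:],
	}

	replies, err := conn.Execute(msg)
	if err != nil {
		log.Fatalf("execute: %v", err)
	}

	states := map[uint8]int{}
	for _, r := range replies {
		if len(r.Data) < int(unsafe.Sizeof(inetDiagMsg{})) {
			continue
		}
		m := (*inetDiagMsg)(unsafe.Pointer(&r.Data[0]))
		states[m.State]++
	}
	fmt.Println("TCP sockets by state:", states)
}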
+//go:build !notcpstat + package collector import ( - "os" - "strings" + "bytes" + "encoding/binary" + "syscall" "testing" + + "github.com/mdlayher/netlink" ) -func Test_parseTCPStatsError(t *testing.T) { - tests := []struct { - name string - in string - }{ - { - name: "too few fields", - in: "sl local_address\n 0: 00000000:0016", - }, - { - name: "missing colon in tx-rx field", - in: "sl local_address rem_address st tx_queue rx_queue\n" + - " 1: 0F02000A:0016 0202000A:8B6B 01 0000000000000001", - }, - { - name: "tx parsing issue", - in: "sl local_address rem_address st tx_queue rx_queue\n" + - " 1: 0F02000A:0016 0202000A:8B6B 01 0000000x:00000001", - }, +func Test_parseTCPStats(t *testing.T) { + encode := func(m InetDiagMsg) []byte { + var buf bytes.Buffer + err := binary.Write(&buf, binary.NativeEndian, m) + if err != nil { + panic(err) + } + return buf.Bytes() + } + + msg := []netlink.Message{ { - name: "rx parsing issue", - in: "sl local_address rem_address st tx_queue rx_queue\n" + - " 1: 0F02000A:0016 0202000A:8B6B 01 00000000:0000000x", + Data: encode(InetDiagMsg{ + Family: syscall.AF_INET, + State: uint8(tcpEstablished), + Timer: 0, + Retrans: 0, + ID: InetDiagSockID{}, + Expires: 0, + RQueue: 11, + WQueue: 21, + UID: 0, + Inode: 0, + }), }, { - name: "state parsing issue", - in: "sl local_address rem_address st tx_queue rx_queue\n" + - " 1: 0F02000A:0016 0202000A:8B6B 0H 00000000:00000001", + Data: encode(InetDiagMsg{ + Family: syscall.AF_INET, + State: uint8(tcpListen), + Timer: 0, + Retrans: 0, + ID: InetDiagSockID{}, + Expires: 0, + RQueue: 11, + WQueue: 21, + UID: 0, + Inode: 0, + }), }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if _, err := parseTCPStats(strings.NewReader(tt.in)); err == nil { - t.Fatal("expected an error, but none occurred") - } - }) - } -} - -func TestTCPStat(t *testing.T) { - - noFile, _ := os.Open("follow the white rabbit") - defer noFile.Close() - if _, err := parseTCPStats(noFile); err == nil { - t.Fatal("expected an error, but none occurred") - } - - file, err := os.Open("fixtures/proc/net/tcpstat") - if err != nil { - t.Fatal(err) - } - defer file.Close() - - tcpStats, err := parseTCPStats(file) + tcpStats, err := parseTCPStats(msg) if err != nil { t.Fatal(err) } @@ -89,35 +81,8 @@ func TestTCPStat(t *testing.T) { if want, got := 42, int(tcpStats[tcpTxQueuedBytes]); want != got { t.Errorf("want tcpstat number of bytes in tx queue %d, got %d", want, got) } - if want, got := 1, int(tcpStats[tcpRxQueuedBytes]); want != got { + if want, got := 22, int(tcpStats[tcpRxQueuedBytes]); want != got { t.Errorf("want tcpstat number of bytes in rx queue %d, got %d", want, got) } } - -func Test_getTCPStats(t *testing.T) { - type args struct { - statsFile string - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "file not found", - args: args{statsFile: "somewhere over the rainbow"}, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := getTCPStats(tt.args.statsFile) - if (err != nil) != tt.wantErr { - t.Errorf("getTCPStats() error = %v, wantErr %v", err, tt.wantErr) - return - } - // other cases are covered by TestTCPStat() - }) - } -} diff --git a/collector/textfile.go b/collector/textfile.go index 50c1807592..17e4a60de2 100644 --- a/collector/textfile.go +++ b/collector/textfile.go @@ -11,30 +11,30 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !notextfile +//go:build !notextfile package collector import ( "fmt" - "io/ioutil" + "log/slog" "os" "path/filepath" + "slices" "sort" "strings" "time" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" - kingpin "gopkg.in/alecthomas/kingpin.v2" + "github.com/prometheus/common/model" ) var ( - textFileDirectory = kingpin.Flag("collector.textfile.directory", "Directory to read text files with metrics from.").Default("").String() - mtimeDesc = prometheus.NewDesc( + textFileDirectories = kingpin.Flag("collector.textfile.directory", "Directory to read text files with metrics from, supports glob matching. (repeatable)").Default("").Strings() + mtimeDesc = prometheus.NewDesc( "node_textfile_mtime_seconds", "Unixtime mtime of textfiles successfully read.", []string{"file"}, @@ -43,10 +43,10 @@ var ( ) type textFileCollector struct { - path string + paths []string // Only set for testing to get predictable output. mtime *float64 - logger log.Logger + logger *slog.Logger } func init() { @@ -55,15 +55,15 @@ func init() { // NewTextFileCollector returns a new Collector exposing metrics read from files // in the given textfile directory. -func NewTextFileCollector(logger log.Logger) (Collector, error) { +func NewTextFileCollector(logger *slog.Logger) (Collector, error) { c := &textFileCollector{ - path: *textFileDirectory, + paths: *textFileDirectories, logger: logger, } return c, nil } -func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric, logger log.Logger) { +func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric, logger *slog.Logger) { var valType prometheus.ValueType var val float64 @@ -79,7 +79,7 @@ func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Me for _, metric := range metricFamily.Metric { if metric.TimestampMs != nil { - level.Warn(logger).Log("msg", "Ignoring unsupported custom timestamp on textfile collector metric", "metric", metric) + logger.Warn("Ignoring unsupported custom timestamp on textfile collector metric", "metric", metric) } labels := metric.GetLabel() @@ -91,14 +91,7 @@ func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Me } for k := range allLabelNames { - present := false - for _, name := range names { - if k == name { - present = true - break - } - } - if !present { + if !slices.Contains(names, k) { names = append(names, k) values = append(values, "") } @@ -171,18 +164,18 @@ func (c *textFileCollector) exportMTimes(mtimes map[string]time.Time, ch chan<- // Export the mtimes of the successful files. // Sorting is needed for predictable output comparison in tests. 
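// --collector.textfile.directory is now repeatable and glob-capable. A short
// sketch of how a repeatable kingpin flag combines with filepath.Glob, with a
// literal-path fallback when the pattern matches nothing; the flag name is
// reused from the patch for illustration only.
package main

import (
	"fmt"
	"path/filepath"

	"github.com/alecthomas/kingpin/v2"
)

var dirs = kingpin.Flag("collector.textfile.directory",
	"Directory to read text files with metrics from, supports glob matching. (repeatable)").
	Default("").Strings()

func main() {
	kingpin.Parse()

	var paths []string
	for _, glob := range *dirs {
		ps, err := filepath.Glob(glob)
		if err != nil || len(ps) == 0 {
			// Not a glob (or nothing matched): fall back to the literal path.
			ps = []string{glob}
		}
		paths = append(paths, ps...)
	}
	fmt.Println("textfile directories:", paths)
}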
- filenames := make([]string, 0, len(mtimes)) - for filename := range mtimes { - filenames = append(filenames, filename) + filepaths := make([]string, 0, len(mtimes)) + for path := range mtimes { + filepaths = append(filepaths, path) } - sort.Strings(filenames) + sort.Strings(filepaths) - for _, filename := range filenames { - mtime := float64(mtimes[filename].UnixNano() / 1e9) + for _, path := range filepaths { + mtime := float64(mtimes[path].UnixNano() / 1e9) if c.mtime != nil { mtime = *c.mtime } - ch <- prometheus.MustNewConstMetric(mtimeDesc, prometheus.GaugeValue, mtime, filename) + ch <- prometheus.MustNewConstMetric(mtimeDesc, prometheus.GaugeValue, mtime, path) } } @@ -191,26 +184,84 @@ func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error { // Iterate over files and accumulate their metrics, but also track any // parsing errors so an error metric can be reported. var errored bool - files, err := ioutil.ReadDir(c.path) - if err != nil && c.path != "" { - errored = true - level.Error(c.logger).Log("msg", "failed to read textfile collector directory", "path", c.path, "err", err) + var parsedFamilies []*dto.MetricFamily + metricsNamesToFiles := map[string][]string{} + metricsNamesToHelpTexts := map[string][2]string{} + + paths := []string{} + for _, glob := range c.paths { + ps, err := filepath.Glob(glob) + if err != nil || len(ps) == 0 { + // not glob or not accessible path either way assume single + // directory and let os.ReadDir handle it + ps = []string{glob} + } + paths = append(paths, ps...) } - mtimes := make(map[string]time.Time, len(files)) - for _, f := range files { - if !strings.HasSuffix(f.Name(), ".prom") { - continue + mtimes := make(map[string]time.Time) + for _, path := range paths { + files, err := os.ReadDir(path) + if err != nil && path != "" { + errored = true + c.logger.Error("failed to read textfile collector directory", "path", path, "err", err) } - mtime, err := c.processFile(f.Name(), ch) - if err != nil { - errored = true - level.Error(c.logger).Log("msg", "failed to collect textfile data", "file", f.Name(), "err", err) - continue + for _, f := range files { + metricsFilePath := filepath.Join(path, f.Name()) + if !strings.HasSuffix(f.Name(), ".prom") { + continue + } + + mtime, families, err := c.processFile(path, f.Name(), ch) + + for _, mf := range families { + // Check for metrics with inconsistent help texts and take the first help text occurrence. + if helpTexts, seen := metricsNamesToHelpTexts[*mf.Name]; seen { + if mf.Help != nil && helpTexts[0] != *mf.Help || helpTexts[1] != "" { + metricsNamesToHelpTexts[*mf.Name] = [2]string{helpTexts[0], *mf.Help} + errored = true + c.logger.Error("inconsistent metric help text", + "metric", *mf.Name, + "original_help_text", helpTexts[0], + "new_help_text", *mf.Help, + // Only the first file path will be recorded in case of two or more inconsistent help texts. 
+ "file", metricsNamesToFiles[*mf.Name][0]) + continue + } + } + if mf.Help != nil { + metricsNamesToHelpTexts[*mf.Name] = [2]string{*mf.Help} + } + metricsNamesToFiles[*mf.Name] = append(metricsNamesToFiles[*mf.Name], metricsFilePath) + parsedFamilies = append(parsedFamilies, mf) + } + + if err != nil { + errored = true + c.logger.Error("failed to collect textfile data", "file", f.Name(), "err", err) + continue + } + + mtimes[metricsFilePath] = *mtime } + } - mtimes[f.Name()] = *mtime + mfHelp := make(map[string]*string) + for _, mf := range parsedFamilies { + if mf.Help == nil { + if help, ok := mfHelp[*mf.Name]; ok { + mf.Help = help + continue + } + help := fmt.Sprintf("Metric read from %s", strings.Join(metricsNamesToFiles[*mf.Name], ", ")) + mf.Help = &help + mfHelp[*mf.Name] = &help + } + } + + for _, mf := range parsedFamilies { + convertMetricFamily(mf, ch, c.logger) } c.exportMTimes(mtimes, ch) @@ -234,44 +285,33 @@ func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error { } // processFile processes a single file, returning its modification time on success. -func (c *textFileCollector) processFile(name string, ch chan<- prometheus.Metric) (*time.Time, error) { - path := filepath.Join(c.path, name) +func (c *textFileCollector) processFile(dir, name string, ch chan<- prometheus.Metric) (*time.Time, map[string]*dto.MetricFamily, error) { + path := filepath.Join(dir, name) f, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("failed to open textfile data file %q: %w", path, err) + return nil, nil, fmt.Errorf("failed to open textfile data file %q: %w", path, err) } defer f.Close() - var parser expfmt.TextParser + parser := expfmt.NewTextParser(model.LegacyValidation) families, err := parser.TextToMetricFamilies(f) if err != nil { - return nil, fmt.Errorf("failed to parse textfile data from %q: %w", path, err) + return nil, nil, fmt.Errorf("failed to parse textfile data from %q: %w", path, err) } if hasTimestamps(families) { - return nil, fmt.Errorf("textfile %q contains unsupported client-side timestamps, skipping entire file", path) - } - - for _, mf := range families { - if mf.Help == nil { - help := fmt.Sprintf("Metric read from %s", path) - mf.Help = &help - } - } - - for _, mf := range families { - convertMetricFamily(mf, ch, c.logger) + return nil, nil, fmt.Errorf("textfile %q contains unsupported client-side timestamps, skipping entire file", path) } // Only stat the file once it has been parsed and validated, so that // a failure does not appear fresh. stat, err := f.Stat() if err != nil { - return nil, fmt.Errorf("failed to stat %q: %w", path, err) + return nil, families, fmt.Errorf("failed to stat %q: %w", path, err) } t := stat.ModTime() - return &t, nil + return &t, families, nil } // hasTimestamps returns true when metrics contain unsupported timestamps. diff --git a/collector/textfile_test.go b/collector/textfile_test.go index f0e93a8bd4..2fb31faca1 100644 --- a/collector/textfile_test.go +++ b/collector/textfile_test.go @@ -11,21 +11,24 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !notextfile + package collector import ( "fmt" - "io/ioutil" + "io" + "log/slog" "net/http" "net/http/httptest" + "os" "testing" - "github.com/go-kit/kit/log" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/common/promlog" - "github.com/prometheus/common/promlog/flag" - "gopkg.in/alecthomas/kingpin.v2" + "github.com/prometheus/common/promslog" + "github.com/prometheus/common/promslog/flag" ) type collectorAdapter struct { @@ -48,63 +51,90 @@ func (a collectorAdapter) Collect(ch chan<- prometheus.Metric) { func TestTextfileCollector(t *testing.T) { tests := []struct { - path string - out string + paths []string + out string }{ { - path: "fixtures/textfile/no_metric_files", - out: "fixtures/textfile/no_metric_files.out", + paths: []string{"fixtures/textfile/no_metric_files"}, + out: "fixtures/textfile/no_metric_files.out", + }, + { + paths: []string{"fixtures/textfile/two_metric_files"}, + out: "fixtures/textfile/two_metric_files.out", + }, + { + paths: []string{"fixtures/textfile/nonexistent_path"}, + out: "fixtures/textfile/nonexistent_path.out", + }, + { + paths: []string{"fixtures/textfile/client_side_timestamp"}, + out: "fixtures/textfile/client_side_timestamp.out", + }, + { + paths: []string{"fixtures/textfile/different_metric_types"}, + out: "fixtures/textfile/different_metric_types.out", + }, + { + paths: []string{"fixtures/textfile/inconsistent_metrics"}, + out: "fixtures/textfile/inconsistent_metrics.out", + }, + { + paths: []string{"fixtures/textfile/histogram"}, + out: "fixtures/textfile/histogram.out", }, { - path: "fixtures/textfile/two_metric_files", - out: "fixtures/textfile/two_metric_files.out", + paths: []string{"fixtures/textfile/histogram_extra_dimension"}, + out: "fixtures/textfile/histogram_extra_dimension.out", }, { - path: "fixtures/textfile/nonexistent_path", - out: "fixtures/textfile/nonexistent_path.out", + paths: []string{"fixtures/textfile/summary"}, + out: "fixtures/textfile/summary.out", }, { - path: "fixtures/textfile/client_side_timestamp", - out: "fixtures/textfile/client_side_timestamp.out", + paths: []string{"fixtures/textfile/summary_extra_dimension"}, + out: "fixtures/textfile/summary_extra_dimension.out", }, { - path: "fixtures/textfile/different_metric_types", - out: "fixtures/textfile/different_metric_types.out", + paths: []string{ + "fixtures/textfile/histogram_extra_dimension", + "fixtures/textfile/summary_extra_dimension", + }, + out: "fixtures/textfile/glob_extra_dimension.out", }, { - path: "fixtures/textfile/inconsistent_metrics", - out: "fixtures/textfile/inconsistent_metrics.out", + paths: []string{"fixtures/textfile/*_extra_dimension"}, + out: "fixtures/textfile/glob_extra_dimension.out", }, { - path: "fixtures/textfile/histogram", - out: "fixtures/textfile/histogram.out", + paths: []string{"fixtures/textfile/metrics_merge_empty_help"}, + out: "fixtures/textfile/metrics_merge_empty_help.out", }, { - path: "fixtures/textfile/histogram_extra_dimension", - out: "fixtures/textfile/histogram_extra_dimension.out", + paths: []string{"fixtures/textfile/metrics_merge_no_help"}, + out: "fixtures/textfile/metrics_merge_no_help.out", }, { - path: "fixtures/textfile/summary", - out: "fixtures/textfile/summary.out", + paths: []string{"fixtures/textfile/metrics_merge_same_help"}, + out: "fixtures/textfile/metrics_merge_same_help.out", }, { - path: "fixtures/textfile/summary_extra_dimension", - out: 
"fixtures/textfile/summary_extra_dimension.out", + paths: []string{"fixtures/textfile/metrics_merge_different_help"}, + out: "fixtures/textfile/metrics_merge_different_help.out", }, } for i, test := range tests { mtime := 1.0 c := &textFileCollector{ - path: test.path, + paths: test.paths, mtime: &mtime, - logger: log.NewNopLogger(), + logger: slog.New(slog.NewTextHandler(io.Discard, nil)), } // Suppress a log message about `nonexistent_path` not existing, this is // expected and clutters the test output. - promlogConfig := &promlog.Config{} - flag.AddFlags(kingpin.CommandLine, promlogConfig) + promslogConfig := &promslog.Config{} + flag.AddFlags(kingpin.CommandLine, promslogConfig) if _, err := kingpin.CommandLine.Parse([]string{"--log.level", "debug"}); err != nil { t.Fatal(err) } @@ -113,16 +143,16 @@ func TestTextfileCollector(t *testing.T) { registry.MustRegister(collectorAdapter{c}) rw := httptest.NewRecorder() - promhttp.HandlerFor(registry, promhttp.HandlerOpts{}).ServeHTTP(rw, &http.Request{}) + promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}).ServeHTTP(rw, &http.Request{}) got := string(rw.Body.String()) - want, err := ioutil.ReadFile(test.out) + want, err := os.ReadFile(test.out) if err != nil { t.Fatalf("%d. error reading fixture file %s: %s", i, test.out, err) } if string(want) != got { - t.Fatalf("%d.%q want:\n\n%s\n\ngot:\n\n%s", i, test.path, string(want), got) + t.Fatalf("%d.%q want:\n\n%s\n\ngot:\n\n%s", i, test.paths, string(want), got) } } } diff --git a/collector/thermal_darwin.go b/collector/thermal_darwin.go new file mode 100644 index 0000000000..b51f1822ca --- /dev/null +++ b/collector/thermal_darwin.go @@ -0,0 +1,188 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !notherm + +package collector + +/* +#cgo LDFLAGS: -framework IOKit -framework CoreFoundation +#include +#include +#include +#include +#include + +struct ref_with_ret { + CFDictionaryRef ref; + IOReturn ret; +}; + +struct ref_with_ret FetchThermal(); + +struct ref_with_ret FetchThermal() { + CFDictionaryRef ref; + IOReturn ret; + ret = IOPMCopyCPUPowerStatus(&ref); + struct ref_with_ret result = { + ref, + ret, + }; + return result; +} +*/ +import "C" + +import ( + "errors" + "fmt" + "log/slog" + "unsafe" + + "github.com/prometheus/node_exporter/collector/utils" + + "github.com/prometheus/client_golang/prometheus" +) + +type thermCollector struct { + cpuSchedulerLimit typedDesc + cpuAvailableCPU typedDesc + cpuSpeedLimit typedDesc + logger *slog.Logger +} + +const thermal = "thermal" + +func init() { + registerCollector(thermal, defaultEnabled, NewThermCollector) +} + +// NewThermCollector returns a new Collector exposing current CPU power levels. 
+func NewThermCollector(logger *slog.Logger) (Collector, error) { + return &thermCollector{ + cpuSchedulerLimit: typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, thermal, "cpu_scheduler_limit_ratio"), + "Represents the percentage (0-100) of CPU time available. 100% at normal operation. The OS may limit this time for a percentage less than 100%.", + nil, + nil), + valueType: prometheus.GaugeValue, + }, + cpuAvailableCPU: typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, thermal, "cpu_available_cpu"), + "Reflects how many, if any, CPUs have been taken offline. Represented as an integer number of CPUs (0 - Max CPUs).", + nil, + nil, + ), + valueType: prometheus.GaugeValue, + }, + cpuSpeedLimit: typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, thermal, "cpu_speed_limit_ratio"), + "Defines the speed & voltage limits placed on the CPU. Represented as a percentage (0-100) of maximum CPU speed.", + nil, + nil, + ), + valueType: prometheus.GaugeValue, + }, + logger: logger, + }, nil +} + +func (c *thermCollector) Update(ch chan<- prometheus.Metric) error { + cpuPowerStatus, err := fetchCPUPowerStatus() + if err != nil { + return err + } + if value, ok := cpuPowerStatus[(string(C.kIOPMCPUPowerLimitSchedulerTimeKey))]; ok { + ch <- c.cpuSchedulerLimit.mustNewConstMetric(float64(value) / 100.0) + } + if value, ok := cpuPowerStatus[(string(C.kIOPMCPUPowerLimitProcessorCountKey))]; ok { + ch <- c.cpuAvailableCPU.mustNewConstMetric(float64(value)) + } + if value, ok := cpuPowerStatus[(string(C.kIOPMCPUPowerLimitProcessorSpeedKey))]; ok { + ch <- c.cpuSpeedLimit.mustNewConstMetric(float64(value) / 100.0) + } + return nil +} + +func fetchCPUPowerStatus() (map[string]int, error) { + cfDictRef, _ := C.FetchThermal() + defer func() { + if cfDictRef.ref != 0x0 { + C.CFRelease(C.CFTypeRef(cfDictRef.ref)) + } + }() + + if C.kIOReturnNotFound == cfDictRef.ret { + return nil, errors.New("no CPU power status has been recorded") + } + + if C.kIOReturnSuccess != cfDictRef.ret { + return nil, fmt.Errorf("no CPU power status with error code 0x%08x", int(cfDictRef.ret)) + } + + // mapping CFDictionary to map + cfDict := CFDict(cfDictRef.ref) + return mappingCFDictToMap(cfDict), nil +} + +type CFDict uintptr + +func mappingCFDictToMap(dict CFDict) map[string]int { + if C.CFNullRef(dict) == C.kCFNull { + return nil + } + cfDict := C.CFDictionaryRef(dict) + + var result map[string]int + count := C.CFDictionaryGetCount(cfDict) + if count > 0 { + keys := make([]C.CFTypeRef, count) + values := make([]C.CFTypeRef, count) + C.CFDictionaryGetKeysAndValues(cfDict, (*unsafe.Pointer)(unsafe.Pointer(&keys[0])), (*unsafe.Pointer)(unsafe.Pointer(&values[0]))) + result = make(map[string]int, count) + for i := C.CFIndex(0); i < count; i++ { + result[mappingCFStringToString(C.CFStringRef(keys[i]))] = mappingCFNumberLongToInt(C.CFNumberRef(values[i])) + } + } + return result +} + +// CFStringToString converts a CFStringRef to a string. 
+func mappingCFStringToString(s C.CFStringRef) string { + p := C.CFStringGetCStringPtr(s, C.kCFStringEncodingUTF8) + if p != nil { + return C.GoString(p) + } + length := C.CFStringGetLength(s) + if length == 0 { + return "" + } + maxBufLen := C.CFStringGetMaximumSizeForEncoding(length, C.kCFStringEncodingUTF8) + if maxBufLen == 0 { + return "" + } + buf := make([]byte, maxBufLen) + var usedBufLen C.CFIndex + _ = C.CFStringGetBytes(s, C.CFRange{0, length}, C.kCFStringEncodingUTF8, C.UInt8(0), C.false, (*C.UInt8)(&buf[0]), maxBufLen, &usedBufLen) + return utils.SafeBytesToString(buf[:usedBufLen]) +} + +func mappingCFNumberLongToInt(n C.CFNumberRef) int { + typ := C.CFNumberGetType(n) + var long C.long + C.CFNumberGetValue(n, typ, unsafe.Pointer(&long)) + return int(long) +} diff --git a/collector/thermal_zone_linux.go b/collector/thermal_zone_linux.go index 5d223bcd4f..a50278e998 100644 --- a/collector/thermal_zone_linux.go +++ b/collector/thermal_zone_linux.go @@ -11,14 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nothermalzone +//go:build !nothermalzone package collector import ( + "errors" "fmt" + "log/slog" + "os" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) @@ -31,7 +33,7 @@ type thermalZoneCollector struct { coolingDeviceCurState *prometheus.Desc coolingDeviceMaxState *prometheus.Desc zoneTemp *prometheus.Desc - logger log.Logger + logger *slog.Logger } func init() { @@ -39,7 +41,7 @@ func init() { } // NewThermalZoneCollector returns a new Collector exposing kernel/system statistics. -func NewThermalZoneCollector(logger log.Logger) (Collector, error) { +func NewThermalZoneCollector(logger *slog.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) @@ -69,6 +71,10 @@ func NewThermalZoneCollector(logger log.Logger) (Collector, error) { func (c *thermalZoneCollector) Update(ch chan<- prometheus.Metric) error { thermalZones, err := c.fs.ClassThermalZoneStats() if err != nil { + if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) || errors.Is(err, os.ErrInvalid) { + c.logger.Debug("Could not read thermal zone stats", "err", err) + return ErrNoData + } return err } diff --git a/collector/time.go b/collector/time.go index 76a2d1e968..1851ba1a7e 100644 --- a/collector/time.go +++ b/collector/time.go @@ -11,21 +11,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !notime +//go:build !notime package collector import ( + "log/slog" "time" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" ) type timeCollector struct { - desc *prometheus.Desc - logger log.Logger + now typedDesc + zone typedDesc + clocksourcesAvailable typedDesc + clocksourceCurrent typedDesc + logger *slog.Logger } func init() { @@ -34,20 +36,41 @@ func init() { // NewTimeCollector returns a new Collector exposing the current system time in // seconds since epoch. 
-func NewTimeCollector(logger log.Logger) (Collector, error) { +func NewTimeCollector(logger *slog.Logger) (Collector, error) { + const subsystem = "time" return &timeCollector{ - desc: prometheus.NewDesc( - namespace+"_time_seconds", + now: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "seconds"), "System time in seconds since epoch (1970).", nil, nil, - ), + ), prometheus.GaugeValue}, + zone: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "zone_offset_seconds"), + "System time zone offset in seconds.", + []string{"time_zone"}, nil, + ), prometheus.GaugeValue}, + clocksourcesAvailable: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "clocksource_available_info"), + "Available clocksources read from '/sys/devices/system/clocksource'.", + []string{"device", "clocksource"}, nil, + ), prometheus.GaugeValue}, + clocksourceCurrent: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "clocksource_current_info"), + "Current clocksource read from '/sys/devices/system/clocksource'.", + []string{"device", "clocksource"}, nil, + ), prometheus.GaugeValue}, logger: logger, }, nil } func (c *timeCollector) Update(ch chan<- prometheus.Metric) error { - now := float64(time.Now().UnixNano()) / 1e9 - level.Debug(c.logger).Log("msg", "Return time", "now", now) - ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, now) - return nil + now := time.Now() + nowSec := float64(now.UnixNano()) / 1e9 + zone, zoneOffset := now.Zone() + + c.logger.Debug("Return time", "now", nowSec) + ch <- c.now.mustNewConstMetric(nowSec) + c.logger.Debug("Zone offset", "offset", zoneOffset, "time_zone", zone) + ch <- c.zone.mustNewConstMetric(float64(zoneOffset), zone) + return c.update(ch) } diff --git a/collector/time_linux.go b/collector/time_linux.go new file mode 100644 index 0000000000..afd7208e3e --- /dev/null +++ b/collector/time_linux.go @@ -0,0 +1,46 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
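// The time collector now exports the local zone offset alongside the epoch
// timestamp. A stdlib-only sketch of the two values it derives from a single
// time.Now() call; illustrative, not collector code.
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()

	// node_time_seconds: seconds since the Unix epoch, as a float.
	seconds := float64(now.UnixNano()) / 1e9

	// node_time_zone_offset_seconds{time_zone="..."}: the current UTC offset.
	zone, offset := now.Zone()

	fmt.Printf("seconds=%.9f zone=%s offset=%ds\n", seconds, zone, offset)
}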
+ +//go:build !notime + +package collector + +import ( + "fmt" + "strconv" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +func (c *timeCollector) update(ch chan<- prometheus.Metric) error { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return fmt.Errorf("failed to open procfs: %w", err) + } + + clocksources, err := fs.ClockSources() + if err != nil { + return fmt.Errorf("couldn't get clocksources: %w", err) + } + c.logger.Debug("in Update", "clocksources", fmt.Sprintf("%v", clocksources)) + + for i, clocksource := range clocksources { + is := strconv.Itoa(i) + for _, cs := range clocksource.Available { + ch <- c.clocksourcesAvailable.mustNewConstMetric(1.0, is, cs) + } + ch <- c.clocksourceCurrent.mustNewConstMetric(1.0, is, clocksource.Current) + } + return nil +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go b/collector/time_other.go similarity index 70% rename from vendor/github.com/prometheus/procfs/cpuinfo_arm.go rename to collector/time_other.go index 8355507706..5658fe4810 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go +++ b/collector/time_other.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,8 +11,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux +//go:build !linux && !notime -package procfs +package collector -var parseCPUInfo = parseCPUInfoARM +import ( + "github.com/prometheus/client_golang/prometheus" +) + +func (c *timeCollector) update(ch chan<- prometheus.Metric) error { + return nil +} diff --git a/collector/timex.go b/collector/timex.go index 4dc4d6c35c..6494726ef7 100644 --- a/collector/timex.go +++ b/collector/timex.go @@ -11,15 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux -// +build !notimex +//go:build linux && !notimex package collector import ( + "errors" "fmt" + "log/slog" + "os" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) @@ -35,6 +36,9 @@ const ( // 1 second in nanoSeconds = 1000000000 microSeconds = 1000000 + + // See NOTES in adjtimex(2). + ppm16frac = 1000000.0 * 65536.0 ) type timexCollector struct { @@ -55,7 +59,7 @@ type timexCollector struct { stbcnt, tai, syncStatus typedDesc - logger log.Logger + logger *slog.Logger } func init() { @@ -63,7 +67,7 @@ func init() { } // NewTimexCollector returns a new Collector exposing adjtime(3) stats. -func NewTimexCollector(logger log.Logger) (Collector, error) { +func NewTimexCollector(logger *slog.Logger) (Collector, error) { const subsystem = "timex" return &timexCollector{ @@ -163,6 +167,10 @@ func (c *timexCollector) Update(ch chan<- prometheus.Metric) error { status, err := unix.Adjtimex(timex) if err != nil { + if errors.Is(err, os.ErrPermission) { + c.logger.Debug("Not collecting timex metrics", "err", err) + return ErrNoData + } return fmt.Errorf("failed to retrieve adjtimex stats: %w", err) } @@ -176,8 +184,6 @@ func (c *timexCollector) Update(ch chan<- prometheus.Metric) error { } else { divisor = microSeconds } - // See NOTES in adjtimex(2). 
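// The ppm16frac constant hoisted to package scope above is the adjtimex(2)
// frequency scale: Freq is parts-per-million shifted left by 16 bits. A
// Linux-only sketch of reading the kernel clock discipline with
// golang.org/x/sys/unix; illustrative, not the timex collector itself.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

const (
	nanoSeconds  = 1000000000.0
	microSeconds = 1000000.0
	// See NOTES in adjtimex(2): frequency is expressed in 2^-16 ppm units.
	ppm16frac = 1000000.0 * 65536.0
)

func main() {
	var tx unix.Timex
	state, err := unix.Adjtimex(&tx)
	if err != nil {
		log.Fatalf("adjtimex: %v", err)
	}

	// STA_NANO switches Offset (and related fields) to nanoseconds.
	divisor := microSeconds
	if tx.Status&unix.STA_NANO != 0 {
		divisor = nanoSeconds
	}

	fmt.Printf("clock state=%d offset=%gs freq_adjustment=%.9f\n",
		state, float64(tx.Offset)/divisor, 1+float64(tx.Freq)/ppm16frac)
}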
- const ppm16frac = 1000000.0 * 65536.0 ch <- c.syncStatus.mustNewConstMetric(syncStatus) ch <- c.offset.mustNewConstMetric(float64(timex.Offset) / divisor) diff --git a/collector/udp_queues_linux.go b/collector/udp_queues_linux.go index e1b347ed59..0ebd930098 100644 --- a/collector/udp_queues_linux.go +++ b/collector/udp_queues_linux.go @@ -11,17 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noudp_queues +//go:build !noudp_queues package collector import ( "errors" "fmt" + "log/slog" "os" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) @@ -30,7 +29,7 @@ type ( udpQueuesCollector struct { fs procfs.FS desc *prometheus.Desc - logger log.Logger + logger *slog.Logger } ) @@ -39,7 +38,7 @@ func init() { } // NewUDPqueuesCollector returns a new Collector exposing network udp queued bytes. -func NewUDPqueuesCollector(logger log.Logger) (Collector, error) { +func NewUDPqueuesCollector(logger *slog.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) @@ -63,9 +62,9 @@ func (c *udpQueuesCollector) Update(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s4.RxQueueLength), "rx", "v4") } else { if errors.Is(errIPv4, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "not collecting ipv4 based metrics") + c.logger.Debug("not collecting ipv4 based metrics") } else { - return fmt.Errorf("couldn't get upd queued bytes: %w", errIPv4) + return fmt.Errorf("couldn't get udp queued bytes: %w", errIPv4) } } @@ -75,9 +74,9 @@ func (c *udpQueuesCollector) Update(ch chan<- prometheus.Metric) error { ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s6.RxQueueLength), "rx", "v6") } else { if errors.Is(errIPv6, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "not collecting ipv6 based metrics") + c.logger.Debug("not collecting ipv6 based metrics") } else { - return fmt.Errorf("couldn't get upd6 queued bytes: %w", errIPv6) + return fmt.Errorf("couldn't get udp6 queued bytes: %w", errIPv6) } } diff --git a/collector/uname.go b/collector/uname.go index 2c0c3068a1..737a5a8712 100644 --- a/collector/uname.go +++ b/collector/uname.go @@ -11,13 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build darwin freebsd openbsd linux -// +build !nouname +//go:build (darwin || freebsd || openbsd || netbsd || linux || aix) && !nouname package collector import ( - "github.com/go-kit/kit/log" + "log/slog" + "github.com/prometheus/client_golang/prometheus" ) @@ -36,7 +36,7 @@ var unameDesc = prometheus.NewDesc( ) type unameCollector struct { - logger log.Logger + logger *slog.Logger } type uname struct { SysName string @@ -48,11 +48,11 @@ type uname struct { } func init() { - registerCollector("uname", defaultEnabled, newUnameCollector) + registerCollector("uname", defaultEnabled, NewUnameCollector) } // NewUnameCollector returns new unameCollector. 
-func newUnameCollector(logger log.Logger) (Collector, error) { +func NewUnameCollector(logger *slog.Logger) (Collector, error) { return &unameCollector{logger}, nil } diff --git a/collector/uname_bsd.go b/collector/uname_bsd.go index fd8db9b4d9..07f7aca063 100644 --- a/collector/uname_bsd.go +++ b/collector/uname_bsd.go @@ -11,13 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build darwin freebsd openbsd -// +build !nouname +//go:build (darwin || freebsd || openbsd || netbsd || aix) && !nouname package collector import ( - "bytes" "strings" "golang.org/x/sys/unix" @@ -32,10 +30,10 @@ func getUname() (uname, error) { nodeName, domainName := parseHostNameAndDomainName(utsname) output := uname{ - SysName: string(utsname.Sysname[:bytes.IndexByte(utsname.Sysname[:], 0)]), - Release: string(utsname.Release[:bytes.IndexByte(utsname.Release[:], 0)]), - Version: string(utsname.Version[:bytes.IndexByte(utsname.Version[:], 0)]), - Machine: string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), + SysName: unix.ByteSliceToString(utsname.Sysname[:]), + Release: unix.ByteSliceToString(utsname.Release[:]), + Version: unix.ByteSliceToString(utsname.Version[:]), + Machine: unix.ByteSliceToString(utsname.Machine[:]), NodeName: nodeName, DomainName: domainName, } @@ -46,7 +44,7 @@ func getUname() (uname, error) { // parseHostNameAndDomainName for FreeBSD,OpenBSD,Darwin. // Attempts to emulate what happens in the Linux uname calls since these OS doesn't have a Domainname. func parseHostNameAndDomainName(utsname unix.Utsname) (hostname string, domainname string) { - nodename := string(utsname.Nodename[:bytes.IndexByte(utsname.Nodename[:], 0)]) + nodename := unix.ByteSliceToString(utsname.Nodename[:]) split := strings.SplitN(nodename, ".", 2) // We'll always have at least a single element in the array. We assume this diff --git a/collector/uname_linux.go b/collector/uname_linux.go index 65de6297bb..d3c4b5aefb 100644 --- a/collector/uname_linux.go +++ b/collector/uname_linux.go @@ -11,15 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
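// The uname collectors drop the manual bytes.IndexByte NUL-trimming in favour
// of unix.ByteSliceToString. A Linux-only sketch of the same call pattern;
// illustrative only.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var uts unix.Utsname
	if err := unix.Uname(&uts); err != nil {
		log.Fatalf("uname: %v", err)
	}

	// Each Utsname field is a fixed-size, NUL-padded byte array;
	// ByteSliceToString stops at the first NUL byte.
	fmt.Println("sysname: ", unix.ByteSliceToString(uts.Sysname[:]))
	fmt.Println("release: ", unix.ByteSliceToString(uts.Release[:]))
	fmt.Println("machine: ", unix.ByteSliceToString(uts.Machine[:]))
	fmt.Println("nodename:", unix.ByteSliceToString(uts.Nodename[:]))
}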
-// +build !nouname +//go:build !nouname package collector -import ( - "bytes" - - "golang.org/x/sys/unix" -) +import "golang.org/x/sys/unix" func getUname() (uname, error) { var utsname unix.Utsname @@ -28,12 +24,12 @@ } output := uname{ - SysName: string(utsname.Sysname[:bytes.IndexByte(utsname.Sysname[:], 0)]), - Release: string(utsname.Release[:bytes.IndexByte(utsname.Release[:], 0)]), - Version: string(utsname.Version[:bytes.IndexByte(utsname.Version[:], 0)]), - Machine: string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), - NodeName: string(utsname.Nodename[:bytes.IndexByte(utsname.Nodename[:], 0)]), - DomainName: string(utsname.Domainname[:bytes.IndexByte(utsname.Domainname[:], 0)]), + SysName: unix.ByteSliceToString(utsname.Sysname[:]), + Release: unix.ByteSliceToString(utsname.Release[:]), + Version: unix.ByteSliceToString(utsname.Version[:]), + Machine: unix.ByteSliceToString(utsname.Machine[:]), + NodeName: unix.ByteSliceToString(utsname.Nodename[:]), + DomainName: unix.ByteSliceToString(utsname.Domainname[:]), } return output, nil diff --git a/collector/utils/utils.go b/collector/utils/utils.go new file mode 100644 index 0000000000..9bcaf4c8ea --- /dev/null +++ b/collector/utils/utils.go @@ -0,0 +1,47 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bytes" + "strings" +) + +func SafeDereference[T any](s ...*T) []T { + var resolved []T + for _, v := range s { + if v != nil { + resolved = append(resolved, *v) + } else { + var zeroValue T + resolved = append(resolved, zeroValue) + } + } + return resolved +} + +// SafeBytesToString takes a slice of bytes and sanitizes it for Prometheus label +// values. +// * Terminate the string at the first null byte. +// * Convert any invalid UTF-8 to "�". +func SafeBytesToString(b []byte) string { + var s string + zeroIndex := bytes.IndexByte(b, 0) + if zeroIndex == -1 { + s = string(b) + } else { + s = string(b[:zeroIndex]) + } + return strings.ToValidUTF8(s, "�") +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/collector/utils/utils_test.go similarity index 51% rename from vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go rename to collector/utils/utils_test.go index bd55b45377..3246ebc31f 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ b/collector/utils/utils_test.go @@ -1,6 +1,6 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2025 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 @@ -11,16 +11,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
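// A short usage sketch for the new generic helper above. Sysfs parsers in
// procfs return pointer fields for values that may be absent; SafeDereference
// turns a batch of them into zero-valued defaults suitable for label values.
// The helper body is copied from the new collector/utils package so the
// example runs standalone; the device values are made up.
package main

import "fmt"

func SafeDereference[T any](s ...*T) []T {
	var resolved []T
	for _, v := range s {
		if v != nil {
			resolved = append(resolved, *v)
		} else {
			var zeroValue T
			resolved = append(resolved, zeroValue)
		}
	}
	return resolved
}

func main() {
	identity := "iTCO_wdt"
	var state *string // attribute not exposed by this device

	labels := SafeDereference(&identity, state)
	fmt.Printf("identity=%q state=%q\n", labels[0], labels[1]) // identity="iTCO_wdt" state=""
}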
-// +build linux,appengine !linux - -package util +package utils import ( - "fmt" + "testing" ) -// SysReadFile is here implemented as a noop for builds that do not support -// the read syscall. For example Windows, or Linux on Google App Engine. -func SysReadFile(file string) (string, error) { - return "", fmt.Errorf("not supported on this platform") +func TestSafeBytesToString(t *testing.T) { + foo := []byte("foo\x00") + if want, got := SafeBytesToString(foo), "foo"; want != got { + t.Errorf("Expected: %s, Got: %s", want, got) + } + + foo = []byte{115, 97, 110, 101, 253, 190, 214} + if want, got := SafeBytesToString(foo), "sane�"; want != got { + t.Errorf("Expected: %s, Got: %s", want, got) + } } diff --git a/collector/vmstat_linux.go b/collector/vmstat_linux.go index 605c38adb6..987aebe9eb 100644 --- a/collector/vmstat_linux.go +++ b/collector/vmstat_linux.go @@ -11,21 +11,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !novmstat +//go:build !novmstat package collector import ( "bufio" "fmt" + "log/slog" "os" "regexp" "strconv" "strings" - "github.com/go-kit/kit/log" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" ) const ( @@ -38,7 +38,7 @@ var ( type vmStatCollector struct { fieldPattern *regexp.Regexp - logger log.Logger + logger *slog.Logger } func init() { @@ -46,7 +46,7 @@ func init() { } // NewvmStatCollector returns a new Collector exposing vmstat stats. -func NewvmStatCollector(logger log.Logger) (Collector, error) { +func NewvmStatCollector(logger *slog.Logger) (Collector, error) { pattern := regexp.MustCompile(*vmStatFields) return &vmStatCollector{ fieldPattern: pattern, diff --git a/collector/watchdog.go b/collector/watchdog.go new file mode 100644 index 0000000000..d3e041d5b8 --- /dev/null +++ b/collector/watchdog.go @@ -0,0 +1,138 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && !nowatchdog + +package collector + +import ( + "errors" + "fmt" + "log/slog" + "os" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +type watchdogCollector struct { + fs sysfs.FS + logger *slog.Logger +} + +func init() { + registerCollector("watchdog", defaultEnabled, NewWatchdogCollector) +} + +// NewWatchdogCollector returns a new Collector exposing watchdog stats. 
+func NewWatchdogCollector(logger *slog.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + return &watchdogCollector{ + fs: fs, + logger: logger, + }, nil +} + +var ( + watchdogBootstatusDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "watchdog", "bootstatus"), + "Value of /sys/class/watchdog//bootstatus", + []string{"name"}, nil, + ) + watchdogFwVersionDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "watchdog", "fw_version"), + "Value of /sys/class/watchdog//fw_version", + []string{"name"}, nil, + ) + watchdogNowayoutDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "watchdog", "nowayout"), + "Value of /sys/class/watchdog//nowayout", + []string{"name"}, nil, + ) + watchdogTimeleftDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "watchdog", "timeleft_seconds"), + "Value of /sys/class/watchdog//timeleft", + []string{"name"}, nil, + ) + watchdogTimeoutDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "watchdog", "timeout_seconds"), + "Value of /sys/class/watchdog//timeout", + []string{"name"}, nil, + ) + watchdogPretimeoutDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "watchdog", "pretimeout_seconds"), + "Value of /sys/class/watchdog//pretimeout", + []string{"name"}, nil, + ) + watchdogAccessCs0Desc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "watchdog", "access_cs0"), + "Value of /sys/class/watchdog//access_cs0", + []string{"name"}, nil, + ) + watchdogInfoDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "watchdog", "info"), + "Info of /sys/class/watchdog/", + []string{"name", "options", "identity", "state", "status", "pretimeout_governor"}, nil, + ) +) + +func toLabelValue(ptr *string) string { + if ptr == nil { + return "" + } + return *ptr +} + +func (c *watchdogCollector) Update(ch chan<- prometheus.Metric) error { + watchdogClass, err := c.fs.WatchdogClass() + if err != nil { + if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) || errors.Is(err, os.ErrInvalid) { + c.logger.Debug("Could not read watchdog stats", "err", err) + return ErrNoData + } + return err + } + + for _, wd := range watchdogClass { + if wd.Bootstatus != nil { + ch <- prometheus.MustNewConstMetric(watchdogBootstatusDesc, prometheus.GaugeValue, float64(*wd.Bootstatus), wd.Name) + } + if wd.FwVersion != nil { + ch <- prometheus.MustNewConstMetric(watchdogFwVersionDesc, prometheus.GaugeValue, float64(*wd.FwVersion), wd.Name) + } + if wd.Nowayout != nil { + ch <- prometheus.MustNewConstMetric(watchdogNowayoutDesc, prometheus.GaugeValue, float64(*wd.Nowayout), wd.Name) + } + if wd.Timeleft != nil { + ch <- prometheus.MustNewConstMetric(watchdogTimeleftDesc, prometheus.GaugeValue, float64(*wd.Timeleft), wd.Name) + } + if wd.Timeout != nil { + ch <- prometheus.MustNewConstMetric(watchdogTimeoutDesc, prometheus.GaugeValue, float64(*wd.Timeout), wd.Name) + } + if wd.Pretimeout != nil { + ch <- prometheus.MustNewConstMetric(watchdogPretimeoutDesc, prometheus.GaugeValue, float64(*wd.Pretimeout), wd.Name) + } + if wd.AccessCs0 != nil { + ch <- prometheus.MustNewConstMetric(watchdogAccessCs0Desc, prometheus.GaugeValue, float64(*wd.AccessCs0), wd.Name) + } + + ch <- prometheus.MustNewConstMetric(watchdogInfoDesc, prometheus.GaugeValue, 1.0, + wd.Name, toLabelValue(wd.Options), toLabelValue(wd.Identity), toLabelValue(wd.State), toLabelValue(wd.Status), toLabelValue(wd.PretimeoutGovernor)) + } + + return nil 
+} diff --git a/collector/watchdog_test.go b/collector/watchdog_test.go new file mode 100644 index 0000000000..4fbcff7d53 --- /dev/null +++ b/collector/watchdog_test.go @@ -0,0 +1,91 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file ewcept in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && !nowatchdog + +package collector + +import ( + "fmt" + "io" + "log/slog" + "strings" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" +) + +type testWatchdogCollector struct { + wc Collector +} + +func (c testWatchdogCollector) Collect(ch chan<- prometheus.Metric) { + c.wc.Update(ch) +} + +func (c testWatchdogCollector) Describe(ch chan<- *prometheus.Desc) { + prometheus.DescribeByCollect(c, ch) +} + +func TestWatchdogStats(t *testing.T) { + testcase := `# HELP node_watchdog_access_cs0 Value of /sys/class/watchdog//access_cs0 + # TYPE node_watchdog_access_cs0 gauge + node_watchdog_access_cs0{name="watchdog0"} 0 + # HELP node_watchdog_bootstatus Value of /sys/class/watchdog//bootstatus + # TYPE node_watchdog_bootstatus gauge + node_watchdog_bootstatus{name="watchdog0"} 1 + # HELP node_watchdog_fw_version Value of /sys/class/watchdog//fw_version + # TYPE node_watchdog_fw_version gauge + node_watchdog_fw_version{name="watchdog0"} 2 + # HELP node_watchdog_info Info of /sys/class/watchdog/ + # TYPE node_watchdog_info gauge + node_watchdog_info{identity="",name="watchdog1",options="",pretimeout_governor="",state="",status=""} 1 + node_watchdog_info{identity="Software Watchdog",name="watchdog0",options="0x8380",pretimeout_governor="noop",state="active",status="0x8000"} 1 + # HELP node_watchdog_nowayout Value of /sys/class/watchdog//nowayout + # TYPE node_watchdog_nowayout gauge + node_watchdog_nowayout{name="watchdog0"} 0 + # HELP node_watchdog_pretimeout_seconds Value of /sys/class/watchdog//pretimeout + # TYPE node_watchdog_pretimeout_seconds gauge + node_watchdog_pretimeout_seconds{name="watchdog0"} 120 + # HELP node_watchdog_timeleft_seconds Value of /sys/class/watchdog//timeleft + # TYPE node_watchdog_timeleft_seconds gauge + node_watchdog_timeleft_seconds{name="watchdog0"} 300 + # HELP node_watchdog_timeout_seconds Value of /sys/class/watchdog//timeout + # TYPE node_watchdog_timeout_seconds gauge + node_watchdog_timeout_seconds{name="watchdog0"} 60 + ` + *sysPath = "fixtures/sys" + + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + c, err := NewWatchdogCollector(logger) + if err != nil { + t.Fatal(err) + } + reg := prometheus.NewRegistry() + reg.MustRegister(&testWatchdogCollector{wc: c}) + + sink := make(chan prometheus.Metric) + go func() { + err = c.Update(sink) + if err != nil { + panic(fmt.Errorf("failed to update collector: %s", err)) + } + close(sink) + }() + + err = testutil.GatherAndCompare(reg, strings.NewReader(testcase)) + if err != nil { + t.Fatal(err) + } +} diff --git a/collector/wifi_linux.go b/collector/wifi_linux.go index 076982d712..0d2a6b088b 100644 --- a/collector/wifi_linux.go +++ 
b/collector/wifi_linux.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nowifi +//go:build !nowifi package collector @@ -19,33 +19,33 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "log/slog" "os" "path/filepath" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/mdlayher/wifi" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" ) type wifiCollector struct { interfaceFrequencyHertz *prometheus.Desc stationInfo *prometheus.Desc - stationConnectedSecondsTotal *prometheus.Desc - stationInactiveSeconds *prometheus.Desc - stationReceiveBitsPerSecond *prometheus.Desc - stationTransmitBitsPerSecond *prometheus.Desc - stationReceiveBytesTotal *prometheus.Desc - stationTransmitBytesTotal *prometheus.Desc - stationSignalDBM *prometheus.Desc - stationTransmitRetriesTotal *prometheus.Desc - stationTransmitFailedTotal *prometheus.Desc - stationBeaconLossTotal *prometheus.Desc - - logger log.Logger + stationConnectedSecondsTotal *prometheus.Desc + stationInactiveSeconds *prometheus.Desc + stationReceiveBitsPerSecond *prometheus.Desc + stationTransmitBitsPerSecond *prometheus.Desc + stationReceiveBytesTotal *prometheus.Desc + stationTransmitBytesTotal *prometheus.Desc + stationSignalDBM *prometheus.Desc + stationTransmitRetriesTotal *prometheus.Desc + stationTransmitFailedTotal *prometheus.Desc + stationBeaconLossTotal *prometheus.Desc + stationTransmittedPacketsTotal *prometheus.Desc + stationReceivedPacketsTotal *prometheus.Desc + + logger *slog.Logger } var ( @@ -67,7 +67,7 @@ type wifiStater interface { } // NewWifiCollector returns a new Collector exposing Wifi statistics. -func NewWifiCollector(logger log.Logger) (Collector, error) { +func NewWifiCollector(logger *slog.Logger) (Collector, error) { const ( subsystem = "wifi" ) @@ -160,6 +160,20 @@ func NewWifiCollector(logger log.Logger) (Collector, error) { labels, nil, ), + + stationTransmittedPacketsTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "station_transmitted_packets_total"), + "The total number of packets transmitted by a station.", + labels, + nil, + ), + + stationReceivedPacketsTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "station_received_packets_total"), + "The total number of packets received by a station.", + labels, + nil, + ), logger: logger, }, nil } @@ -169,11 +183,11 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error { if err != nil { // Cannot access wifi metrics, report no error. 
if errors.Is(err, os.ErrNotExist) { - level.Debug(c.logger).Log("msg", "wifi collector metrics are not available for this system") + c.logger.Debug("wifi collector metrics are not available for this system") return ErrNoData } if errors.Is(err, os.ErrPermission) { - level.Debug(c.logger).Log("msg", "wifi collector got permission denied when accessing metrics") + c.logger.Debug("wifi collector got permission denied when accessing metrics") return ErrNoData } @@ -192,7 +206,7 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error { continue } - level.Debug(c.logger).Log("msg", "probing wifi device with type", "wifi", ifi.Name, "type", ifi.Type) + c.logger.Debug("probing wifi device with type", "wifi", ifi.Name, "type", ifi.Type) ch <- prometheus.MustNewConstMetric( c.interfaceFrequencyHertz, @@ -210,7 +224,7 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error { case err == nil: c.updateBSSStats(ch, ifi.Name, bss) case errors.Is(err, os.ErrNotExist): - level.Debug(c.logger).Log("msg", "BSS information not found for wifi device", "name", ifi.Name) + c.logger.Debug("BSS information not found for wifi device", "name", ifi.Name) default: return fmt.Errorf("failed to retrieve BSS for device %s: %v", ifi.Name, err) @@ -223,7 +237,7 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error { c.updateStationStats(ch, ifi.Name, station) } case errors.Is(err, os.ErrNotExist): - level.Debug(c.logger).Log("msg", "station information not found for wifi device", "name", ifi.Name) + c.logger.Debug("station information not found for wifi device", "name", ifi.Name) default: return fmt.Errorf("failed to retrieve station info for device %q: %v", ifi.Name, err) @@ -326,6 +340,22 @@ func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device s device, info.HardwareAddr.String(), ) + + ch <- prometheus.MustNewConstMetric( + c.stationTransmittedPacketsTotal, + prometheus.CounterValue, + float64(info.TransmittedPackets), + device, + info.HardwareAddr.String(), + ) + + ch <- prometheus.MustNewConstMetric( + c.stationReceivedPacketsTotal, + prometheus.CounterValue, + float64(info.ReceivedPackets), + device, + info.HardwareAddr.String(), + ) } func mHzToHz(mHz int) float64 { @@ -364,8 +394,8 @@ type mockWifiStater struct { fixtures string } -func (s *mockWifiStater) unmarshalJSONFile(filename string, v interface{}) error { - b, err := ioutil.ReadFile(filepath.Join(s.fixtures, filename)) +func (s *mockWifiStater) unmarshalJSONFile(filename string, v any) error { + b, err := os.ReadFile(filepath.Join(s.fixtures, filename)) if err != nil { return err } diff --git a/collector/xfrm.go b/collector/xfrm.go new file mode 100644 index 0000000000..f69d362c32 --- /dev/null +++ b/collector/xfrm.go @@ -0,0 +1,227 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
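Many of the changes in this section are the same mechanical migration from github.com/go-kit/kit/log to the standard library's log/slog, as seen in wifi_linux.go above. A minimal sketch of the before/after call shapes (the logger construction and the "wlan0" value are illustrative, not taken from the patch):

package main

import (
	"log/slog"
	"os"
)

func main() {
	// slog handlers default to Info, so enable Debug explicitly for this demo.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	// Before (go-kit/log):
	//   level.Debug(logger).Log("msg", "BSS information not found for wifi device", "name", ifi.Name)
	// After (log/slog), matching the calls introduced above:
	logger.Debug("BSS information not found for wifi device", "name", "wlan0")
}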
+ +//go:build !noxfrm + +package collector + +import ( + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +type xfrmCollector struct { + fs procfs.FS + logger *slog.Logger +} + +func init() { + registerCollector("xfrm", defaultDisabled, NewXfrmCollector) +} + +// NewXfrmCollector returns a new Collector exposing XFRM stats. +func NewXfrmCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + return &xfrmCollector{ + fs: fs, + logger: logger, + }, nil +} + +var ( + xfrmInErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_error_packets_total"), + "All errors not matched by other", + nil, nil, + ) + xfrmInBufferErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_buffer_error_packets_total"), + "No buffer is left", + nil, nil, + ) + xfrmInHdrErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_hdr_error_packets_total"), + "Header error", + nil, nil, + ) + xfrmInNoStatesDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_no_states_packets_total"), + "No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong", + nil, nil, + ) + xfrmInStateProtoErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_state_proto_error_packets_total"), + "Transformation protocol specific error e.g. SA key is wrong", + nil, nil, + ) + xfrmInStateModeErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_state_mode_error_packets_total"), + "Transformation mode specific error", + nil, nil, + ) + xfrmInStateSeqErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_state_seq_error_packets_total"), + "Sequence error i.e. Sequence number is out of window", + nil, nil, + ) + xfrmInStateExpiredDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_state_expired_packets_total"), + "State is expired", + nil, nil, + ) + xfrmInStateMismatchDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_state_mismatch_packets_total"), + "State has mismatch option e.g. UDP encapsulation type is mismatch", + nil, nil, + ) + xfrmInStateInvalidDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_state_invalid_packets_total"), + "State is invalid", + nil, nil, + ) + xfrmInTmplMismatchDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_tmpl_mismatch_packets_total"), + "No matching template for states e.g. Inbound SAs are correct but SP rule is wrong", + nil, nil, + ) + xfrmInNoPolsDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_no_pols_packets_total"), + "No policy is found for states e.g. 
Inbound SAs are correct but no SP is found", + nil, nil, + ) + xfrmInPolBlockDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_pol_block_packets_total"), + "Policy discards", + nil, nil, + ) + xfrmInPolErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "in_pol_error_packets_total"), + "Policy error", + nil, nil, + ) + xfrmOutErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_error_packets_total"), + "All errors which is not matched others", + nil, nil, + ) + xfrmOutBundleGenErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_bundle_gen_error_packets_total"), + "Bundle generation error", + nil, nil, + ) + xfrmOutBundleCheckErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_bundle_check_error_packets_total"), + "Bundle check error", + nil, nil, + ) + xfrmOutNoStatesDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_no_states_packets_total"), + "No state is found", + nil, nil, + ) + xfrmOutStateProtoErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_state_proto_error_packets_total"), + "Transformation protocol specific error", + nil, nil, + ) + xfrmOutStateModeErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_state_mode_error_packets_total"), + "Transformation mode specific error", + nil, nil, + ) + xfrmOutStateSeqErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_state_seq_error_packets_total"), + "Sequence error i.e. Sequence number overflow", + nil, nil, + ) + xfrmOutStateExpiredDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_state_expired_packets_total"), + "State is expired", + nil, nil, + ) + xfrmOutPolBlockDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_pol_block_packets_total"), + "Policy discards", + nil, nil, + ) + xfrmOutPolDeadDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_pol_dead_packets_total"), + "Policy is dead", + nil, nil, + ) + xfrmOutPolErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_pol_error_packets_total"), + "Policy error", + nil, nil, + ) + xfrmFwdHdrErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "fwd_hdr_error_packets_total"), + "Forward routing of a packet is not allowed", + nil, nil, + ) + xfrmOutStateInvalidDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "out_state_invalid_packets_total"), + "State is invalid, perhaps expired", + nil, nil, + ) + xfrmAcquireErrorDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "xfrm", "acquire_error_packets_total"), + "State hasn’t been fully acquired before use", + nil, nil, + ) +) + +func (c *xfrmCollector) Update(ch chan<- prometheus.Metric) error { + stat, err := c.fs.NewXfrmStat() + if err != nil { + return err + } + + ch <- prometheus.MustNewConstMetric(xfrmInErrorDesc, prometheus.CounterValue, float64(stat.XfrmInError)) + ch <- prometheus.MustNewConstMetric(xfrmInBufferErrorDesc, prometheus.CounterValue, float64(stat.XfrmInBufferError)) + ch <- prometheus.MustNewConstMetric(xfrmInHdrErrorDesc, prometheus.CounterValue, float64(stat.XfrmInHdrError)) + ch <- prometheus.MustNewConstMetric(xfrmInNoStatesDesc, prometheus.CounterValue, float64(stat.XfrmInNoStates)) + ch <- prometheus.MustNewConstMetric(xfrmInStateProtoErrorDesc, prometheus.CounterValue, float64(stat.XfrmInStateProtoError)) + ch <- 
prometheus.MustNewConstMetric(xfrmInStateModeErrorDesc, prometheus.CounterValue, float64(stat.XfrmInStateModeError)) + ch <- prometheus.MustNewConstMetric(xfrmInStateSeqErrorDesc, prometheus.CounterValue, float64(stat.XfrmInStateSeqError)) + ch <- prometheus.MustNewConstMetric(xfrmInStateExpiredDesc, prometheus.CounterValue, float64(stat.XfrmInStateExpired)) + ch <- prometheus.MustNewConstMetric(xfrmInStateMismatchDesc, prometheus.CounterValue, float64(stat.XfrmInStateMismatch)) + ch <- prometheus.MustNewConstMetric(xfrmInStateInvalidDesc, prometheus.CounterValue, float64(stat.XfrmInStateInvalid)) + ch <- prometheus.MustNewConstMetric(xfrmInTmplMismatchDesc, prometheus.CounterValue, float64(stat.XfrmInTmplMismatch)) + ch <- prometheus.MustNewConstMetric(xfrmInNoPolsDesc, prometheus.CounterValue, float64(stat.XfrmInNoPols)) + ch <- prometheus.MustNewConstMetric(xfrmInPolBlockDesc, prometheus.CounterValue, float64(stat.XfrmInPolBlock)) + ch <- prometheus.MustNewConstMetric(xfrmInPolErrorDesc, prometheus.CounterValue, float64(stat.XfrmInPolError)) + ch <- prometheus.MustNewConstMetric(xfrmOutErrorDesc, prometheus.CounterValue, float64(stat.XfrmOutError)) + ch <- prometheus.MustNewConstMetric(xfrmOutBundleGenErrorDesc, prometheus.CounterValue, float64(stat.XfrmOutBundleGenError)) + ch <- prometheus.MustNewConstMetric(xfrmOutBundleCheckErrorDesc, prometheus.CounterValue, float64(stat.XfrmOutBundleCheckError)) + ch <- prometheus.MustNewConstMetric(xfrmOutNoStatesDesc, prometheus.CounterValue, float64(stat.XfrmOutNoStates)) + ch <- prometheus.MustNewConstMetric(xfrmOutStateProtoErrorDesc, prometheus.CounterValue, float64(stat.XfrmOutStateProtoError)) + ch <- prometheus.MustNewConstMetric(xfrmOutStateModeErrorDesc, prometheus.CounterValue, float64(stat.XfrmOutStateModeError)) + ch <- prometheus.MustNewConstMetric(xfrmOutStateSeqErrorDesc, prometheus.CounterValue, float64(stat.XfrmOutStateSeqError)) + ch <- prometheus.MustNewConstMetric(xfrmOutStateExpiredDesc, prometheus.CounterValue, float64(stat.XfrmOutStateExpired)) + ch <- prometheus.MustNewConstMetric(xfrmOutPolBlockDesc, prometheus.CounterValue, float64(stat.XfrmOutPolBlock)) + ch <- prometheus.MustNewConstMetric(xfrmOutPolDeadDesc, prometheus.CounterValue, float64(stat.XfrmOutPolDead)) + ch <- prometheus.MustNewConstMetric(xfrmOutPolErrorDesc, prometheus.CounterValue, float64(stat.XfrmOutPolError)) + ch <- prometheus.MustNewConstMetric(xfrmFwdHdrErrorDesc, prometheus.CounterValue, float64(stat.XfrmFwdHdrError)) + ch <- prometheus.MustNewConstMetric(xfrmOutStateInvalidDesc, prometheus.CounterValue, float64(stat.XfrmOutStateInvalid)) + ch <- prometheus.MustNewConstMetric(xfrmAcquireErrorDesc, prometheus.CounterValue, float64(stat.XfrmAcquireError)) + + return err +} diff --git a/collector/xfrm_test.go b/collector/xfrm_test.go new file mode 100644 index 0000000000..9598e113bf --- /dev/null +++ b/collector/xfrm_test.go @@ -0,0 +1,150 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
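The xfrm collector above is a thin wrapper around procfs: every field of the XfrmStat struct parsed from /proc/net/xfrm_stat is exported as one counter. A rough standalone sketch of the same read path, not part of the patch, assuming /proc is mounted at its default location and the kernel exposes xfrm statistics:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	// NewXfrmStat parses /proc/net/xfrm_stat, one "Name value" pair per line.
	stat, err := fs.NewXfrmStat()
	if err != nil {
		panic(err)
	}

	// Each field becomes one node_xfrm_*_packets_total counter, for example:
	fmt.Println("node_xfrm_in_error_packets_total", stat.XfrmInError)
	fmt.Println("node_xfrm_out_no_states_packets_total", stat.XfrmOutNoStates)
}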
+ +//go:build !noxfrm + +package collector + +import ( + "fmt" + "io" + "log/slog" + "strings" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" +) + +type testXfrmCollector struct { + xc Collector +} + +func (c testXfrmCollector) Collect(ch chan<- prometheus.Metric) { + c.xc.Update(ch) +} + +func (c testXfrmCollector) Describe(ch chan<- *prometheus.Desc) { + prometheus.DescribeByCollect(c, ch) +} + +func TestXfrmStats(t *testing.T) { + testcase := `# HELP node_xfrm_acquire_error_packets_total State hasn’t been fully acquired before use + # TYPE node_xfrm_acquire_error_packets_total counter + node_xfrm_acquire_error_packets_total 24532 + # HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed + # TYPE node_xfrm_fwd_hdr_error_packets_total counter + node_xfrm_fwd_hdr_error_packets_total 6654 + # HELP node_xfrm_in_buffer_error_packets_total No buffer is left + # TYPE node_xfrm_in_buffer_error_packets_total counter + node_xfrm_in_buffer_error_packets_total 2 + # HELP node_xfrm_in_error_packets_total All errors not matched by other + # TYPE node_xfrm_in_error_packets_total counter + node_xfrm_in_error_packets_total 1 + # HELP node_xfrm_in_hdr_error_packets_total Header error + # TYPE node_xfrm_in_hdr_error_packets_total counter + node_xfrm_in_hdr_error_packets_total 4 + # HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found + # TYPE node_xfrm_in_no_pols_packets_total counter + node_xfrm_in_no_pols_packets_total 65432 + # HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong + # TYPE node_xfrm_in_no_states_packets_total counter + node_xfrm_in_no_states_packets_total 3 + # HELP node_xfrm_in_pol_block_packets_total Policy discards + # TYPE node_xfrm_in_pol_block_packets_total counter + node_xfrm_in_pol_block_packets_total 100 + # HELP node_xfrm_in_pol_error_packets_total Policy error + # TYPE node_xfrm_in_pol_error_packets_total counter + node_xfrm_in_pol_error_packets_total 10000 + # HELP node_xfrm_in_state_expired_packets_total State is expired + # TYPE node_xfrm_in_state_expired_packets_total counter + node_xfrm_in_state_expired_packets_total 7 + # HELP node_xfrm_in_state_invalid_packets_total State is invalid + # TYPE node_xfrm_in_state_invalid_packets_total counter + node_xfrm_in_state_invalid_packets_total 55555 + # HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch + # TYPE node_xfrm_in_state_mismatch_packets_total counter + node_xfrm_in_state_mismatch_packets_total 23451 + # HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error + # TYPE node_xfrm_in_state_mode_error_packets_total counter + node_xfrm_in_state_mode_error_packets_total 100 + # HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong + # TYPE node_xfrm_in_state_proto_error_packets_total counter + node_xfrm_in_state_proto_error_packets_total 40 + # HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window + # TYPE node_xfrm_in_state_seq_error_packets_total counter + node_xfrm_in_state_seq_error_packets_total 6000 + # HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. 
Inbound SAs are correct but SP rule is wrong + # TYPE node_xfrm_in_tmpl_mismatch_packets_total counter + node_xfrm_in_tmpl_mismatch_packets_total 51 + # HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error + # TYPE node_xfrm_out_bundle_check_error_packets_total counter + node_xfrm_out_bundle_check_error_packets_total 555 + # HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error + # TYPE node_xfrm_out_bundle_gen_error_packets_total counter + node_xfrm_out_bundle_gen_error_packets_total 43321 + # HELP node_xfrm_out_error_packets_total All errors which is not matched others + # TYPE node_xfrm_out_error_packets_total counter + node_xfrm_out_error_packets_total 1e+06 + # HELP node_xfrm_out_no_states_packets_total No state is found + # TYPE node_xfrm_out_no_states_packets_total counter + node_xfrm_out_no_states_packets_total 869 + # HELP node_xfrm_out_pol_block_packets_total Policy discards + # TYPE node_xfrm_out_pol_block_packets_total counter + node_xfrm_out_pol_block_packets_total 43456 + # HELP node_xfrm_out_pol_dead_packets_total Policy is dead + # TYPE node_xfrm_out_pol_dead_packets_total counter + node_xfrm_out_pol_dead_packets_total 7656 + # HELP node_xfrm_out_pol_error_packets_total Policy error + # TYPE node_xfrm_out_pol_error_packets_total counter + node_xfrm_out_pol_error_packets_total 1454 + # HELP node_xfrm_out_state_expired_packets_total State is expired + # TYPE node_xfrm_out_state_expired_packets_total counter + node_xfrm_out_state_expired_packets_total 565 + # HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired + # TYPE node_xfrm_out_state_invalid_packets_total counter + node_xfrm_out_state_invalid_packets_total 28765 + # HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error + # TYPE node_xfrm_out_state_mode_error_packets_total counter + node_xfrm_out_state_mode_error_packets_total 8 + # HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error + # TYPE node_xfrm_out_state_proto_error_packets_total counter + node_xfrm_out_state_proto_error_packets_total 4542 + # HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow + # TYPE node_xfrm_out_state_seq_error_packets_total counter + node_xfrm_out_state_seq_error_packets_total 543 + ` + *procPath = "fixtures/proc" + + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + c, err := NewXfrmCollector(logger) + if err != nil { + t.Fatal(err) + } + reg := prometheus.NewRegistry() + reg.MustRegister(&testXfrmCollector{xc: c}) + + sink := make(chan prometheus.Metric) + go func() { + err = c.Update(sink) + if err != nil { + panic(fmt.Errorf("failed to update collector: %s", err)) + } + close(sink) + }() + + err = testutil.GatherAndCompare(reg, strings.NewReader(testcase)) + if err != nil { + t.Fatal(err) + } +} diff --git a/collector/xfs_linux.go b/collector/xfs_linux.go index 36dfff5000..b5157849bc 100644 --- a/collector/xfs_linux.go +++ b/collector/xfs_linux.go @@ -11,14 +11,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !noxfs +//go:build !noxfs package collector import ( "fmt" + "log/slog" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/xfs" ) @@ -26,7 +26,7 @@ import ( // An xfsCollector is a Collector which gathers metrics from XFS filesystems. 
type xfsCollector struct { fs xfs.FS - logger log.Logger + logger *slog.Logger } func init() { @@ -34,7 +34,7 @@ func init() { } // NewXFSCollector returns a new Collector exposing XFS statistics. -func NewXFSCollector(logger log.Logger) (Collector, error) { +func NewXFSCollector(logger *slog.Logger) (Collector, error) { fs, err := xfs.NewFS(*procPath, *sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) @@ -199,6 +199,41 @@ func (c *xfsCollector) updateXFSStats(ch chan<- prometheus.Metric, s *xfs.Stats) desc: "Number of times the directory getdents operation was performed for a filesystem.", value: float64(s.DirectoryOperation.Getdents), }, + { + name: "inode_operation_attempts_total", + desc: "Number of times the OS looked for an XFS inode in the inode cache.", + value: float64(s.InodeOperation.Attempts), + }, + { + name: "inode_operation_found_total", + desc: "Number of times the OS looked for and found an XFS inode in the inode cache.", + value: float64(s.InodeOperation.Found), + }, + { + name: "inode_operation_recycled_total", + desc: "Number of times the OS found an XFS inode in the cache, but could not use it as it was being recycled.", + value: float64(s.InodeOperation.Recycle), + }, + { + name: "inode_operation_missed_total", + desc: "Number of times the OS looked for an XFS inode in the cache, but did not find it.", + value: float64(s.InodeOperation.Missed), + }, + { + name: "inode_operation_duplicates_total", + desc: "Number of times the OS tried to add a missing XFS inode to the inode cache, but found it had already been added by another process.", + value: float64(s.InodeOperation.Duplicate), + }, + { + name: "inode_operation_reclaims_total", + desc: "Number of times the OS reclaimed an XFS inode from the inode cache to free memory for another purpose.", + value: float64(s.InodeOperation.Reclaims), + }, + { + name: "inode_operation_attribute_changes_total", + desc: "Number of times the OS explicitly changed the attributes of an XFS inode.", + value: float64(s.InodeOperation.AttributeChange), + }, { name: "read_calls_total", desc: "Number of read(2) system calls made to files in a filesystem.", diff --git a/collector/zfs.go b/collector/zfs.go deleted file mode 100644 index b530e350bd..0000000000 --- a/collector/zfs.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build linux -// +build !nozfs - -package collector - -import ( - "errors" - "strings" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/prometheus/client_golang/prometheus" -) - -var errZFSNotAvailable = errors.New("ZFS / ZFS statistics are not available") - -type zfsSysctl string - -func init() { - registerCollector("zfs", defaultEnabled, NewZFSCollector) -} - -type zfsCollector struct { - linuxProcpathBase string - linuxZpoolIoPath string - linuxZpoolObjsetPath string - linuxPathMap map[string]string - logger log.Logger -} - -// NewZFSCollector returns a new Collector exposing ZFS statistics. -func NewZFSCollector(logger log.Logger) (Collector, error) { - return &zfsCollector{ - linuxProcpathBase: "spl/kstat/zfs", - linuxZpoolIoPath: "/*/io", - linuxZpoolObjsetPath: "/*/objset-*", - linuxPathMap: map[string]string{ - "zfs_abd": "abdstats", - "zfs_arc": "arcstats", - "zfs_dbuf": "dbuf_stats", - "zfs_dmu_tx": "dmu_tx", - "zfs_dnode": "dnodestats", - "zfs_fm": "fm", - "zfs_vdev_cache": "vdev_cache_stats", // vdev_cache is deprecated - "zfs_vdev_mirror": "vdev_mirror_stats", - "zfs_xuio": "xuio_stats", // no known consumers of the XUIO interface on Linux exist - "zfs_zfetch": "zfetchstats", - "zfs_zil": "zil", - }, - logger: logger, - }, nil -} - -func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error { - for subsystem := range c.linuxPathMap { - if err := c.updateZfsStats(subsystem, ch); err != nil { - if err == errZFSNotAvailable { - level.Debug(c.logger).Log("err", err) - // ZFS /proc files are added as new features to ZFS arrive, it is ok to continue - continue - } - return err - } - } - - // Pool stats - return c.updatePoolStats(ch) -} - -func (s zfsSysctl) metricName() string { - parts := strings.Split(string(s), ".") - return strings.Replace(parts[len(parts)-1], "-", "_", -1) -} - -func (c *zfsCollector) constSysctlMetric(subsystem string, sysctl zfsSysctl, value uint64) prometheus.Metric { - metricName := sysctl.metricName() - - return prometheus.MustNewConstMetric( - prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, metricName), - string(sysctl), - nil, - nil, - ), - prometheus.UntypedValue, - float64(value), - ) -} - -func (c *zfsCollector) constPoolMetric(poolName string, sysctl zfsSysctl, value uint64) prometheus.Metric { - metricName := sysctl.metricName() - - return prometheus.MustNewConstMetric( - prometheus.NewDesc( - prometheus.BuildFQName(namespace, "zfs_zpool", metricName), - string(sysctl), - []string{"zpool"}, - nil, - ), - prometheus.UntypedValue, - float64(value), - poolName, - ) -} - -func (c *zfsCollector) constPoolObjsetMetric(poolName string, datasetName string, sysctl zfsSysctl, value uint64) prometheus.Metric { - metricName := sysctl.metricName() - - return prometheus.MustNewConstMetric( - prometheus.NewDesc( - prometheus.BuildFQName(namespace, "zfs_zpool_dataset", metricName), - string(sysctl), - []string{"zpool", "dataset"}, - nil, - ), - prometheus.UntypedValue, - float64(value), - poolName, - datasetName, - ) -} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go b/collector/zfs_common.go similarity index 74% rename from vendor/github.com/prometheus/procfs/cpuinfo_arm64.go rename to collector/zfs_common.go index 4f5d172a35..f939ae9933 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go +++ b/collector/zfs_common.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the 
"License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,9 +11,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux -// +build arm64 +//go:build !nozfs && (freebsd || linux || solaris) -package procfs +package collector -var parseCPUInfo = parseCPUInfoARM +func init() { + registerCollector("zfs", defaultEnabled, NewZFSCollector) +} diff --git a/collector/zfs_freebsd.go b/collector/zfs_freebsd.go index 2f200967f1..9354c96d15 100644 --- a/collector/zfs_freebsd.go +++ b/collector/zfs_freebsd.go @@ -11,31 +11,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nozfs +//go:build !nozfs package collector import ( - "fmt" + "log/slog" - "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) type zfsCollector struct { sysctls []bsdSysctl - logger log.Logger + logger *slog.Logger } const ( zfsCollectorSubsystem = "zfs" ) -func init() { - registerCollector("zfs", defaultEnabled, NewZfsCollector) -} - -func NewZfsCollector(logger log.Logger) (Collector, error) { +func NewZFSCollector(logger *slog.Logger) (Collector, error) { return &zfsCollector{ sysctls: []bsdSysctl{ { @@ -213,6 +208,7 @@ func NewZfsCollector(logger log.Logger) (Collector, error) { dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, }, + // when FreeBSD 14.0+, `meta/pm/pd` install of `p`. { name: "arcstats_p_bytes", description: "ZFS ARC MRU target size", @@ -220,6 +216,27 @@ func NewZfsCollector(logger log.Logger) (Collector, error) { dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, }, + { + name: "arcstats_meta_bytes", + description: "ZFS ARC metadata target frac ", + mib: "kstat.zfs.misc.arcstats.meta", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_pd_bytes", + description: "ZFS ARC data MRU target frac", + mib: "kstat.zfs.misc.arcstats.pd", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_pm_bytes", + description: "ZFS ARC meta MRU target frac", + mib: "kstat.zfs.misc.arcstats.pm", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, { name: "arcstats_size_bytes", description: "ZFS ARC size", @@ -250,7 +267,9 @@ func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error { for _, m := range c.sysctls { v, err := m.Value() if err != nil { - return fmt.Errorf("couldn't get sysctl: %w", err) + // debug logging + c.logger.Debug(m.name, "mib", m.mib, "couldn't get sysctl:", err) + continue } ch <- prometheus.MustNewConstMetric( diff --git a/collector/zfs_linux.go b/collector/zfs_linux.go index f55d98a5d9..9e968f73fe 100644 --- a/collector/zfs_linux.go +++ b/collector/zfs_linux.go @@ -11,20 +11,21 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !nozfs +//go:build !nozfs package collector import ( "bufio" + "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strconv" "strings" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" ) @@ -34,20 +35,82 @@ const ( // kstatDataChar = "0" // kstatDataInt32 = "1" // kstatDataUint32 = "2" - // kstatDataInt64 = "3" + kstatDataInt64 = "3" kstatDataUint64 = "4" // kstatDataLong = "5" // kstatDataUlong = "6" // kstatDataString = "7" ) +var ( + errZFSNotAvailable = errors.New("ZFS / ZFS statistics are not available") + + zfsPoolStatesName = [...]string{"online", "degraded", "faulted", "offline", "removed", "unavail", "suspended"} +) + +type zfsCollector struct { + linuxProcpathBase string + linuxZpoolIoPath string + linuxZpoolObjsetPath string + linuxZpoolStatePath string + linuxPathMap map[string]string + logger *slog.Logger +} + +// NewZFSCollector returns a new Collector exposing ZFS statistics. +func NewZFSCollector(logger *slog.Logger) (Collector, error) { + return &zfsCollector{ + linuxProcpathBase: "spl/kstat/zfs", + linuxZpoolIoPath: "/*/io", + linuxZpoolObjsetPath: "/*/objset-*", + linuxZpoolStatePath: "/*/state", + linuxPathMap: map[string]string{ + "zfs_abd": "abdstats", + "zfs_arc": "arcstats", + "zfs_dbuf": "dbufstats", + "zfs_dmu_tx": "dmu_tx", + "zfs_dnode": "dnodestats", + "zfs_fm": "fm", + "zfs_vdev_cache": "vdev_cache_stats", // vdev_cache is deprecated + "zfs_vdev_mirror": "vdev_mirror_stats", + "zfs_xuio": "xuio_stats", // no known consumers of the XUIO interface on Linux exist + "zfs_zfetch": "zfetchstats", + "zfs_zil": "zil", + }, + logger: logger, + }, nil +} + +func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error { + if _, err := c.openProcFile(c.linuxProcpathBase); err != nil { + if err == errZFSNotAvailable { + c.logger.Debug(err.Error()) + return ErrNoData + } + } + + for subsystem := range c.linuxPathMap { + if err := c.updateZfsStats(subsystem, ch); err != nil { + if err == errZFSNotAvailable { + c.logger.Debug(err.Error()) + // ZFS /proc files are added as new features to ZFS arrive, it is ok to continue + continue + } + return err + } + } + + // Pool stats + return c.updatePoolStats(ch) +} + func (c *zfsCollector) openProcFile(path string) (*os.File, error) { file, err := os.Open(procFilePath(path)) if err != nil { // file not found error can occur if: // 1. zfs module is not loaded // 2. 
zfs version does not have the feature with metrics -- ok to ignore - level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", procFilePath(path)) + c.logger.Debug("Cannot open file for reading", "path", procFilePath(path)) return nil, errZFSNotAvailable } return file, nil @@ -60,8 +123,15 @@ func (c *zfsCollector) updateZfsStats(subsystem string, ch chan<- prometheus.Met } defer file.Close() - return c.parseProcfsFile(file, c.linuxPathMap[subsystem], func(s zfsSysctl, v uint64) { - ch <- c.constSysctlMetric(subsystem, s, v) + return c.parseProcfsFile(file, c.linuxPathMap[subsystem], func(s zfsSysctl, v any) { + var valueAsFloat64 float64 + switch value := v.(type) { + case int64: + valueAsFloat64 = float64(value) + case uint64: + valueAsFloat64 = float64(value) + } + ch <- c.constSysctlMetric(subsystem, s, valueAsFloat64) }) } @@ -71,15 +141,11 @@ func (c *zfsCollector) updatePoolStats(ch chan<- prometheus.Metric) error { return err } - if zpoolPaths == nil { - return nil - } - for _, zpoolPath := range zpoolPaths { file, err := os.Open(zpoolPath) if err != nil { // this file should exist, but there is a race where an exporting pool can remove the files -- ok to ignore - level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", zpoolPath) + c.logger.Debug("Cannot open file for reading", "path", zpoolPath) return errZFSNotAvailable } @@ -97,15 +163,11 @@ func (c *zfsCollector) updatePoolStats(ch chan<- prometheus.Metric) error { return err } - if zpoolObjsetPaths == nil { - return nil - } - for _, zpoolPath := range zpoolObjsetPaths { file, err := os.Open(zpoolPath) if err != nil { - // this file should exist, but there is a race where an exporting pool can remove the files -- ok to ignore - level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", zpoolPath) + // This file should exist, but there is a race where an exporting pool can remove the files. Ok to ignore. + c.logger.Debug("Cannot open file for reading", "path", zpoolPath) return errZFSNotAvailable } @@ -117,10 +179,39 @@ func (c *zfsCollector) updatePoolStats(ch chan<- prometheus.Metric) error { return err } } + + zpoolStatePaths, err := filepath.Glob(procFilePath(filepath.Join(c.linuxProcpathBase, c.linuxZpoolStatePath))) + if err != nil { + return err + } + + if zpoolStatePaths == nil { + c.logger.Debug("No pool state files found") + return nil + } + + for _, zpoolPath := range zpoolStatePaths { + file, err := os.Open(zpoolPath) + if err != nil { + // This file should exist, but there is a race where an exporting pool can remove the files. Ok to ignore. 
+ c.logger.Debug("Cannot open file for reading", "path", zpoolPath) + return errZFSNotAvailable + } + + err = c.parsePoolStateFile(file, zpoolPath, func(poolName string, stateName string, isActive uint64) { + ch <- c.constPoolStateMetric(poolName, stateName, isActive) + }) + + file.Close() + if err != nil { + return err + } + } + return nil } -func (c *zfsCollector) parseProcfsFile(reader io.Reader, fmtExt string, handler func(zfsSysctl, uint64)) error { +func (c *zfsCollector) parseProcfsFile(reader io.Reader, fmtExt string, handler func(zfsSysctl, any)) error { scanner := bufio.NewScanner(reader) parseLine := false @@ -139,11 +230,18 @@ func (c *zfsCollector) parseProcfsFile(reader io.Reader, fmtExt string, handler // kstat data type (column 2) should be KSTAT_DATA_UINT64, otherwise ignore // TODO: when other KSTAT_DATA_* types arrive, much of this will need to be restructured - if parts[1] == kstatDataUint64 { - key := fmt.Sprintf("kstat.zfs.misc.%s.%s", fmtExt, parts[0]) + key := fmt.Sprintf("kstat.zfs.misc.%s.%s", fmtExt, parts[0]) + switch parts[1] { + case kstatDataUint64: value, err := strconv.ParseUint(parts[2], 10, 64) if err != nil { - return fmt.Errorf("could not parse expected integer value for %q", key) + return fmt.Errorf("could not parse expected unsigned integer value for %q: %w", key, err) + } + handler(zfsSysctl(key), value) + case kstatDataInt64: + value, err := strconv.ParseInt(parts[2], 10, 64) + if err != nil { + return fmt.Errorf("could not parse expected signed integer value for %q: %w", key, err) } handler(zfsSysctl(key), value) } @@ -202,7 +300,8 @@ func (c *zfsCollector) parsePoolObjsetFile(reader io.Reader, zpoolPath string, h parseLine := false var zpoolName, datasetName string for scanner.Scan() { - parts := strings.Fields(scanner.Text()) + line := scanner.Text() + parts := strings.Fields(line) if !parseLine && len(parts) == 3 && parts[0] == "name" && parts[1] == "type" && parts[2] == "data" { parseLine = true @@ -216,7 +315,7 @@ func (c *zfsCollector) parsePoolObjsetFile(reader io.Reader, zpoolPath string, h zpoolPathElements := strings.Split(zpoolPath, "/") pathLen := len(zpoolPathElements) zpoolName = zpoolPathElements[pathLen-2] - datasetName = parts[2] + datasetName = line[strings.Index(line, parts[2]):] continue } @@ -235,3 +334,105 @@ func (c *zfsCollector) parsePoolObjsetFile(reader io.Reader, zpoolPath string, h return scanner.Err() } + +func (c *zfsCollector) parsePoolStateFile(reader io.Reader, zpoolPath string, handler func(string, string, uint64)) error { + scanner := bufio.NewScanner(reader) + scanner.Scan() + + actualStateName, err := scanner.Text(), scanner.Err() + if err != nil { + return err + } + + actualStateName = strings.ToLower(actualStateName) + + zpoolPathElements := strings.Split(zpoolPath, "/") + pathLen := len(zpoolPathElements) + if pathLen < 2 { + return fmt.Errorf("zpool path did not return at least two elements") + } + + zpoolName := zpoolPathElements[pathLen-2] + + for _, stateName := range zfsPoolStatesName { + isActive := uint64(0) + + if actualStateName == stateName { + isActive = 1 + } + + handler(zpoolName, stateName, isActive) + } + + return nil +} + +func (c *zfsCollector) constSysctlMetric(subsystem string, sysctl zfsSysctl, value float64) prometheus.Metric { + metricName := sysctl.metricName() + + return prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, metricName), + string(sysctl), + nil, + nil, + ), + prometheus.UntypedValue, + value, + ) +} + +func (c 
*zfsCollector) constPoolMetric(poolName string, sysctl zfsSysctl, value uint64) prometheus.Metric { + metricName := sysctl.metricName() + + return prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, "zfs_zpool", metricName), + string(sysctl), + []string{"zpool"}, + nil, + ), + prometheus.UntypedValue, + float64(value), + poolName, + ) +} + +func (c *zfsCollector) constPoolObjsetMetric(poolName string, datasetName string, sysctl zfsSysctl, value uint64) prometheus.Metric { + metricName := sysctl.metricName() + + return prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, "zfs_zpool_dataset", metricName), + string(sysctl), + []string{"zpool", "dataset"}, + nil, + ), + prometheus.UntypedValue, + float64(value), + poolName, + datasetName, + ) +} + +func (c *zfsCollector) constPoolStateMetric(poolName string, stateName string, isActive uint64) prometheus.Metric { + return prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, "zfs_zpool", "state"), + "kstat.zfs.misc.state", + []string{"zpool", "state"}, + nil, + ), + prometheus.GaugeValue, + float64(isActive), + poolName, + stateName, + ) +} + +type zfsSysctl string + +func (s zfsSysctl) metricName() string { + parts := strings.Split(string(s), ".") + return strings.ReplaceAll(parts[len(parts)-1], "-", "_") +} diff --git a/collector/zfs_linux_test.go b/collector/zfs_linux_test.go index 29d28e8f47..182a324083 100644 --- a/collector/zfs_linux_test.go +++ b/collector/zfs_linux_test.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nozfs + package collector import ( @@ -32,24 +34,25 @@ func TestArcstatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(arcstatsFile, "arcstats", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(arcstatsFile, "arcstats", func(s zfsSysctl, v any) { - if s != zfsSysctl("kstat.zfs.misc.arcstats.hits") { + if s == zfsSysctl("kstat.zfs.misc.arcstats.hits") { + if v.(uint64) != 8772612 { + t.Fatalf("Incorrect value parsed from procfs data") + } + } else if s == zfsSysctl("kstat.zfs.misc.arcstats.memory_available_bytes") { + if v.(int64) != -922337203685477580 { + t.Fatalf("Incorrect value parsed from procfs data") + } + } else { return } handlerCalled = true - - if v != uint64(8772612) { - t.Fatalf("Incorrect value parsed from procfs data") - } - }) - if err != nil { t.Fatal(err) } - if !handlerCalled { t.Fatal("Arcstats parsing handler was not called for some expected sysctls") } @@ -68,7 +71,7 @@ func TestZfetchstatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(zfetchstatsFile, "zfetchstats", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(zfetchstatsFile, "zfetchstats", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.zfetchstats.hits") { return @@ -76,7 +79,7 @@ func TestZfetchstatsParsing(t *testing.T) { handlerCalled = true - if v != uint64(7067992) { + if v.(uint64) != 7067992 { t.Fatalf("Incorrect value parsed from procfs data") } @@ -104,7 +107,7 @@ func TestZilParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(zilFile, "zil", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(zilFile, "zil", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.zil.zil_commit_count") { return @@ -112,7 +115,7 @@ func TestZilParsing(t *testing.T) { handlerCalled = true - if v != uint64(10) { + if v.(uint64) != 10 { 
t.Fatalf("Incorrect value parsed from procfs data") } @@ -140,7 +143,7 @@ func TestVdevCacheStatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(vdevCacheStatsFile, "vdev_cache_stats", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(vdevCacheStatsFile, "vdev_cache_stats", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.vdev_cache_stats.delegations") { return @@ -148,7 +151,7 @@ func TestVdevCacheStatsParsing(t *testing.T) { handlerCalled = true - if v != uint64(40) { + if v.(uint64) != 40 { t.Fatalf("Incorrect value parsed from procfs data") } @@ -176,7 +179,7 @@ func TestXuioStatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(xuioStatsFile, "xuio_stats", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(xuioStatsFile, "xuio_stats", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.xuio_stats.onloan_read_buf") { return @@ -184,7 +187,7 @@ func TestXuioStatsParsing(t *testing.T) { handlerCalled = true - if v != uint64(32) { + if v.(uint64) != 32 { t.Fatalf("Incorrect value parsed from procfs data") } @@ -212,7 +215,7 @@ func TestFmParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(fmFile, "fm", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(fmFile, "fm", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.fm.erpt-dropped") { return @@ -220,7 +223,7 @@ func TestFmParsing(t *testing.T) { handlerCalled = true - if v != uint64(18) { + if v.(uint64) != 18 { t.Fatalf("Incorrect value parsed from procfs data") } @@ -248,7 +251,7 @@ func TestDmuTxParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(dmuTxFile, "dmu_tx", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(dmuTxFile, "dmu_tx", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.dmu_tx.dmu_tx_assigned") { return @@ -256,7 +259,7 @@ func TestDmuTxParsing(t *testing.T) { handlerCalled = true - if v != uint64(3532844) { + if v.(uint64) != 3532844 { t.Fatalf("Incorrect value parsed from procfs data") } @@ -296,7 +299,7 @@ func TestZpoolParsing(t *testing.T) { handlerCalled = true - if v != uint64(1884160) && v != uint64(2826240) { + if v != 1884160 && v != 2826240 { t.Fatalf("Incorrect value parsed from procfs data %v", v) } @@ -311,6 +314,55 @@ func TestZpoolParsing(t *testing.T) { } } +func TestZpoolObjsetParsingWithSpace(t *testing.T) { + tests := []struct { + path string + expectedDataset string + }{ + { + path: "fixtures/proc/spl/kstat/zfs/pool1/objset-1", + expectedDataset: "pool1", + }, + { + path: "fixtures/proc/spl/kstat/zfs/pool1/objset-2", + expectedDataset: "pool1/dataset1", + }, + { + path: "fixtures/proc/spl/kstat/zfs/pool3/objset-1", + expectedDataset: "pool3", + }, + { + path: "fixtures/proc/spl/kstat/zfs/pool3/objset-2", + expectedDataset: "pool3/dataset with space", + }, + } + + c := zfsCollector{} + + var handlerCalled bool + for _, test := range tests { + file, err := os.Open(test.path) + if err != nil { + t.Fatal(err) + } + + handlerCalled = false + err = c.parsePoolObjsetFile(file, test.path, func(poolName string, datasetName string, s zfsSysctl, v uint64) { + handlerCalled = true + if test.expectedDataset != datasetName { + t.Fatalf("Incorrectly parsed dataset name: expected: '%s', got: '%s'", test.expectedDataset, datasetName) + } + }) + file.Close() + if err != nil { + t.Fatal(err) + } + if !handlerCalled { + t.Fatalf("Zpool parsing handler was not called for '%s'", test.path) + } + } +} + func TestZpoolObjsetParsing(t *testing.T) { zpoolPaths, err 
:= filepath.Glob("fixtures/proc/spl/kstat/zfs/*/objset-*") if err != nil { @@ -336,7 +388,7 @@ func TestZpoolObjsetParsing(t *testing.T) { handlerCalled = true - if v != uint64(0) && v != uint64(4) && v != uint64(10) { + if v != 0 && v != 4 && v != 10 { t.Fatalf("Incorrect value parsed from procfs data %v", v) } @@ -364,7 +416,7 @@ func TestAbdstatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(abdstatsFile, "abdstats", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(abdstatsFile, "abdstats", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.abdstats.linear_data_size") { return @@ -372,7 +424,7 @@ func TestAbdstatsParsing(t *testing.T) { handlerCalled = true - if v != uint64(223232) { + if v.(uint64) != 223232 { t.Fatalf("Incorrect value parsed from procfs abdstats data") } @@ -388,7 +440,7 @@ func TestAbdstatsParsing(t *testing.T) { } func TestDbufstatsParsing(t *testing.T) { - dbufstatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/dbuf_stats") + dbufstatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/dbufstats") if err != nil { t.Fatal(err) } @@ -400,7 +452,7 @@ func TestDbufstatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(dbufstatsFile, "dbufstats", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(dbufstatsFile, "dbufstats", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.dbufstats.hash_hits") { return @@ -408,7 +460,7 @@ func TestDbufstatsParsing(t *testing.T) { handlerCalled = true - if v != uint64(108807) { + if v.(uint64) != 108807 { t.Fatalf("Incorrect value parsed from procfs dbufstats data") } @@ -436,7 +488,7 @@ func TestDnodestatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(dnodestatsFile, "dnodestats", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(dnodestatsFile, "dnodestats", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.dnodestats.dnode_hold_alloc_hits") { return @@ -444,7 +496,7 @@ func TestDnodestatsParsing(t *testing.T) { handlerCalled = true - if v != uint64(37617) { + if v.(uint64) != 37617 { t.Fatalf("Incorrect value parsed from procfs dnodestats data") } @@ -472,7 +524,7 @@ func TestVdevMirrorstatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(vdevMirrorStatsFile, "vdev_mirror_stats", func(s zfsSysctl, v uint64) { + err = c.parseProcfsFile(vdevMirrorStatsFile, "vdev_mirror_stats", func(s zfsSysctl, v any) { if s != zfsSysctl("kstat.zfs.misc.vdev_mirror_stats.preferred_not_found") { return @@ -480,7 +532,7 @@ func TestVdevMirrorstatsParsing(t *testing.T) { handlerCalled = true - if v != uint64(94) { + if v.(uint64) != 94 { t.Fatalf("Incorrect value parsed from procfs vdev_mirror_stats data") } @@ -494,3 +546,61 @@ func TestVdevMirrorstatsParsing(t *testing.T) { t.Fatal("VdevMirrorStats parsing handler was not called for some expected sysctls") } } + +func TestPoolStateParsing(t *testing.T) { + zpoolPaths, err := filepath.Glob("fixtures/proc/spl/kstat/zfs/*/state") + if err != nil { + t.Fatal(err) + } + + c := zfsCollector{} + if err != nil { + t.Fatal(err) + } + + handlerCalled := false + for _, zpoolPath := range zpoolPaths { + file, err := os.Open(zpoolPath) + if err != nil { + t.Fatal(err) + } + + err = c.parsePoolStateFile(file, zpoolPath, func(poolName string, stateName string, isActive uint64) { + handlerCalled = true + + if poolName == "pool1" { + if isActive != 1 && stateName == "online" { + t.Fatalf("Incorrect parsed value for online state") + } + if isActive != 0 && 
stateName != "online" { + t.Fatalf("Incorrect parsed value for online state") + } + } + if poolName == "poolz1" { + if isActive != 1 && stateName == "degraded" { + t.Fatalf("Incorrect parsed value for degraded state") + } + if isActive != 0 && stateName != "degraded" { + t.Fatalf("Incorrect parsed value for degraded state") + } + } + if poolName == "pool2" { + if isActive != 1 && stateName == "suspended" { + t.Fatalf("Incorrect parsed value for suspended state") + } + if isActive != 0 && stateName != "suspended" { + t.Fatalf("Incorrect parsed value for suspended state") + } + } + + }) + file.Close() + if err != nil { + t.Fatal(err) + } + } + if !handlerCalled { + t.Fatal("Zpool parsing handler was not called for some expected sysctls") + } + +} diff --git a/collector/zfs_solaris.go b/collector/zfs_solaris.go index 1c0460c186..0f858ad9b0 100644 --- a/collector/zfs_solaris.go +++ b/collector/zfs_solaris.go @@ -11,17 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build solaris -// +build !nozfs +//go:build !nozfs package collector import ( + "log/slog" "strings" - "github.com/go-kit/kit/log" + "github.com/illumos/go-kstat" "github.com/prometheus/client_golang/prometheus" - "github.com/siebenmann/go-kstat" ) type zfsCollector struct { @@ -54,18 +53,14 @@ type zfsCollector struct { arcstatsSize *prometheus.Desc zfetchstatsHits *prometheus.Desc zfetchstatsMisses *prometheus.Desc - logger log.Logger + logger *slog.Logger } const ( zfsCollectorSubsystem = "zfs" ) -func init() { - registerCollector("zfs", defaultEnabled, NewZfsCollector) -} - -func NewZfsCollector(logger log.Logger) (Collector, error) { +func NewZFSCollector(logger *slog.Logger) (Collector, error) { return &zfsCollector{ abdstatsLinearCount: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_linear_count_total"), @@ -299,6 +294,9 @@ func (c *zfsCollector) updateZfsFetchStats(ch chan<- prometheus.Metric) error { defer tok.Close() ksZFSInfo, err := tok.Lookup("zfs", 0, "zfetchstats") + if err != nil { + return err + } for k, v := range map[string]*prometheus.Desc{ "hits": c.zfetchstatsHits, diff --git a/collector/zoneinfo_linux.go b/collector/zoneinfo_linux.go new file mode 100644 index 0000000000..0b40fd3c59 --- /dev/null +++ b/collector/zoneinfo_linux.go @@ -0,0 +1,239 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "fmt" + "log/slog" + "reflect" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +const zoneinfoSubsystem = "zoneinfo" + +type zoneinfoCollector struct { + gaugeMetricDescs map[string]*prometheus.Desc + counterMetricDescs map[string]*prometheus.Desc + logger *slog.Logger + fs procfs.FS +} + +func init() { + registerCollector("zoneinfo", defaultDisabled, NewZoneinfoCollector) +} + +// NewZoneinfoCollector returns a new Collector exposing zone stats. 
+func NewZoneinfoCollector(logger *slog.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + return &zoneinfoCollector{ + gaugeMetricDescs: createGaugeMetricDescriptions(), + counterMetricDescs: createCounterMetricDescriptions(), + logger: logger, + fs: fs, + }, nil +} + +func (c *zoneinfoCollector) Update(ch chan<- prometheus.Metric) error { + metrics, err := c.fs.Zoneinfo() + if err != nil { + return fmt.Errorf("couldn't get zoneinfo: %w", err) + } + for _, metric := range metrics { + node := metric.Node + zone := metric.Zone + metricStruct := reflect.ValueOf(metric) + typeOfMetricStruct := metricStruct.Type() + for i := 0; i < metricStruct.NumField(); i++ { + value := reflect.Indirect(metricStruct.Field(i)) + if value.Kind() != reflect.Int64 { + continue + } + metricName := typeOfMetricStruct.Field(i).Name + desc, ok := c.gaugeMetricDescs[metricName] + metricType := prometheus.GaugeValue + if !ok { + desc = c.counterMetricDescs[metricName] + metricType = prometheus.CounterValue + } + ch <- prometheus.MustNewConstMetric(desc, metricType, + float64(reflect.Indirect(metricStruct.Field(i)).Int()), + node, zone) + } + for i, value := range metric.Protection { + metricName := fmt.Sprintf("protection_%d", i) + desc, ok := c.gaugeMetricDescs[metricName] + if !ok { + desc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, metricName), + fmt.Sprintf("Protection array %d. field", i), + []string{"node", "zone"}, nil) + c.gaugeMetricDescs[metricName] = desc + } + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, + float64(*value), node, zone) + } + + } + return nil +} +func createGaugeMetricDescriptions() map[string]*prometheus.Desc { + return map[string]*prometheus.Desc{ + "NrFreePages": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_free_pages"), + "Total number of free pages in the zone", + []string{"node", "zone"}, nil), + "Min": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "min_pages"), + "Zone watermark pages_min", + []string{"node", "zone"}, nil), + "Low": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "low_pages"), + "Zone watermark pages_low", + []string{"node", "zone"}, nil), + "High": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "high_pages"), + "Zone watermark pages_high", + []string{"node", "zone"}, nil), + "Scanned": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "scanned_pages"), + "Pages scanned since last reclaim", + []string{"node", "zone"}, nil), + "Spanned": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "spanned_pages"), + "Total pages spanned by the zone, including holes", + []string{"node", "zone"}, nil), + "Present": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "present_pages"), + "Physical pages existing within the zone", + []string{"node", "zone"}, nil), + "Managed": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "managed_pages"), + "Present pages managed by the buddy system", + []string{"node", "zone"}, nil), + "NrActiveAnon": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_active_anon_pages"), + "Number of anonymous pages recently more used", + []string{"node", "zone"}, nil), + "NrInactiveAnon": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, 
"nr_inactive_anon_pages"), + "Number of anonymous pages recently less used", + []string{"node", "zone"}, nil), + "NrIsolatedAnon": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_isolated_anon_pages"), + "Temporary isolated pages from anon lru", + []string{"node", "zone"}, nil), + "NrAnonPages": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_anon_pages"), + "Number of anonymous pages currently used by the system", + []string{"node", "zone"}, nil), + "NrAnonTransparentHugepages": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_anon_transparent_hugepages"), + "Number of anonymous transparent huge pages currently used by the system", + []string{"node", "zone"}, nil), + "NrActiveFile": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_active_file_pages"), + "Number of active pages with file-backing", + []string{"node", "zone"}, nil), + "NrInactiveFile": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_inactive_file_pages"), + "Number of inactive pages with file-backing", + []string{"node", "zone"}, nil), + "NrIsolatedFile": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_isolated_file_pages"), + "Temporary isolated pages from file lru", + []string{"node", "zone"}, nil), + "NrFilePages": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_file_pages"), + "Number of file pages", + []string{"node", "zone"}, nil), + "NrSlabReclaimable": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_slab_reclaimable_pages"), + "Number of reclaimable slab pages", + []string{"node", "zone"}, nil), + "NrSlabUnreclaimable": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_slab_unreclaimable_pages"), + "Number of unreclaimable slab pages", + []string{"node", "zone"}, nil), + "NrMlockStack": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_mlock_stack_pages"), + "mlock()ed pages found and moved off LRU", + []string{"node", "zone"}, nil), + "NrKernelStack": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_kernel_stacks"), + "Number of kernel stacks", + []string{"node", "zone"}, nil), + "NrMapped": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_mapped_pages"), + "Number of mapped pages", + []string{"node", "zone"}, nil), + "NrDirty": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_dirty_pages"), + "Number of dirty pages", + []string{"node", "zone"}, nil), + "NrWriteback": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_writeback_pages"), + "Number of writeback pages", + []string{"node", "zone"}, nil), + "NrUnevictable": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_unevictable_pages"), + "Number of unevictable pages", + []string{"node", "zone"}, nil), + "NrShmem": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_shmem_pages"), + "Number of shmem pages (included tmpfs/GEM pages)", + []string{"node", "zone"}, nil), + } + +} +func createCounterMetricDescriptions() map[string]*prometheus.Desc { + return map[string]*prometheus.Desc{ + "NrDirtied": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_dirtied_total"), + "Page dirtyings since bootup", + []string{"node", "zone"}, nil), + "NrWritten": prometheus.NewDesc( + 
prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_written_total"), + "Page writings since bootup", + []string{"node", "zone"}, nil), + "NumaHit": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_hit_total"), + "Allocated in intended node", + []string{"node", "zone"}, nil), + "NumaMiss": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_miss_total"), + "Allocated in non intended node", + []string{"node", "zone"}, nil), + "NumaForeign": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_foreign_total"), + "Was intended here, hit elsewhere", + []string{"node", "zone"}, nil), + "NumaInterleave": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_interleave_total"), + "Interleaver preferred this zone", + []string{"node", "zone"}, nil), + "NumaLocal": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_local_total"), + "Allocation from local node", + []string{"node", "zone"}, nil), + "NumaOther": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_other_total"), + "Allocation from other node", + []string{"node", "zone"}, nil), + } +} diff --git a/debian/changelog b/debian/changelog index 31a371a347..6b39cb5d4c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +cl-node-exporter (1.2.0-1) stable; urgency=medium + + [ Ruslan Koliada ] + * CLPRO-2902: Sync node_exporter with upstream + + -- Ruslan Koliada Mon, 1 Dec 2025 10:00:00 +0000 + cl-node-exporter (1.1.0-3) stable; urgency=medium [ Yehor Komarov ] diff --git a/debian/rules b/debian/rules index 7c2b063629..0a0ea703d4 100644 --- a/debian/rules +++ b/debian/rules @@ -29,9 +29,10 @@ _clshare_plus := /usr/share/cloudlinux/cl_plus dh $@ override_dh_auto_build: - curl https://dl.google.com/go/go1.14.4.linux-amd64.tar.gz --output /tmp/go.tar.gz + curl https://dl.google.com/go/go1.24.0.linux-amd64.tar.gz --output /tmp/go.tar.gz tar xzf /tmp/go.tar.gz -C /tmp/ make build + make tools make test rm -rf collector/fixtures/sys ./ttar -C collector/fixtures -x -f collector/fixtures/sys.ttar @@ -43,10 +44,15 @@ override_dh_auto_install: install -D -m 755 node_exporter $(buildroot)$(_clshare_plus)/node_exporter # Install tests - mkdir -p $(buildroot)/opt/node_exporter_tests/collector - cp -r collector/fixtures $(buildroot)/opt/node_exporter_tests/collector/ + mkdir -p $(buildroot)/opt/node_exporter_tests/ + cp -r collector $(buildroot)/opt/node_exporter_tests/ install -D -m 755 end-to-end-test.sh $(buildroot)/opt/node_exporter_tests/end-to-end-test.sh install -D -m 755 node_exporter $(buildroot)/opt/node_exporter_tests/node_exporter + mkdir -p $(buildroot)/opt/node_exporter_tests/tools + install -D -m 755 tools/tools $(buildroot)/opt/node_exporter_tests/tools/tools + + # remove broken symlinks + find $(buildroot)/opt/node_exporter_tests/collector/fixtures -xtype l -delete echo "$(version)-$(release)" > $(buildroot)/usr/share/cloudlinux/cl_version @@ -57,7 +63,8 @@ override_dh_auto_install: dh_movefiles -p$(name)-tests \ ./opt/node_exporter_tests/collector/ \ ./opt/node_exporter_tests/end-to-end-test.sh \ - ./opt/node_exporter_tests/node_exporter + ./opt/node_exporter_tests/node_exporter \ + ./opt/node_exporter_tests/tools/ dh_installdirs -p$(name) \ .$(_clshare_plus) diff --git a/docs/TIME.md b/docs/TIME.md index 18773e0b49..4f0b01a04b 100644 --- a/docs/TIME.md +++ b/docs/TIME.md @@ -2,15 +2,17 @@ ## `ntp` collector -This collector is intended for usage 
with local NTPD like [ntp.org](http://ntp.org/), [chrony](https://chrony.tuxfamily.org/comparison.html) or [OpenNTPD](http://www.openntpd.org/). +NOTE: This collector is deprecated and will be removed in the next major version release. -Note, some chrony packages have `local stratum 10` configuration value making chrony a valid server when it is unsynchronised. This configuration makes one of `node_ntp_sanity` heuristics unreliable. +This collector is intended for usage with local NTP daemons including [ntp.org](http://ntp.org/), [chrony](https://chrony.tuxfamily.org/comparison.html), and [OpenNTPD](http://www.openntpd.org/). -Note, OpenNTPD does not listen for SNTP queries by default, you should add `listen on 127.0.0.1` configuration line to use this collector with OpenNTPD. +Note, some chrony packages have `local stratum 10` configuration value making chrony a valid server when it is unsynchronised. This configuration makes one of the heuristics that derive `node_ntp_sanity` unreliable. + +Note, OpenNTPD does not listen for SNTP queries by default. Add `listen on 127.0.0.1` to the OpenNTPD configuration when using this collector with that package. ### `node_ntp_stratum` -This metric shows [stratum](https://en.wikipedia.org/wiki/Network_Time_Protocol#Clock_strata) of local NTPD. +This metric shows the [stratum](https://en.wikipedia.org/wiki/Network_Time_Protocol#Clock_strata) of the local NTP daemon. Stratum `16` means that clock are unsynchronised. See also aforementioned note about default local stratum in chrony. diff --git a/docs/example-16-compatibility-rules-new-to-old.yml b/docs/example-16-compatibility-rules-new-to-old.yml index 54aaa278aa..3d74eb5de7 100644 --- a/docs/example-16-compatibility-rules-new-to-old.yml +++ b/docs/example-16-compatibility-rules-new-to-old.yml @@ -1,201 +1,201 @@ groups: -- name: node_exporter-16-bcache - rules: - - expr: node_bcache_cache_read_races - record: node_bcache_cache_read_races_total -- name: node_exporter-16-buddyinfo - rules: - - expr: node_buddyinfo_blocks - record: node_buddyinfo_count -- name: node_exporter-16-stat - rules: - - expr: node_boot_time_seconds - record: node_boot_time - - expr: node_time_seconds - record: node_time - - expr: node_context_switches_total - record: node_context_switches - - expr: node_forks_total - record: node_forks - - expr: node_intr_total - record: node_intr -- name: node_exporter-16-cpu - rules: - - expr: label_replace(node_cpu_seconds_total, "cpu", "$1", "cpu", "cpu(.+)") - record: node_cpu -- name: node_exporter-16-diskstats - rules: - - expr: node_disk_read_bytes_total - record: node_disk_bytes_read - - expr: node_disk_written_bytes_total - record: node_disk_bytes_written - - expr: node_disk_io_time_seconds_total * 1000 - record: node_disk_io_time_ms - - expr: node_disk_io_time_weighted_seconds_total - record: node_disk_io_time_weighted - - expr: node_disk_reads_completed_total - record: node_disk_reads_completed - - expr: node_disk_reads_merged_total - record: node_disk_reads_merged - - expr: node_disk_read_time_seconds_total * 1000 - record: node_disk_read_time_ms - - expr: node_disk_writes_completed_total - record: node_disk_writes_completed - - expr: node_disk_writes_merged_total - record: node_disk_writes_merged - - expr: node_disk_write_time_seconds_total * 1000 - record: node_disk_write_time_ms -- name: node_exporter-16-filesystem - rules: - - expr: node_filesystem_free_bytes - record: node_filesystem_free - - expr: node_filesystem_avail_bytes - record: node_filesystem_avail - - expr: 
node_filesystem_size_bytes - record: node_filesystem_size -- name: node_exporter-16-infiniband - rules: - - expr: node_infiniband_port_data_received_bytes_total - record: node_infiniband_port_data_received_bytes - - expr: node_infiniband_port_data_transmitted_bytes_total - record: node_infiniband_port_data_transmitted_bytes -- name: node_exporter-16-interrupts - rules: - - expr: node_interrupts_total - record: node_interrupts -- name: node_exporter-16-memory - rules: - - expr: node_memory_Active_bytes - record: node_memory_Active - - expr: node_memory_Active_anon_bytes - record: node_memory_Active_anon - - expr: node_memory_Active_file_bytes - record: node_memory_Active_file - - expr: node_memory_AnonHugePages_bytes - record: node_memory_AnonHugePages - - expr: node_memory_AnonPages_bytes - record: node_memory_AnonPages - - expr: node_memory_Bounce_bytes - record: node_memory_Bounce - - expr: node_memory_Buffers_bytes - record: node_memory_Buffers - - expr: node_memory_Cached_bytes - record: node_memory_Cached - - expr: node_memory_CommitLimit_bytes - record: node_memory_CommitLimit - - expr: node_memory_Committed_AS_bytes - record: node_memory_Committed_AS - - expr: node_memory_DirectMap2M_bytes - record: node_memory_DirectMap2M - - expr: node_memory_DirectMap4k_bytes - record: node_memory_DirectMap4k - - expr: node_memory_Dirty_bytes - record: node_memory_Dirty - - expr: node_memory_HardwareCorrupted_bytes - record: node_memory_HardwareCorrupted - - expr: node_memory_Hugepagesize_bytes - record: node_memory_Hugepagesize - - expr: node_memory_Inactive_bytes - record: node_memory_Inactive - - expr: node_memory_Inactive_anon_bytes - record: node_memory_Inactive_anon - - expr: node_memory_Inactive_file_bytes - record: node_memory_Inactive_file - - expr: node_memory_KernelStack_bytes - record: node_memory_KernelStack - - expr: node_memory_Mapped_bytes - record: node_memory_Mapped - - expr: node_memory_MemAvailable_bytes - record: node_memory_MemAvailable - - expr: node_memory_MemFree_bytes - record: node_memory_MemFree - - expr: node_memory_MemTotal_bytes - record: node_memory_MemTotal - - expr: node_memory_Mlocked_bytes - record: node_memory_Mlocked - - expr: node_memory_NFS_Unstable_bytes - record: node_memory_NFS_Unstable - - expr: node_memory_PageTables_bytes - record: node_memory_PageTables - - expr: node_memory_Shmem_bytes - record: node_memory_Shmem - - expr: node_memory_ShmemHugePages_bytes - record: node_memory_ShmemHugePages - - expr: node_memory_ShmemPmdMapped_bytes - record: node_memory_ShmemPmdMapped - - expr: node_memory_Slab_bytes - record: node_memory_Slab - - expr: node_memory_SReclaimable_bytes - record: node_memory_SReclaimable - - expr: node_memory_SUnreclaim_bytes - record: node_memory_SUnreclaim - - expr: node_memory_SwapCached_bytes - record: node_memory_SwapCached - - expr: node_memory_SwapFree_bytes - record: node_memory_SwapFree - - expr: node_memory_SwapTotal_bytes - record: node_memory_SwapTotal - - expr: node_memory_Unevictable_bytes - record: node_memory_Unevictable - - expr: node_memory_VmallocChunk_bytes - record: node_memory_VmallocChunk - - expr: node_memory_VmallocTotal_bytes - record: node_memory_VmallocTotal - - expr: node_memory_VmallocUsed_bytes - record: node_memory_VmallocUsed - - expr: node_memory_Writeback_bytes - record: node_memory_Writeback - - expr: node_memory_WritebackTmp_bytes - record: node_memory_WritebackTmp -- name: node_exporter-16-network - rules: - - expr: node_network_receive_bytes_total - record: node_network_receive_bytes - - expr: 
node_network_receive_compressed_total - record: node_network_receive_compressed - - expr: node_network_receive_drop_total - record: node_network_receive_drop - - expr: node_network_receive_errs_total - record: node_network_receive_errs - - expr: node_network_receive_fifo_total - record: node_network_receive_fifo - - expr: node_network_receive_frame_total - record: node_network_receive_frame - - expr: node_network_receive_multicast_total - record: node_network_receive_multicast - - expr: node_network_receive_packets_total - record: node_network_receive_packets - - expr: node_network_transmit_bytes_total - record: node_network_transmit_bytes - - expr: node_network_transmit_compressed_total - record: node_network_transmit_compressed - - expr: node_network_transmit_drop_total - record: node_network_transmit_drop - - expr: node_network_transmit_errs_total - record: node_network_transmit_errs - - expr: node_network_transmit_fifo_total - record: node_network_transmit_fifo - - expr: node_network_transmit_frame_total - record: node_network_transmit_frame - - expr: node_network_transmit_multicast_total - record: node_network_transmit_multicast - - expr: node_network_transmit_packets_total - record: node_network_transmit_packets -- name: node_exporter-16-nfs - rules: - - expr: node_nfs_connections_total - record: node_nfs_net_connections - - expr: node_nfs_packets_total - record: node_nfs_net_reads - - expr: label_replace(label_replace(node_nfs_requests_total, "proto", "$1", "version", "(.+)"), "method", "$1", "procedure", "(.+)") - record: node_nfs_procedures - - expr: node_nfs_rpc_authentication_refreshes_total - record: node_nfs_rpc_authentication_refreshes - - expr: node_nfs_rpcs_total - record: node_nfs_rpc_operations - - expr: node_nfs_rpc_retransmissions_total - record: node_nfs_rpc_retransmissions -- name: node_exporter-16-textfile - rules: - - expr: node_textfile_mtime_seconds - record: node_textfile_mtime + - name: node_exporter-16-bcache + rules: + - expr: node_bcache_cache_read_races + record: node_bcache_cache_read_races_total + - name: node_exporter-16-buddyinfo + rules: + - expr: node_buddyinfo_blocks + record: node_buddyinfo_count + - name: node_exporter-16-stat + rules: + - expr: node_boot_time_seconds + record: node_boot_time + - expr: node_time_seconds + record: node_time + - expr: node_context_switches_total + record: node_context_switches + - expr: node_forks_total + record: node_forks + - expr: node_intr_total + record: node_intr + - name: node_exporter-16-cpu + rules: + - expr: label_replace(node_cpu_seconds_total, "cpu", "cpu$1", "cpu", "(.+)") + record: node_cpu + - name: node_exporter-16-diskstats + rules: + - expr: node_disk_read_bytes_total + record: node_disk_bytes_read + - expr: node_disk_written_bytes_total + record: node_disk_bytes_written + - expr: node_disk_io_time_seconds_total * 1000 + record: node_disk_io_time_ms + - expr: node_disk_io_time_weighted_seconds_total + record: node_disk_io_time_weighted + - expr: node_disk_reads_completed_total + record: node_disk_reads_completed + - expr: node_disk_reads_merged_total + record: node_disk_reads_merged + - expr: node_disk_read_time_seconds_total * 1000 + record: node_disk_read_time_ms + - expr: node_disk_writes_completed_total + record: node_disk_writes_completed + - expr: node_disk_writes_merged_total + record: node_disk_writes_merged + - expr: node_disk_write_time_seconds_total * 1000 + record: node_disk_write_time_ms + - name: node_exporter-16-filesystem + rules: + - expr: node_filesystem_free_bytes + record: 
node_filesystem_free + - expr: node_filesystem_avail_bytes + record: node_filesystem_avail + - expr: node_filesystem_size_bytes + record: node_filesystem_size + - name: node_exporter-16-infiniband + rules: + - expr: node_infiniband_port_data_received_bytes_total + record: node_infiniband_port_data_received_bytes + - expr: node_infiniband_port_data_transmitted_bytes_total + record: node_infiniband_port_data_transmitted_bytes + - name: node_exporter-16-interrupts + rules: + - expr: node_interrupts_total + record: node_interrupts + - name: node_exporter-16-memory + rules: + - expr: node_memory_Active_bytes + record: node_memory_Active + - expr: node_memory_Active_anon_bytes + record: node_memory_Active_anon + - expr: node_memory_Active_file_bytes + record: node_memory_Active_file + - expr: node_memory_AnonHugePages_bytes + record: node_memory_AnonHugePages + - expr: node_memory_AnonPages_bytes + record: node_memory_AnonPages + - expr: node_memory_Bounce_bytes + record: node_memory_Bounce + - expr: node_memory_Buffers_bytes + record: node_memory_Buffers + - expr: node_memory_Cached_bytes + record: node_memory_Cached + - expr: node_memory_CommitLimit_bytes + record: node_memory_CommitLimit + - expr: node_memory_Committed_AS_bytes + record: node_memory_Committed_AS + - expr: node_memory_DirectMap2M_bytes + record: node_memory_DirectMap2M + - expr: node_memory_DirectMap4k_bytes + record: node_memory_DirectMap4k + - expr: node_memory_Dirty_bytes + record: node_memory_Dirty + - expr: node_memory_HardwareCorrupted_bytes + record: node_memory_HardwareCorrupted + - expr: node_memory_Hugepagesize_bytes + record: node_memory_Hugepagesize + - expr: node_memory_Inactive_bytes + record: node_memory_Inactive + - expr: node_memory_Inactive_anon_bytes + record: node_memory_Inactive_anon + - expr: node_memory_Inactive_file_bytes + record: node_memory_Inactive_file + - expr: node_memory_KernelStack_bytes + record: node_memory_KernelStack + - expr: node_memory_Mapped_bytes + record: node_memory_Mapped + - expr: node_memory_MemAvailable_bytes + record: node_memory_MemAvailable + - expr: node_memory_MemFree_bytes + record: node_memory_MemFree + - expr: node_memory_MemTotal_bytes + record: node_memory_MemTotal + - expr: node_memory_Mlocked_bytes + record: node_memory_Mlocked + - expr: node_memory_NFS_Unstable_bytes + record: node_memory_NFS_Unstable + - expr: node_memory_PageTables_bytes + record: node_memory_PageTables + - expr: node_memory_Shmem_bytes + record: node_memory_Shmem + - expr: node_memory_ShmemHugePages_bytes + record: node_memory_ShmemHugePages + - expr: node_memory_ShmemPmdMapped_bytes + record: node_memory_ShmemPmdMapped + - expr: node_memory_Slab_bytes + record: node_memory_Slab + - expr: node_memory_SReclaimable_bytes + record: node_memory_SReclaimable + - expr: node_memory_SUnreclaim_bytes + record: node_memory_SUnreclaim + - expr: node_memory_SwapCached_bytes + record: node_memory_SwapCached + - expr: node_memory_SwapFree_bytes + record: node_memory_SwapFree + - expr: node_memory_SwapTotal_bytes + record: node_memory_SwapTotal + - expr: node_memory_Unevictable_bytes + record: node_memory_Unevictable + - expr: node_memory_VmallocChunk_bytes + record: node_memory_VmallocChunk + - expr: node_memory_VmallocTotal_bytes + record: node_memory_VmallocTotal + - expr: node_memory_VmallocUsed_bytes + record: node_memory_VmallocUsed + - expr: node_memory_Writeback_bytes + record: node_memory_Writeback + - expr: node_memory_WritebackTmp_bytes + record: node_memory_WritebackTmp + - name: 
node_exporter-16-network + rules: + - expr: node_network_receive_bytes_total + record: node_network_receive_bytes + - expr: node_network_receive_compressed_total + record: node_network_receive_compressed + - expr: node_network_receive_drop_total + record: node_network_receive_drop + - expr: node_network_receive_errs_total + record: node_network_receive_errs + - expr: node_network_receive_fifo_total + record: node_network_receive_fifo + - expr: node_network_receive_frame_total + record: node_network_receive_frame + - expr: node_network_receive_multicast_total + record: node_network_receive_multicast + - expr: node_network_receive_packets_total + record: node_network_receive_packets + - expr: node_network_transmit_bytes_total + record: node_network_transmit_bytes + - expr: node_network_transmit_compressed_total + record: node_network_transmit_compressed + - expr: node_network_transmit_drop_total + record: node_network_transmit_drop + - expr: node_network_transmit_errs_total + record: node_network_transmit_errs + - expr: node_network_transmit_fifo_total + record: node_network_transmit_fifo + - expr: node_network_transmit_frame_total + record: node_network_transmit_frame + - expr: node_network_transmit_multicast_total + record: node_network_transmit_multicast + - expr: node_network_transmit_packets_total + record: node_network_transmit_packets + - name: node_exporter-16-nfs + rules: + - expr: node_nfs_connections_total + record: node_nfs_net_connections + - expr: node_nfs_packets_total + record: node_nfs_net_reads + - expr: label_replace(label_replace(node_nfs_requests_total, "proto", "$1", "version", "(.+)"), "method", "$1", "procedure", "(.+)") + record: node_nfs_procedures + - expr: node_nfs_rpc_authentication_refreshes_total + record: node_nfs_rpc_authentication_refreshes + - expr: node_nfs_rpcs_total + record: node_nfs_rpc_operations + - expr: node_nfs_rpc_retransmissions_total + record: node_nfs_rpc_retransmissions + - name: node_exporter-16-textfile + rules: + - expr: node_textfile_mtime_seconds + record: node_textfile_mtime diff --git a/docs/example-16-compatibility-rules.yml b/docs/example-16-compatibility-rules.yml index f772bd55e1..f05e1b4210 100644 --- a/docs/example-16-compatibility-rules.yml +++ b/docs/example-16-compatibility-rules.yml @@ -1,201 +1,201 @@ groups: -- name: node_exporter-16-bcache - rules: - - record: node_bcache_cache_read_races - expr: node_bcache_cache_read_races_total -- name: node_exporter-16-buddyinfo - rules: - - record: node_buddyinfo_blocks - expr: node_buddyinfo_count -- name: node_exporter-16-stat - rules: - - record: node_boot_time_seconds - expr: node_boot_time - - record: node_time_seconds - expr: node_time - - record: node_context_switches_total - expr: node_context_switches - - record: node_forks_total - expr: node_forks - - record: node_intr_total - expr: node_intr -- name: node_exporter-16-cpu - rules: - - record: node_cpu_seconds_total - expr: label_replace(node_cpu, "cpu", "$1", "cpu", "cpu(.+)") -- name: node_exporter-16-diskstats - rules: - - record: node_disk_read_bytes_total - expr: node_disk_bytes_read - - record: node_disk_written_bytes_total - expr: node_disk_bytes_written - - record: node_disk_io_time_seconds_total - expr: node_disk_io_time_ms / 1000 - - record: node_disk_io_time_weighted_seconds_total - expr: node_disk_io_time_weighted - - record: node_disk_reads_completed_total - expr: node_disk_reads_completed - - record: node_disk_reads_merged_total - expr: node_disk_reads_merged - - record: node_disk_read_time_seconds_total - expr: 
node_disk_read_time_ms / 1000 - - record: node_disk_writes_completed_total - expr: node_disk_writes_completed - - record: node_disk_writes_merged_total - expr: node_disk_writes_merged - - record: node_disk_write_time_seconds_total - expr: node_disk_write_time_ms / 1000 -- name: node_exporter-16-filesystem - rules: - - record: node_filesystem_free_bytes - expr: node_filesystem_free - - record: node_filesystem_avail_bytes - expr: node_filesystem_avail - - record: node_filesystem_size_bytes - expr: node_filesystem_size -- name: node_exporter-16-infiniband - rules: - - record: node_infiniband_port_data_received_bytes_total - expr: node_infiniband_port_data_received_bytes - - record: node_infiniband_port_data_transmitted_bytes_total - expr: node_infiniband_port_data_transmitted_bytes -- name: node_exporter-16-interrupts - rules: - - record: node_interrupts_total - expr: node_interrupts -- name: node_exporter-16-memory - rules: - - record: node_memory_Active_bytes - expr: node_memory_Active - - record: node_memory_Active_anon_bytes - expr: node_memory_Active_anon - - record: node_memory_Active_file_bytes - expr: node_memory_Active_file - - record: node_memory_AnonHugePages_bytes - expr: node_memory_AnonHugePages - - record: node_memory_AnonPages_bytes - expr: node_memory_AnonPages - - record: node_memory_Bounce_bytes - expr: node_memory_Bounce - - record: node_memory_Buffers_bytes - expr: node_memory_Buffers - - record: node_memory_Cached_bytes - expr: node_memory_Cached - - record: node_memory_CommitLimit_bytes - expr: node_memory_CommitLimit - - record: node_memory_Committed_AS_bytes - expr: node_memory_Committed_AS - - record: node_memory_DirectMap2M_bytes - expr: node_memory_DirectMap2M - - record: node_memory_DirectMap4k_bytes - expr: node_memory_DirectMap4k - - record: node_memory_Dirty_bytes - expr: node_memory_Dirty - - record: node_memory_HardwareCorrupted_bytes - expr: node_memory_HardwareCorrupted - - record: node_memory_Hugepagesize_bytes - expr: node_memory_Hugepagesize - - record: node_memory_Inactive_bytes - expr: node_memory_Inactive - - record: node_memory_Inactive_anon_bytes - expr: node_memory_Inactive_anon - - record: node_memory_Inactive_file_bytes - expr: node_memory_Inactive_file - - record: node_memory_KernelStack_bytes - expr: node_memory_KernelStack - - record: node_memory_Mapped_bytes - expr: node_memory_Mapped - - record: node_memory_MemAvailable_bytes - expr: node_memory_MemAvailable - - record: node_memory_MemFree_bytes - expr: node_memory_MemFree - - record: node_memory_MemTotal_bytes - expr: node_memory_MemTotal - - record: node_memory_Mlocked_bytes - expr: node_memory_Mlocked - - record: node_memory_NFS_Unstable_bytes - expr: node_memory_NFS_Unstable - - record: node_memory_PageTables_bytes - expr: node_memory_PageTables - - record: node_memory_Shmem_bytes - expr: node_memory_Shmem - - record: node_memory_ShmemHugePages_bytes - expr: node_memory_ShmemHugePages - - record: node_memory_ShmemPmdMapped_bytes - expr: node_memory_ShmemPmdMapped - - record: node_memory_Slab_bytes - expr: node_memory_Slab - - record: node_memory_SReclaimable_bytes - expr: node_memory_SReclaimable - - record: node_memory_SUnreclaim_bytes - expr: node_memory_SUnreclaim - - record: node_memory_SwapCached_bytes - expr: node_memory_SwapCached - - record: node_memory_SwapFree_bytes - expr: node_memory_SwapFree - - record: node_memory_SwapTotal_bytes - expr: node_memory_SwapTotal - - record: node_memory_Unevictable_bytes - expr: node_memory_Unevictable - - record: node_memory_VmallocChunk_bytes 
- expr: node_memory_VmallocChunk - - record: node_memory_VmallocTotal_bytes - expr: node_memory_VmallocTotal - - record: node_memory_VmallocUsed_bytes - expr: node_memory_VmallocUsed - - record: node_memory_Writeback_bytes - expr: node_memory_Writeback - - record: node_memory_WritebackTmp_bytes - expr: node_memory_WritebackTmp -- name: node_exporter-16-network - rules: - - record: node_network_receive_bytes_total - expr: node_network_receive_bytes - - record: node_network_receive_compressed_total - expr: node_network_receive_compressed - - record: node_network_receive_drop_total - expr: node_network_receive_drop - - record: node_network_receive_errs_total - expr: node_network_receive_errs - - record: node_network_receive_fifo_total - expr: node_network_receive_fifo - - record: node_network_receive_frame_total - expr: node_network_receive_frame - - record: node_network_receive_multicast_total - expr: node_network_receive_multicast - - record: node_network_receive_packets_total - expr: node_network_receive_packets - - record: node_network_transmit_bytes_total - expr: node_network_transmit_bytes - - record: node_network_transmit_compressed_total - expr: node_network_transmit_compressed - - record: node_network_transmit_drop_total - expr: node_network_transmit_drop - - record: node_network_transmit_errs_total - expr: node_network_transmit_errs - - record: node_network_transmit_fifo_total - expr: node_network_transmit_fifo - - record: node_network_transmit_frame_total - expr: node_network_transmit_frame - - record: node_network_transmit_multicast_total - expr: node_network_transmit_multicast - - record: node_network_transmit_packets_total - expr: node_network_transmit_packets -- name: node_exporter-16-nfs - rules: - - record: node_nfs_connections_total - expr: node_nfs_net_connections - - record: node_nfs_packets_total - expr: node_nfs_net_reads - - record: node_nfs_requests_total - expr: label_replace(label_replace(node_nfs_procedures, "proto", "$1", "version", "(.+)"), "method", "$1", "procedure", "(.+)") - - record: node_nfs_rpc_authentication_refreshes_total - expr: node_nfs_rpc_authentication_refreshes - - record: node_nfs_rpcs_total - expr: node_nfs_rpc_operations - - record: node_nfs_rpc_retransmissions_total - expr: node_nfs_rpc_retransmissions -- name: node_exporter-16-textfile - rules: - - record: node_textfile_mtime_seconds - expr: node_textfile_mtime + - name: node_exporter-16-bcache + rules: + - record: node_bcache_cache_read_races + expr: node_bcache_cache_read_races_total + - name: node_exporter-16-buddyinfo + rules: + - record: node_buddyinfo_blocks + expr: node_buddyinfo_count + - name: node_exporter-16-stat + rules: + - record: node_boot_time_seconds + expr: node_boot_time + - record: node_time_seconds + expr: node_time + - record: node_context_switches_total + expr: node_context_switches + - record: node_forks_total + expr: node_forks + - record: node_intr_total + expr: node_intr + - name: node_exporter-16-cpu + rules: + - record: node_cpu + expr: label_replace(node_cpu_seconds_total, "cpu", "$1", "cpu", "cpu(.+)") + - name: node_exporter-16-diskstats + rules: + - record: node_disk_read_bytes_total + expr: node_disk_bytes_read + - record: node_disk_written_bytes_total + expr: node_disk_bytes_written + - record: node_disk_io_time_seconds_total + expr: node_disk_io_time_ms / 1000 + - record: node_disk_io_time_weighted_seconds_total + expr: node_disk_io_time_weighted + - record: node_disk_reads_completed_total + expr: node_disk_reads_completed + - record: 
node_disk_reads_merged_total + expr: node_disk_reads_merged + - record: node_disk_read_time_seconds_total + expr: node_disk_read_time_ms / 1000 + - record: node_disk_writes_completed_total + expr: node_disk_writes_completed + - record: node_disk_writes_merged_total + expr: node_disk_writes_merged + - record: node_disk_write_time_seconds_total + expr: node_disk_write_time_ms / 1000 + - name: node_exporter-16-filesystem + rules: + - record: node_filesystem_free_bytes + expr: node_filesystem_free + - record: node_filesystem_avail_bytes + expr: node_filesystem_avail + - record: node_filesystem_size_bytes + expr: node_filesystem_size + - name: node_exporter-16-infiniband + rules: + - record: node_infiniband_port_data_received_bytes_total + expr: node_infiniband_port_data_received_bytes + - record: node_infiniband_port_data_transmitted_bytes_total + expr: node_infiniband_port_data_transmitted_bytes + - name: node_exporter-16-interrupts + rules: + - record: node_interrupts_total + expr: node_interrupts + - name: node_exporter-16-memory + rules: + - record: node_memory_Active_bytes + expr: node_memory_Active + - record: node_memory_Active_anon_bytes + expr: node_memory_Active_anon + - record: node_memory_Active_file_bytes + expr: node_memory_Active_file + - record: node_memory_AnonHugePages_bytes + expr: node_memory_AnonHugePages + - record: node_memory_AnonPages_bytes + expr: node_memory_AnonPages + - record: node_memory_Bounce_bytes + expr: node_memory_Bounce + - record: node_memory_Buffers_bytes + expr: node_memory_Buffers + - record: node_memory_Cached_bytes + expr: node_memory_Cached + - record: node_memory_CommitLimit_bytes + expr: node_memory_CommitLimit + - record: node_memory_Committed_AS_bytes + expr: node_memory_Committed_AS + - record: node_memory_DirectMap2M_bytes + expr: node_memory_DirectMap2M + - record: node_memory_DirectMap4k_bytes + expr: node_memory_DirectMap4k + - record: node_memory_Dirty_bytes + expr: node_memory_Dirty + - record: node_memory_HardwareCorrupted_bytes + expr: node_memory_HardwareCorrupted + - record: node_memory_Hugepagesize_bytes + expr: node_memory_Hugepagesize + - record: node_memory_Inactive_bytes + expr: node_memory_Inactive + - record: node_memory_Inactive_anon_bytes + expr: node_memory_Inactive_anon + - record: node_memory_Inactive_file_bytes + expr: node_memory_Inactive_file + - record: node_memory_KernelStack_bytes + expr: node_memory_KernelStack + - record: node_memory_Mapped_bytes + expr: node_memory_Mapped + - record: node_memory_MemAvailable_bytes + expr: node_memory_MemAvailable + - record: node_memory_MemFree_bytes + expr: node_memory_MemFree + - record: node_memory_MemTotal_bytes + expr: node_memory_MemTotal + - record: node_memory_Mlocked_bytes + expr: node_memory_Mlocked + - record: node_memory_NFS_Unstable_bytes + expr: node_memory_NFS_Unstable + - record: node_memory_PageTables_bytes + expr: node_memory_PageTables + - record: node_memory_Shmem_bytes + expr: node_memory_Shmem + - record: node_memory_ShmemHugePages_bytes + expr: node_memory_ShmemHugePages + - record: node_memory_ShmemPmdMapped_bytes + expr: node_memory_ShmemPmdMapped + - record: node_memory_Slab_bytes + expr: node_memory_Slab + - record: node_memory_SReclaimable_bytes + expr: node_memory_SReclaimable + - record: node_memory_SUnreclaim_bytes + expr: node_memory_SUnreclaim + - record: node_memory_SwapCached_bytes + expr: node_memory_SwapCached + - record: node_memory_SwapFree_bytes + expr: node_memory_SwapFree + - record: node_memory_SwapTotal_bytes + expr: 
node_memory_SwapTotal + - record: node_memory_Unevictable_bytes + expr: node_memory_Unevictable + - record: node_memory_VmallocChunk_bytes + expr: node_memory_VmallocChunk + - record: node_memory_VmallocTotal_bytes + expr: node_memory_VmallocTotal + - record: node_memory_VmallocUsed_bytes + expr: node_memory_VmallocUsed + - record: node_memory_Writeback_bytes + expr: node_memory_Writeback + - record: node_memory_WritebackTmp_bytes + expr: node_memory_WritebackTmp + - name: node_exporter-16-network + rules: + - record: node_network_receive_bytes_total + expr: node_network_receive_bytes + - record: node_network_receive_compressed_total + expr: node_network_receive_compressed + - record: node_network_receive_drop_total + expr: node_network_receive_drop + - record: node_network_receive_errs_total + expr: node_network_receive_errs + - record: node_network_receive_fifo_total + expr: node_network_receive_fifo + - record: node_network_receive_frame_total + expr: node_network_receive_frame + - record: node_network_receive_multicast_total + expr: node_network_receive_multicast + - record: node_network_receive_packets_total + expr: node_network_receive_packets + - record: node_network_transmit_bytes_total + expr: node_network_transmit_bytes + - record: node_network_transmit_compressed_total + expr: node_network_transmit_compressed + - record: node_network_transmit_drop_total + expr: node_network_transmit_drop + - record: node_network_transmit_errs_total + expr: node_network_transmit_errs + - record: node_network_transmit_fifo_total + expr: node_network_transmit_fifo + - record: node_network_transmit_frame_total + expr: node_network_transmit_frame + - record: node_network_transmit_multicast_total + expr: node_network_transmit_multicast + - record: node_network_transmit_packets_total + expr: node_network_transmit_packets + - name: node_exporter-16-nfs + rules: + - record: node_nfs_connections_total + expr: node_nfs_net_connections + - record: node_nfs_packets_total + expr: node_nfs_net_reads + - record: node_nfs_requests_total + expr: label_replace(label_replace(node_nfs_procedures, "proto", "$1", "version", "(.+)"), "method", "$1", "procedure", "(.+)") + - record: node_nfs_rpc_authentication_refreshes_total + expr: node_nfs_rpc_authentication_refreshes + - record: node_nfs_rpcs_total + expr: node_nfs_rpc_operations + - record: node_nfs_rpc_retransmissions_total + expr: node_nfs_rpc_retransmissions + - name: node_exporter-16-textfile + rules: + - record: node_textfile_mtime_seconds + expr: node_textfile_mtime diff --git a/docs/example-17-compatibility-rules-new-to-old.yml b/docs/example-17-compatibility-rules-new-to-old.yml index c6db522b3a..da1520c9f4 100644 --- a/docs/example-17-compatibility-rules-new-to-old.yml +++ b/docs/example-17-compatibility-rules-new-to-old.yml @@ -1,5 +1,5 @@ groups: -- name: node_exporter-17-supervisord - rules: - - record: node_supervisord_start_time_seconds - expr: node_supervisord_uptime + time() + - name: node_exporter-17-supervisord + rules: + - record: node_supervisord_start_time_seconds + expr: node_supervisord_uptime + time() diff --git a/docs/example-17-compatibility-rules.yml b/docs/example-17-compatibility-rules.yml index 6fbeaa9122..e3d4d6626d 100644 --- a/docs/example-17-compatibility-rules.yml +++ b/docs/example-17-compatibility-rules.yml @@ -1,5 +1,5 @@ groups: -- name: node_exporter-17-supervisord - rules: - - record: node_supervisord_uptime - expr: time() - node_supervisord_start_time_seconds + - name: node_exporter-17-supervisord + rules: + - record: 
node_supervisord_uptime + expr: time() - node_supervisord_start_time_seconds diff --git a/docs/node-mixin/Makefile b/docs/node-mixin/Makefile index 012a4b5006..d04b37d009 100644 --- a/docs/node-mixin/Makefile +++ b/docs/node-mixin/Makefile @@ -24,5 +24,9 @@ lint: node_alerts.yaml node_rules.yaml promtool check rules node_alerts.yaml node_rules.yaml +.PHONY: jb_install +jb_install: + jb install + clean: rm -rf dashboards_out node_alerts.yaml node_rules.yaml diff --git a/docs/node-mixin/README.md b/docs/node-mixin/README.md index 46981d629d..824385ec71 100644 --- a/docs/node-mixin/README.md +++ b/docs/node-mixin/README.md @@ -8,40 +8,38 @@ dashboards based on the metrics exported by the Node Exporter. The mixin creates recording and alerting rules for Prometheus and suitable dashboard descriptions for Grafana. -To use them, you need to have `jsonnet` (v0.13+) and `jb` installed. If you +To use them, you need to have `jsonnet` (v0.16+) and `jb` installed. If you have a working Go development environment, it's easiest to run the following: + ```bash -$ go get github.com/google/go-jsonnet/cmd/jsonnet -$ go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb +go install github.com/google/go-jsonnet/cmd/jsonnet@latest +go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest +go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest ``` -_Note: The make targets `lint` and `fmt` need the `jsonnetfmt` binary, which is -currently not included in the Go implementation of `jsonnet`. For the time -being, you have to install the [C++ version of -jsonnetfmt](https://github.com/google/jsonnet) if you want to use `make lint` -or `make fmt`._ - Next, install the dependencies by running the following command in this directory: + ```bash -$ jb install +jb install ``` You can then build the Prometheus rules files `node_alerts.yaml` and `node_rules.yaml`: + ```bash -$ make node_alerts.yaml node_rules.yaml +make node_alerts.yaml node_rules.yaml ``` You can also build a directory `dashboard_out` with the JSON dashboard files for Grafana: + ```bash -$ make dashboards_out +make dashboards_out ``` Note that some of the generated dashboards require recording rules specified in the previously generated `node_rules.yaml`. For more advanced uses of mixins, see -https://github.com/monitoring-mixins/docs. - +. 
diff --git a/docs/node-mixin/alerts/alerts.libsonnet b/docs/node-mixin/alerts/alerts.libsonnet index 9ef18d52e5..29c934f57b 100644 --- a/docs/node-mixin/alerts/alerts.libsonnet +++ b/docs/node-mixin/alerts/alerts.libsonnet @@ -8,11 +8,11 @@ alert: 'NodeFilesystemSpaceFillingUp', expr: ||| ( - node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < %(fsSpaceFillingUpWarningThreshold)d + node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpWarningThreshold)d and - predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s}[6h], 24*60*60) < 0 + predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[%(fsSpaceFillingUpPredictionWindow)s], %(nodeWarningWindowHours)s*60*60) < 0 and - node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', @@ -20,19 +20,19 @@ severity: 'warning', }, annotations: { - summary: 'Filesystem is predicted to run out of space within the next 24 hours.', - description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up.', + summary: 'Filesystem is predicted to run out of space within the next %(nodeWarningWindowHours)s hours.' % $._config, + description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up.', }, }, { alert: 'NodeFilesystemSpaceFillingUp', expr: ||| ( - node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < %(fsSpaceFillingUpCriticalThreshold)d + node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpCriticalThreshold)d and - predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s}[6h], 4*60*60) < 0 + predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], %(nodeCriticalWindowHours)s*60*60) < 0 and - node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', @@ -40,55 +40,55 @@ severity: '%(nodeCriticalSeverity)s' % $._config, }, annotations: { - summary: 'Filesystem is predicted to run out of space within the next 4 hours.', - description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast.', + summary: 'Filesystem is predicted to run out of space within the next %(nodeCriticalWindowHours)s hours.' 
% $._config, + description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast.', }, }, { alert: 'NodeFilesystemAlmostOutOfSpace', expr: ||| ( - node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 5 + node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceAvailableWarningThreshold)d and - node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, - 'for': '1h', + 'for': '30m', labels: { severity: 'warning', }, annotations: { - summary: 'Filesystem has less than 5% space left.', - description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.', + summary: 'Filesystem has less than %(fsSpaceAvailableWarningThreshold)d%% space left.' % $._config, + description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.', }, }, { alert: 'NodeFilesystemAlmostOutOfSpace', expr: ||| ( - node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 3 + node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceAvailableCriticalThreshold)d and - node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, - 'for': '1h', + 'for': '30m', labels: { severity: '%(nodeCriticalSeverity)s' % $._config, }, annotations: { - summary: 'Filesystem has less than 3% space left.', - description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.', + summary: 'Filesystem has less than %(fsSpaceAvailableCriticalThreshold)d%% space left.' 
% $._config, + description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.', }, }, { alert: 'NodeFilesystemFilesFillingUp', expr: ||| ( - node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 40 + node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 40 and - predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s}[6h], 24*60*60) < 0 + predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], %(nodeWarningWindowHours)s*60*60) < 0 and - node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', @@ -96,19 +96,19 @@ severity: 'warning', }, annotations: { - summary: 'Filesystem is predicted to run out of inodes within the next 24 hours.', - description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up.', + summary: 'Filesystem is predicted to run out of inodes within the next %(nodeWarningWindowHours)s hours.' % $._config, + description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up.', }, }, { alert: 'NodeFilesystemFilesFillingUp', expr: ||| ( - node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 20 + node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 20 and - predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s}[6h], 4*60*60) < 0 + predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], %(nodeCriticalWindowHours)s*60*60) < 0 and - node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', @@ -116,17 +116,17 @@ severity: '%(nodeCriticalSeverity)s' % $._config, }, annotations: { - summary: 'Filesystem is predicted to run out of inodes within the next 4 hours.', - description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.', + summary: 'Filesystem is predicted to run out of inodes within the next %(nodeCriticalWindowHours)s hours.' 
% $._config, + description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.', }, }, { alert: 'NodeFilesystemAlmostOutOfFiles', expr: ||| ( - node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 5 + node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 5 and - node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', @@ -135,16 +135,16 @@ }, annotations: { summary: 'Filesystem has less than 5% inodes left.', - description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.', + description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.', }, }, { alert: 'NodeFilesystemAlmostOutOfFiles', expr: ||| ( - node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 3 + node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 3 and - node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', @@ -153,13 +153,13 @@ }, annotations: { summary: 'Filesystem has less than 3% inodes left.', - description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.', + description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.', }, }, { alert: 'NodeNetworkReceiveErrs', expr: ||| - increase(node_network_receive_errs_total[2m]) > 10 + rate(node_network_receive_errs_total{%(nodeExporterSelector)s}[2m]) / rate(node_network_receive_packets_total{%(nodeExporterSelector)s}[2m]) > 0.01 ||| % $._config, 'for': '1h', labels: { @@ -173,7 +173,7 @@ { alert: 'NodeNetworkTransmitErrs', expr: ||| - increase(node_network_transmit_errs_total[2m]) > 10 + rate(node_network_transmit_errs_total{%(nodeExporterSelector)s}[2m]) / rate(node_network_transmit_packets_total{%(nodeExporterSelector)s}[2m]) > 0.01 ||| % $._config, 'for': '1h', labels: { @@ -187,11 +187,11 @@ { alert: 'NodeHighNumberConntrackEntriesUsed', expr: ||| - (node_nf_conntrack_entries / node_nf_conntrack_entries_limit) > 0.75 + (node_nf_conntrack_entries{%(nodeExporterSelector)s} / node_nf_conntrack_entries_limit) > 0.75 ||| % $._config, annotations: { summary: 'Number of conntrack are getting close to the limit.', - description: '{{ $value | humanizePercentage }} of conntrack entries are used.', + description: '{{ $labels.instance }} {{ $value | humanizePercentage }} of conntrack entries are used.', }, labels: { severity: 'warning', @@ -204,7 +204,7 @@ ||| % $._config, annotations: { summary: 'Node Exporter text file collector failed to scrape.', - description: 
'Node Exporter text file collector failed to scrape.', + description: 'Node Exporter text file collector on {{ $labels.instance }} failed to scrape.', }, labels: { severity: 'warning', @@ -214,15 +214,15 @@ alert: 'NodeClockSkewDetected', expr: ||| ( - node_timex_offset_seconds > 0.05 + node_timex_offset_seconds{%(nodeExporterSelector)s} > 0.05 and - deriv(node_timex_offset_seconds[5m]) >= 0 + deriv(node_timex_offset_seconds{%(nodeExporterSelector)s}[5m]) >= 0 ) or ( - node_timex_offset_seconds < -0.05 + node_timex_offset_seconds{%(nodeExporterSelector)s} < -0.05 and - deriv(node_timex_offset_seconds[5m]) <= 0 + deriv(node_timex_offset_seconds{%(nodeExporterSelector)s}[5m]) <= 0 ) ||| % $._config, 'for': '10m', @@ -231,13 +231,15 @@ }, annotations: { summary: 'Clock skew detected.', - message: 'Clock on {{ $labels.instance }} is out of sync by more than 300s. Ensure NTP is configured correctly on this host.', + description: 'Clock at {{ $labels.instance }} is out of sync by more than 0.05s. Ensure NTP is configured correctly on this host.', }, }, { alert: 'NodeClockNotSynchronising', expr: ||| - min_over_time(node_timex_sync_status[5m]) == 0 + min_over_time(node_timex_sync_status{%(nodeExporterSelector)s}[5m]) == 0 + and + node_timex_maxerror_seconds{%(nodeExporterSelector)s} >= 16 ||| % $._config, 'for': '10m', labels: { @@ -245,7 +247,192 @@ }, annotations: { summary: 'Clock not synchronising.', - message: 'Clock on {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host.', + description: 'Clock at {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host.', + }, + }, + { + alert: 'NodeRAIDDegraded', + expr: ||| + node_md_disks_required{%(nodeExporterSelector)s,%(diskDeviceSelector)s} - ignoring (state) (node_md_disks{state="active",%(nodeExporterSelector)s,%(diskDeviceSelector)s}) > 0 + ||| % $._config, + 'for': '15m', + labels: { + severity: 'critical', + }, + annotations: { + summary: 'RAID Array is degraded.', + description: "RAID array '{{ $labels.device }}' at {{ $labels.instance }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.", + }, + }, + { + alert: 'NodeRAIDDiskFailure', + expr: ||| + node_md_disks{state="failed",%(nodeExporterSelector)s,%(diskDeviceSelector)s} > 0 + ||| % $._config, + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Failed device in RAID array.', + description: "At least one device in RAID array at {{ $labels.instance }} failed.
Array '{{ $labels.device }}' needs attention and possibly a disk swap.", + }, + }, + { + alert: 'NodeFileDescriptorLimit', + expr: ||| + ( + node_filefd_allocated{%(nodeExporterSelector)s} * 100 / node_filefd_maximum{%(nodeExporterSelector)s} > 70 + ) + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Kernel is predicted to exhaust the file descriptor limit soon.', + description: 'File descriptor usage at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}% of the limit.', + }, + }, + { + alert: 'NodeFileDescriptorLimit', + expr: ||| + ( + node_filefd_allocated{%(nodeExporterSelector)s} * 100 / node_filefd_maximum{%(nodeExporterSelector)s} > 90 + ) + ||| % $._config, + 'for': '15m', + labels: { + severity: 'critical', + }, + annotations: { + summary: 'Kernel is predicted to exhaust the file descriptor limit soon.', + description: 'File descriptor usage at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}% of the limit.', + }, + }, + { + alert: 'NodeCPUHighUsage', + expr: ||| + sum without(mode) (avg without (cpu) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode!~"idle|iowait"}[2m]))) * 100 > %(cpuHighUsageThreshold)d + ||| % $._config, + 'for': '15m', + labels: { + severity: 'info', + }, + annotations: { + summary: 'High CPU usage.', + description: ||| + CPU usage at {{ $labels.instance }} has been above %(cpuHighUsageThreshold)d%% for the last 15 minutes and is currently at {{ printf "%%.2f" $value }}%%. + ||| % $._config, + }, + }, + { + alert: 'NodeSystemSaturation', + expr: ||| + node_load1{%(nodeExporterSelector)s} + / count without (cpu, mode) (node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle"}) > %(systemSaturationPerCoreThreshold)d + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'System saturated: load per core is very high.', + description: ||| + System load per core at {{ $labels.instance }} has been above %(systemSaturationPerCoreThreshold)d for the last 15 minutes and is currently at {{ printf "%%.2f" $value }}. + This might indicate resource saturation on this instance and can cause it to become unresponsive. + ||| % $._config, + }, + }, + { + alert: 'NodeMemoryMajorPagesFaults', + expr: ||| + rate(node_vmstat_pgmajfault{%(nodeExporterSelector)s}[5m]) > %(memoryMajorPagesFaultsThreshold)d + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Memory major page faults are occurring at a very high rate.', + description: ||| + Memory major page faults have been occurring at a rate above %(memoryMajorPagesFaultsThreshold)d per second at {{ $labels.instance }} for the last 15 minutes; the rate is currently at {{ printf "%%.2f" $value }}. + Please check that there is enough memory available on this instance. + ||| % $._config, + }, + }, + { + alert: 'NodeMemoryHighUtilization', + expr: ||| + 100 - (node_memory_MemAvailable_bytes{%(nodeExporterSelector)s} / node_memory_MemTotal_bytes{%(nodeExporterSelector)s} * 100) > %(memoryHighUtilizationThreshold)d + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Host is running out of memory.', + description: ||| + Memory is filling up at {{ $labels.instance }}: utilization has been above %(memoryHighUtilizationThreshold)d%% for the last 15 minutes and is currently at {{ printf "%%.2f" $value }}%%.
+ ||| % $._config, + }, + }, + { + alert: 'NodeDiskIOSaturation', + expr: ||| + rate(node_disk_io_time_weighted_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[5m]) > %(diskIOSaturationThreshold)d + ||| % $._config, + 'for': '30m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Disk IO queue is high.', + description: ||| + The disk IO queue (aqu-sz) is high on {{ $labels.device }} at {{ $labels.instance }}: it has been above %(diskIOSaturationThreshold)d for the last 30 minutes and is currently at {{ printf "%%.2f" $value }}. + This symptom might indicate disk saturation. + ||| % $._config, + }, + }, + { + alert: 'NodeSystemdServiceFailed', + expr: ||| + node_systemd_unit_state{%(nodeExporterSelector)s, state="failed"} == 1 + ||| % $._config, + 'for': '5m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Systemd service has entered failed state.', + description: 'Systemd service {{ $labels.name }} has entered failed state at {{ $labels.instance }}.', + }, + }, + { + alert: 'NodeSystemdServiceCrashlooping', + expr: ||| + increase(node_systemd_service_restart_total{%(nodeExporterSelector)s}[5m]) > 2 + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Systemd service keeps restarting, possibly crash looping.', + description: 'Systemd service {{ $labels.name }} has been restarted too many times at {{ $labels.instance }} over the last 15 minutes. Please check if the service is crash looping.', + }, + }, + { + alert: 'NodeBondingDegraded', + expr: ||| + (node_bonding_slaves{%(nodeExporterSelector)s} - node_bonding_active{%(nodeExporterSelector)s}) != 0 + ||| % $._config, + 'for': '5m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Bonding interface is degraded.', + description: 'Bonding interface {{ $labels.master }} on {{ $labels.instance }} is in a degraded state due to one or more slave failures.', }, }, ], diff --git a/docs/node-mixin/config.libsonnet b/docs/node-mixin/config.libsonnet index c06252cba5..1a4b3caae4 100644 --- a/docs/node-mixin/config.libsonnet +++ b/docs/node-mixin/config.libsonnet @@ -16,6 +16,14 @@ // alerting, you can exclude them here, e.g. 'fstype!="tmpfs"'. fsSelector: 'fstype!=""', + // Select the mountpoint for filesystem-related queries. If left + // empty, all mountpoints are selected. For example, if you have a + // special purpose tmpfs instance that has a fixed size and will + // always be 100% full, but you still want alerts and dashboards for + // other tmpfs instances, you can exclude those by mountpoint prefix + // like so: 'mountpoint!~"/var/lib/foo.*"'. + fsMountpointSelector: 'mountpoint!=""', + // Select the device for disk-related queries. If left empty, all // devices are selected. If you have unusual devices you don't // want to include in dashboards and alerting, you can exclude @@ -35,18 +43,64 @@ // just a warning for K8s nodes. nodeCriticalSeverity: 'critical', + // CPU utilization (%) on which to trigger the + // 'NodeCPUHighUsage' alert. + cpuHighUsageThreshold: 90, + // Load average 1m (per core) on which to trigger the + // 'NodeSystemSaturation' alert. + systemSaturationPerCoreThreshold: 2, + + // Some of the alerts use predict_linear() to fire alerts ahead of time to + // prevent unrecoverable situations (e.g. no more disk space). However, the + // node may have automatic processes (cronjobs) in place to prevent that + // within a certain time window; this may not align with the default time + // window of these alerts.
This can cause these alerts to start flapping. + // By reducing the alerts' time window, such automated processes get more + // time to resolve the situation before the alerts fire. + nodeWarningWindowHours: '24', + nodeCriticalWindowHours: '4', + // Available disk space (%) thresholds on which to trigger the // 'NodeFilesystemSpaceFillingUp' alerts. These alerts fire if the disk // usage grows in a way that it is predicted to run out in 4h or 1d // and if the provided thresholds have been reached right now. - // In some cases you'll want to adjust these, e.g. by default Kubernetes + // In some cases you'll want to adjust these, e.g., by default, Kubernetes // runs the image garbage collection when the disk usage reaches 85% // of its available space. In that case, you'll want to reduce the // critical threshold below to something like 14 or 15, otherwise // the alert could fire under normal node usage. + // Additionally, the prediction window for the alert can be configured + // to account for environments where disk usage can fluctuate within + // a short time frame. By extending the prediction window, you can + // reduce false positives caused by temporary spikes, providing a + // more accurate prediction of disk space issues. fsSpaceFillingUpWarningThreshold: 40, fsSpaceFillingUpCriticalThreshold: 20, + fsSpaceFillingUpPredictionWindow: '6h', + + // Available disk space (%) thresholds on which to trigger the + // 'NodeFilesystemAlmostOutOfSpace' alerts. + fsSpaceAvailableWarningThreshold: 5, + fsSpaceAvailableCriticalThreshold: 3, + + // Memory utilization (%) level on which to trigger the + // 'NodeMemoryHighUtilization' alert. + memoryHighUtilizationThreshold: 90, + + // Threshold for the rate of memory major page faults to trigger the + // 'NodeMemoryMajorPagesFaults' alert. + memoryMajorPagesFaultsThreshold: 500, + + // Disk IO queue level above which to trigger the + // 'NodeDiskIOSaturation' alert. + diskIOSaturationThreshold: 10, + + rateInterval: '5m', + // Opt-in for multi-cluster support.
+ showMultiCluster: false, + clusterLabel: 'cluster', - grafana_prefix: '', + dashboardNamePrefix: 'Node Exporter / ', + dashboardTags: ['node-exporter-mixin'], }, } diff --git a/docs/node-mixin/dashboards/node.libsonnet b/docs/node-mixin/dashboards/node.libsonnet index 78241ed970..41e2903389 100644 --- a/docs/node-mixin/dashboards/node.libsonnet +++ b/docs/node-mixin/dashboards/node.libsonnet @@ -1,254 +1,8 @@ -local grafana = import 'grafonnet/grafana.libsonnet'; -local dashboard = grafana.dashboard; -local row = grafana.row; -local prometheus = grafana.prometheus; -local template = grafana.template; -local graphPanel = grafana.graphPanel; -local promgrafonnet = import 'promgrafonnet/promgrafonnet.libsonnet'; -local gauge = promgrafonnet.gauge; - { + local nodemixin = import '../lib/prom-mixin.libsonnet', grafanaDashboards+:: { - 'nodes.json': - local idleCPU = - graphPanel.new( - 'CPU Usage', - datasource='$datasource', - span=6, - format='percentunit', - max=1, - min=0, - stack=true, - ) - .addTarget(prometheus.target( - ||| - ( - (1 - rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle", instance="$instance"}[$__interval])) - / ignoring(cpu) group_left - count without (cpu)( node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle", instance="$instance"}) - ) - ||| % $._config, - legendFormat='{{cpu}}', - intervalFactor=5, - interval='1m', - )); - - local systemLoad = - graphPanel.new( - 'Load Average', - datasource='$datasource', - span=6, - format='short', - min=0, - fill=0, - ) - .addTarget(prometheus.target('node_load1{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='1m load average')) - .addTarget(prometheus.target('node_load5{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='5m load average')) - .addTarget(prometheus.target('node_load15{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='15m load average')) - .addTarget(prometheus.target('count(node_cpu_seconds_total{%(nodeExporterSelector)s, instance="$instance", mode="idle"})' % $._config, legendFormat='logical cores')); - - local memoryGraph = - graphPanel.new( - 'Memory Usage', - datasource='$datasource', - span=9, - format='bytes', - stack=true, - min=0, - ) - .addTarget(prometheus.target( - ||| - ( - node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance"} - - - node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance"} - - - node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance"} - - - node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance"} - ) - ||| % $._config, legendFormat='memory used' - )) - .addTarget(prometheus.target('node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='memory buffers')) - .addTarget(prometheus.target('node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='memory cached')) - .addTarget(prometheus.target('node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='memory free')); - - // TODO: It would be nicer to have a gauge that gets a 0-1 range and displays it as a percentage 0%-100%. - // This needs to be added upstream in the promgrafonnet library and then changed here. 
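For illustration only (not part of this change, and assuming the mixin is consumed through its top-level mixin.libsonnet with the usual _config override pattern used by Prometheus mixins), the _config options added in config.libsonnet above are meant to be overridden by the consumer rather than edited in place; a minimal sketch with purely illustrative values:

  // Hypothetical consumer-side override of the node-mixin configuration.
  local nodeMixin = (import 'node-mixin/mixin.libsonnet') + {
    _config+:: {
      // Hide a known always-full tmpfs mount from the filesystem alerts.
      fsMountpointSelector: 'mountpoint!~"/var/lib/foo.*"',
      // Alert earlier on low space and use a shorter warning prediction window.
      fsSpaceAvailableCriticalThreshold: 10,
      nodeWarningWindowHours: '12',
      // Also render the multi-cluster USE dashboard defined in use.libsonnet.
      showMultiCluster: true,
    },
  };
  // Dashboards can then be rendered from this object, e.g.
  // nodeMixin.grafanaDashboards['nodes.json'].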
- local memoryGauge = gauge.new( - 'Memory Usage', - ||| - 100 - - ( - node_memory_MemAvailable_bytes{%(nodeExporterSelector)s, instance="$instance"} - / - node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance"} - * 100 - ) - ||| % $._config, - ).withLowerBeingBetter(); - - local diskIO = - graphPanel.new( - 'Disk I/O', - datasource='$datasource', - span=6, - min=0, - fill=0, - ) - // TODO: Does it make sense to have those three in the same panel? - .addTarget(prometheus.target( - 'rate(node_disk_read_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__interval])' % $._config, - legendFormat='{{device}} read', - interval='1m', - )) - .addTarget(prometheus.target( - 'rate(node_disk_written_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__interval])' % $._config, - legendFormat='{{device}} written', - interval='1m', - )) - .addTarget(prometheus.target( - 'rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__interval])' % $._config, - legendFormat='{{device}} io time', - interval='1m', - )) + - { - seriesOverrides: [ - { - alias: '/ read| written/', - yaxis: 1, - }, - { - alias: '/ io time/', - yaxis: 2, - }, - ], - yaxes: [ - self.yaxe(format='bytes'), - self.yaxe(format='s'), - ], - }; - - // TODO: Somehow partition this by device while excluding read-only devices. - local diskSpaceUsage = - graphPanel.new( - 'Disk Space Usage', - datasource='$datasource', - span=6, - format='bytes', - min=0, - fill=1, - stack=true, - ) - .addTarget(prometheus.target( - ||| - sum( - max by (device) ( - node_filesystem_size_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s} - - - node_filesystem_avail_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s} - ) - ) - ||| % $._config, - legendFormat='used', - )) - .addTarget(prometheus.target( - ||| - sum( - max by (device) ( - node_filesystem_avail_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s} - ) - ) - ||| % $._config, - legendFormat='available', - )) + - { - seriesOverrides: [ - { - alias: 'used', - color: '#E0B400', - }, - { - alias: 'available', - color: '#73BF69', - }, - ], - }; - - local networkReceived = - graphPanel.new( - 'Network Received', - datasource='$datasource', - span=6, - format='bytes', - min=0, - fill=0, - ) - .addTarget(prometheus.target( - 'rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, instance="$instance", device!="lo"}[$__interval])' % $._config, - legendFormat='{{device}}', - interval='1m', - )); - - local networkTransmitted = - graphPanel.new( - 'Network Transmitted', - datasource='$datasource', - span=6, - format='bytes', - min=0, - fill=0, - ) - .addTarget(prometheus.target( - 'rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, instance="$instance", device!="lo"}[$__interval])' % $._config, - legendFormat='{{device}}', - interval='1m', - )); - - dashboard.new('Nodes', time_from='now-1h') - .addTemplate( - { - current: { - text: 'Prometheus', - value: 'Prometheus', - }, - hide: 0, - label: null, - name: 'datasource', - options: [], - query: 'prometheus', - refresh: 1, - regex: '', - type: 'datasource', - }, - ) - .addTemplate( - template.new( - 'instance', - '$datasource', - 'label_values(node_exporter_build_info{%(nodeExporterSelector)s}, instance)' % $._config, - refresh='time', - ) - ) - .addRow( - row.new() - .addPanel(idleCPU) - .addPanel(systemLoad) - ) - .addRow( - row.new() - 
.addPanel(memoryGraph) - .addPanel(memoryGauge) - ) - .addRow( - row.new() - .addPanel(diskIO) - .addPanel(diskSpaceUsage) - ) - .addRow( - row.new() - .addPanel(networkReceived) - .addPanel(networkTransmitted) - ), + 'nodes.json': nodemixin.new(config=$._config, platform='Linux', uid=std.md5('nodes.json')).dashboard, + 'nodes-darwin.json': nodemixin.new(config=$._config, platform='Darwin', uid=std.md5('nodes-darwin.json')).dashboard, + 'nodes-aix.json': nodemixin.new(config=$._config, platform='AIX', uid=std.md5('nodes-aix.json')).dashboard, }, } diff --git a/docs/node-mixin/dashboards/use.libsonnet b/docs/node-mixin/dashboards/use.libsonnet index 3c855a6064..cfdaf41604 100644 --- a/docs/node-mixin/dashboards/use.libsonnet +++ b/docs/node-mixin/dashboards/use.libsonnet @@ -1,275 +1,474 @@ -local g = import 'grafana-builder/grafana.libsonnet'; +local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet'; +local dashboard = grafana.dashboard; +local variable = dashboard.variable; +local row = grafana.panel.row; +local prometheus = grafana.query.prometheus; + +local timeSeriesPanel = grafana.panel.timeSeries; +local tsOptions = timeSeriesPanel.options; +local tsStandardOptions = timeSeriesPanel.standardOptions; +local tsQueryOptions = timeSeriesPanel.queryOptions; +local tsCustom = timeSeriesPanel.fieldConfig.defaults.custom; +local tsLegend = tsOptions.legend; + +local c = import '../config.libsonnet'; + +local datasource = variable.datasource.new( + 'datasource', 'prometheus' +); + +local tsCommonPanelOptions = + variable.query.withDatasourceFromVariable(datasource) + + tsCustom.stacking.withMode('normal') + + tsCustom.withFillOpacity(100) + + tsCustom.withShowPoints('never') + + tsLegend.withShowLegend(false) + + tsOptions.tooltip.withMode('multi') + + tsOptions.tooltip.withSort('desc'); + +local CPUUtilisation = + timeSeriesPanel.new( + 'CPU Utilisation', + ) + + tsCommonPanelOptions + + tsStandardOptions.withUnit('percentunit'); + +local CPUSaturation = + // TODO: Is this a useful panel? At least there should be some explanation how load + // average relates to the "CPU saturation" in the title. 
+ timeSeriesPanel.new( + 'CPU Saturation (Load1 per CPU)', + ) + + tsCommonPanelOptions + + tsStandardOptions.withUnit('percentunit'); + +local memoryUtilisation = + timeSeriesPanel.new( + 'Memory Utilisation', + ) + + tsCommonPanelOptions + + tsStandardOptions.withUnit('percentunit'); + +local memorySaturation = + timeSeriesPanel.new( + 'Memory Saturation (Major Page Faults)', + ) + + tsCommonPanelOptions + + tsStandardOptions.withUnit('rds'); + +local networkOverrides = tsStandardOptions.withOverrides( + [ + tsStandardOptions.override.byRegexp.new('/Transmit/') + + tsStandardOptions.override.byRegexp.withPropertiesFromOptions( + tsCustom.withTransform('negative-Y') + ), + ] +); + +local networkUtilisation = + timeSeriesPanel.new( + 'Network Utilisation (Bytes Receive/Transmit)', + ) + + tsCommonPanelOptions + + tsStandardOptions.withUnit('Bps') + + networkOverrides; + +local networkSaturation = + timeSeriesPanel.new( + 'Network Saturation (Drops Receive/Transmit)', + ) + + tsCommonPanelOptions + + tsStandardOptions.withUnit('Bps') + + networkOverrides; + +local diskIOUtilisation = + timeSeriesPanel.new( + 'Disk IO Utilisation', + ) + + tsCommonPanelOptions + + tsStandardOptions.withUnit('percentunit'); + +local diskIOSaturation = + timeSeriesPanel.new( + 'Disk IO Saturation', + ) + + tsCommonPanelOptions + + tsStandardOptions.withUnit('percentunit'); + +local diskSpaceUtilisation = + timeSeriesPanel.new( + 'Disk Space Utilisation', + ) + + tsCommonPanelOptions + + tsStandardOptions.withUnit('percentunit'); { - grafanaDashboards+:: { - 'node-cluster-rsrc-use.json': - local legendLink = '%s/dashboard/file/node-rsrc-use.json' % $._config.grafana_prefix; + _clusterVariable:: + variable.query.new('cluster') + + variable.query.withDatasourceFromVariable(datasource) + + variable.query.queryTypes.withLabelValues( + $._config.clusterLabel, + 'node_time_seconds', + ) + + (if $._config.showMultiCluster then variable.query.generalOptions.showOnDashboard.withLabelAndValue() else variable.query.generalOptions.showOnDashboard.withNothing()) + + variable.query.refresh.onTime() + + variable.query.selectionOptions.withIncludeAll(false) + + variable.query.withSort(asc=true), - g.dashboard('USE Method / Cluster') - .addRow( - g.row('CPU') - .addPanel( - g.panel('CPU Utilisation') + - g.queryPanel(||| - ( - instance:node_cpu_utilisation:rate1m{%(nodeExporterSelector)s} - * - instance:node_num_cpu:sum{%(nodeExporterSelector)s} - ) - / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s})) - ||| % $._config, '{{instance}}', legendLink) + - g.stack + - { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) }, - ) - .addPanel( - // TODO: Is this a useful panel? At least there should be some explanation how load - // average relates to the "CPU saturation" in the title. - g.panel('CPU Saturation (load1 per CPU)') + - g.queryPanel(||| - instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s} - / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s})) - ||| % $._config, '{{instance}}', legendLink) + - g.stack + - // TODO: Does `max: 1` make sense? The stack can go over 1 in high-load scenarios. 
- { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) }, - ) - ) - .addRow( - g.row('Memory') - .addPanel( - g.panel('Memory Utilisation') + - g.queryPanel(||| - instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s} - / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s})) - ||| % $._config, '{{instance}}', legendLink) + - g.stack + - { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) }, - ) - .addPanel( - g.panel('Memory Saturation (Major Page Faults)') + - g.queryPanel('instance:node_vmstat_pgmajfault:rate1m{%(nodeExporterSelector)s}' % $._config, '{{instance}}', legendLink) + - g.stack + - { yaxes: g.yaxes('rps') }, - ) - ) - .addRow( - g.row('Network') - .addPanel( - g.panel('Net Utilisation (Bytes Receive/Transmit)') + - g.queryPanel( - [ - 'instance:node_network_receive_bytes_excluding_lo:rate1m{%(nodeExporterSelector)s}' % $._config, - 'instance:node_network_transmit_bytes_excluding_lo:rate1m{%(nodeExporterSelector)s}' % $._config, - ], - ['{{instance}} Receive', '{{instance}} Transmit'], - legendLink, - ) + - g.stack + - { - yaxes: g.yaxes({ format: 'Bps', min: null }), - seriesOverrides: [ - { - alias: '/ Receive/', - stack: 'A', - }, - { - alias: '/ Transmit/', - stack: 'B', - transform: 'negative-Y', - }, - ], - }, - ) - .addPanel( - g.panel('Net Saturation (Drops Receive/Transmit)') + - g.queryPanel( - [ - 'instance:node_network_receive_drop_excluding_lo:rate1m{%(nodeExporterSelector)s}' % $._config, - 'instance:node_network_transmit_drop_excluding_lo:rate1m{%(nodeExporterSelector)s}' % $._config, - ], - ['{{instance}} Receive', '{{instance}} Transmit'], - legendLink, - ) + - g.stack + - { - yaxes: g.yaxes({ format: 'rps', min: null }), - seriesOverrides: [ - { - alias: '/ Receive/', - stack: 'A', - }, - { - alias: '/ Transmit/', - stack: 'B', - transform: 'negative-Y', - }, - ], - }, - ) - ) - .addRow( - g.row('Disk IO') - .addPanel( - g.panel('Disk IO Utilisation') + - // Full utilisation would be all disks on each node spending an average of - // 1 second per second doing I/O, normalize by metric cardinality for stacked charts. - // TODO: Does the partition by device make sense? Using the most utilized device per - // instance might make more sense. 
- g.queryPanel(||| - instance_device:node_disk_io_time_seconds:rate1m{%(nodeExporterSelector)s} - / scalar(count(instance_device:node_disk_io_time_seconds:rate1m{%(nodeExporterSelector)s})) - ||| % $._config, '{{instance}} {{device}}', legendLink) + - g.stack + - { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) }, - ) - .addPanel( - g.panel('Disk IO Saturation') + - g.queryPanel(||| - instance_device:node_disk_io_time_weighted_seconds:rate1m{%(nodeExporterSelector)s} - / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate1m{%(nodeExporterSelector)s})) - ||| % $._config, '{{instance}} {{device}}', legendLink) + - g.stack + - { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) }, - ) - ) - .addRow( - g.row('Disk Space') - .addPanel( - g.panel('Disk Space Utilisation') + - g.queryPanel(||| - sum without (device) ( - max without (fstype, mountpoint) ( - node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s} - node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s} - ) - ) - / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s}))) - ||| % $._config, '{{instance}}', legendLink) + - g.stack + - { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) }, - ), - ), + grafanaDashboards+:: { + 'node-rsrc-use.json': + dashboard.new( + '%sUSE Method / Node' % $._config.dashboardNamePrefix, + ) + + dashboard.time.withFrom('now-1h') + + dashboard.withTags($._config.dashboardTags) + + dashboard.withTimezone('utc') + + dashboard.withRefresh('30s') + + dashboard.graphTooltip.withSharedCrosshair() + + dashboard.withUid(std.md5('node-rsrc-use.json')) + + dashboard.withVariables([ + datasource, + $._clusterVariable, + variable.query.new('instance') + + variable.query.withDatasourceFromVariable(datasource) + + variable.query.queryTypes.withLabelValues( + 'instance', + 'node_exporter_build_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config, + ) + + variable.query.refresh.onTime() + + variable.query.withSort(asc=true), + ]) + + dashboard.withPanels( + grafana.util.grid.makeGrid([ + row.new('CPU') + + row.withPanels([ + CPUUtilisation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Utilisation')]), + CPUSaturation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Saturation')]), + ]), + row.new('Memory') + + row.withPanels([ + memoryUtilisation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Utilisation')]), + memorySaturation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Major page Faults')]), + ]), + row.new('Network') + + row.withPanels([ + networkUtilisation + tsQueryOptions.withTargets([ + prometheus.new('$datasource', 'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", 
%(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Receive'), + prometheus.new('$datasource', 'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Transmit'), + ]), + networkSaturation + tsQueryOptions.withTargets([ + prometheus.new('$datasource', 'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Receive'), + prometheus.new('$datasource', 'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Transmit'), + ]), + ]), + row.new('Disk IO') + + row.withPanels([ + diskIOUtilisation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('{{device}}')]), + diskIOSaturation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('{{device}}')]), + ]), + ], panelWidth=12, panelHeight=7) + + grafana.util.grid.makeGrid([ + row.new('Disk Space') + + row.withPanels([ + diskSpaceUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + sort_desc(1 - + ( + max without (mountpoint, fstype) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"}) + / + max without (mountpoint, fstype) (node_filesystem_size_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"}) + ) != 0 + ) + ||| % $._config + ) + prometheus.withLegendFormat('{{device}}'), + ]), + ]), + ], panelWidth=24, panelHeight=7, startY=34), + ), + 'node-cluster-rsrc-use.json': + dashboard.new( + '%sUSE Method / Cluster' % $._config.dashboardNamePrefix, + ) + + dashboard.time.withFrom('now-1h') + + dashboard.withTags($._config.dashboardTags) + + dashboard.withTimezone('utc') + + dashboard.withRefresh('30s') + + dashboard.graphTooltip.withSharedCrosshair() + + dashboard.withUid(std.md5('node-cluster-rsrc-use.json')) + + dashboard.withVariables([ + datasource, + $._clusterVariable, + ]) + + dashboard.withPanels( + grafana.util.grid.makeGrid([ + row.new('CPU') + + row.withPanels([ + CPUUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + (( + instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + * + instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + ) != 0 ) + / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ||| % $._config + ) + prometheus.withLegendFormat('{{ instance }}'), + ]), + CPUSaturation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + ( + instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ) != 0 + ||| % $._config + ) + 
prometheus.withLegendFormat('{{ instance }}'), + ]), + ]), + row.new('Memory') + + row.withPanels([ + memoryUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + ( + instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ) != 0 + ||| % $._config + ) + prometheus.withLegendFormat('{{ instance }}'), + ]), + memorySaturation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + 'instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config + ) + prometheus.withLegendFormat('{{ instance }}'), + ]), + ]), + row.new('Network') + + row.withPanels([ + networkUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + 'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config + ) + prometheus.withLegendFormat('{{ instance }} Receive'), + prometheus.new( + '$datasource', + 'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config + ) + prometheus.withLegendFormat('{{ instance }} Transmit'), + ]), + networkSaturation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + 'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config + ) + prometheus.withLegendFormat('{{ instance }} Receive'), + prometheus.new( + '$datasource', + 'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config + ) + prometheus.withLegendFormat('{{ instance }} Transmit'), + ]), + ]), + row.new('Disk IO') + + row.withPanels([ + diskIOUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ||| % $._config + ) + prometheus.withLegendFormat('{{ instance }} {{device}}'), + ]), + diskIOSaturation + tsQueryOptions.withTargets([prometheus.new( + '$datasource', + ||| + instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ||| % $._config + ) + prometheus.withLegendFormat('{{ instance }} {{device}}')]), + ]), + ], panelWidth=12, panelHeight=7) + + grafana.util.grid.makeGrid([ + row.new('Disk Space') + + row.withPanels([ + diskSpaceUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + sum without (device) ( + max without (fstype, mountpoint) (( + node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"} + - + node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"} + ) != 0) + ) + / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"}))) + ||| 
% $._config + ) + prometheus.withLegendFormat('{{ instance }}'), + ]), + ]), + ], panelWidth=24, panelHeight=7, startY=34), + ), + } + + if $._config.showMultiCluster then { + 'node-multicluster-rsrc-use.json': + dashboard.new( + '%sUSE Method / Multi-cluster' % $._config.dashboardNamePrefix, + ) + + dashboard.time.withFrom('now-1h') + + dashboard.withTags($._config.dashboardTags) + + dashboard.withTimezone('utc') + + dashboard.withRefresh('30s') + + dashboard.graphTooltip.withSharedCrosshair() + + dashboard.withUid(std.md5('node-multicluster-rsrc-use.json')) + + dashboard.withVariables([ + datasource, + ]) + + dashboard.withPanels( + grafana.util.grid.makeGrid([ + row.new('CPU') + + row.withPanels([ + CPUUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + sum( + (( + instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s} + * + instance:node_num_cpu:sum{%(nodeExporterSelector)s} + ) != 0) + / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s})) + ) by (%(clusterLabel)s) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'), + ]), + CPUSaturation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + sum(( + instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s} + / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s})) + ) != 0) by (%(clusterLabel)s) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'), + ]), + ]), + row.new('Memory') + + row.withPanels([ + memoryUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + sum(( + instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s} + / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s})) + ) != 0) by (%(clusterLabel)s) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'), + ]), + memorySaturation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + sum(( + instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by (%(clusterLabel)s) + ||| + % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'), + ]), + ]), + row.new('Network') + + row.withPanels([ + networkUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + sum(( + instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by (%(clusterLabel)s) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Receive'), + prometheus.new( + '$datasource', + ||| + sum(( + instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by (%(clusterLabel)s) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Transmit'), + ]), + networkSaturation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + sum(( + instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by (%(clusterLabel)s) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Receive'), + prometheus.new( + '$datasource', + ||| + sum(( + instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by (%(clusterLabel)s) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Transmit'), + ]), + ]), + row.new('Disk IO') + + row.withPanels([ + diskIOUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + 
||| + sum(( + instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s} + / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s})) + ) != 0) by (%(clusterLabel)s, device) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{device}}'), + ]), + diskIOSaturation + tsQueryOptions.withTargets([prometheus.new( + '$datasource', + ||| + sum(( + instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s} + / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s})) + ) != 0) by (%(clusterLabel)s, device) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{device}}')]), + ]), - 'node-rsrc-use.json': - g.dashboard('USE Method / Node') - .addTemplate('instance', 'up{%(nodeExporterSelector)s}' % $._config, 'instance') - .addRow( - g.row('CPU') - .addPanel( - g.panel('CPU Utilisation') + - g.queryPanel('instance:node_cpu_utilisation:rate1m{%(nodeExporterSelector)s, instance="$instance"}' % $._config, 'Utilisation') + - { - yaxes: g.yaxes('percentunit'), - legend+: { show: false }, - }, - ) - .addPanel( - // TODO: Is this a useful panel? At least there should be some explanation how load - // average relates to the "CPU saturation" in the title. - g.panel('CPU Saturation (Load1 per CPU)') + - g.queryPanel('instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, instance="$instance"}' % $._config, 'Saturation') + - { - yaxes: g.yaxes('percentunit'), - legend+: { show: false }, - }, - ) - ) - .addRow( - g.row('Memory') - .addPanel( - g.panel('Memory Utilisation') + - g.queryPanel('instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(nodeExporterSelector)s, instance="$instance"}' % $._config, 'Memory') + - { yaxes: g.yaxes('percentunit') }, - ) - .addPanel( - g.panel('Memory Saturation (Major Page Faults)') + - g.queryPanel('instance:node_vmstat_pgmajfault:rate1m{%(nodeExporterSelector)s, instance="$instance"}' % $._config, 'Major page faults') + - { - yaxes: g.yaxes('short'), - legend+: { show: false }, - }, - ) - ) - .addRow( - g.row('Net') - .addPanel( - g.panel('Net Utilisation (Bytes Receive/Transmit)') + - g.queryPanel( - [ - 'instance:node_network_receive_bytes_excluding_lo:rate1m{%(nodeExporterSelector)s, instance="$instance"}' % $._config, - 'instance:node_network_transmit_bytes_excluding_lo:rate1m{%(nodeExporterSelector)s, instance="$instance"}' % $._config, - ], - ['Receive', 'Transmit'], - ) + - { - yaxes: g.yaxes({ format: 'Bps', min: null }), - seriesOverrides: [ - { - alias: '/Receive/', - stack: 'A', - }, - { - alias: '/Transmit/', - stack: 'B', - transform: 'negative-Y', - }, - ], - }, - ) - .addPanel( - g.panel('Net Saturation (Drops Receive/Transmit)') + - g.queryPanel( - [ - 'instance:node_network_receive_drop_excluding_lo:rate1m{%(nodeExporterSelector)s, instance="$instance"}' % $._config, - 'instance:node_network_transmit_drop_excluding_lo:rate1m{%(nodeExporterSelector)s, instance="$instance"}' % $._config, - ], - ['Receive drops', 'Transmit drops'], - ) + - { - yaxes: g.yaxes({ format: 'rps', min: null }), - seriesOverrides: [ - { - alias: '/Receive/', - stack: 'A', - }, - { - alias: '/Transmit/', - stack: 'B', - transform: 'negative-Y', - }, - ], - }, - ) - ) - .addRow( - g.row('Disk IO') - .addPanel( - g.panel('Disk IO Utilisation') + - g.queryPanel('instance_device:node_disk_io_time_seconds:rate1m{%(nodeExporterSelector)s, instance="$instance"}' % 
$._config, '{{device}}') + - { yaxes: g.yaxes('percentunit') }, - ) - .addPanel( - g.panel('Disk IO Saturation') + - g.queryPanel('instance_device:node_disk_io_time_weighted_seconds:rate1m{%(nodeExporterSelector)s, instance="$instance"}' % $._config, '{{device}}') + - { yaxes: g.yaxes('percentunit') }, - ) - ) - .addRow( - g.row('Disk Space') - .addPanel( - g.panel('Disk Space Utilisation') + - g.queryPanel(||| - 1 - - ( - max without (mountpoint, fstype) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, instance="$instance"}) - / - max without (mountpoint, fstype) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, instance="$instance"}) - ) - ||| % $._config, '{{device}}') + - { - yaxes: g.yaxes('percentunit'), - legend+: { show: false }, - }, - ), - ), - }, + ], panelWidth=12, panelHeight=7) + + grafana.util.grid.makeGrid([ + row.new('Disk Space') + + row.withPanels([ + diskSpaceUtilisation + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + sum ( + sum without (device) ( + max without (fstype, mountpoint, instance, pod) (( + node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s} - node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s} + ) != 0) + ) + / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s}))) + ) by (%(clusterLabel)s) + ||| % $._config + ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'), + ]), + ]), + ], panelWidth=24, panelHeight=7, startY=34), + ), + } else {}, } diff --git a/docs/node-mixin/jsonnetfile.json b/docs/node-mixin/jsonnetfile.json index dc92880dc3..2d56d91245 100644 --- a/docs/node-mixin/jsonnetfile.json +++ b/docs/node-mixin/jsonnetfile.json @@ -1,34 +1,15 @@ { - "dependencies": [ - { - "name": "grafonnet", - "source": { - "git": { - "remote": "https://github.com/grafana/grafonnet-lib", - "subdir": "grafonnet" - } - }, - "version": "master" - }, - { - "name": "grafana-builder", - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs", - "subdir": "grafana-builder" - } - }, - "version": "master" - }, - { - "name": "promgrafonnet", - "source": { - "git": { - "remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin", - "subdir": "lib/promgrafonnet" - } - }, - "version": "master" + "version": 1, + "dependencies": [ + { + "source": { + "git": { + "remote": "https://github.com/grafana/grafonnet.git", + "subdir": "gen/grafonnet-latest" } - ] + }, + "version": "main" + } + ], + "legacyImports": false } diff --git a/docs/node-mixin/lib/prom-mixin.libsonnet b/docs/node-mixin/lib/prom-mixin.libsonnet new file mode 100644 index 0000000000..f18c273c29 --- /dev/null +++ b/docs/node-mixin/lib/prom-mixin.libsonnet @@ -0,0 +1,535 @@ +local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet'; +local dashboard = grafana.dashboard; +local row = grafana.panel.row; +local prometheus = grafana.query.prometheus; +local variable = dashboard.variable; + +local timeSeriesPanel = grafana.panel.timeSeries; +local tsOptions = timeSeriesPanel.options; +local tsStandardOptions = timeSeriesPanel.standardOptions; +local tsQueryOptions = timeSeriesPanel.queryOptions; +local tsCustom = timeSeriesPanel.fieldConfig.defaults.custom; + +local gaugePanel = grafana.panel.gauge; +local gaugeStep = gaugePanel.standardOptions.threshold.step; + +local table = grafana.panel.table; +local tableStep = 
table.standardOptions.threshold.step; +local tableOverride = table.standardOptions.override; +local tableTransformation = table.queryOptions.transformation; + +{ + + new(config=null, platform=null, uid=null):: { + + local prometheusDatasourceVariable = variable.datasource.new( + 'datasource', 'prometheus' + ), + + local clusterVariablePrototype = + variable.query.new('cluster') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + (if config.showMultiCluster then variable.query.generalOptions.showOnDashboard.withLabelAndValue() else variable.query.generalOptions.showOnDashboard.withNothing()) + + variable.query.refresh.onTime() + + variable.query.generalOptions.withLabel('Cluster'), + + local clusterVariable = + if platform == 'Darwin' then + clusterVariablePrototype + + variable.query.queryTypes.withLabelValues( + ' %(clusterLabel)s' % config, + 'node_uname_info{%(nodeExporterSelector)s, sysname="Darwin"}' % config, + ) + else + clusterVariablePrototype + + variable.query.queryTypes.withLabelValues( + '%(clusterLabel)s' % config, + 'node_uname_info{%(nodeExporterSelector)s, sysname!="Darwin"}' % config, + ), + + local instanceVariablePrototype = + variable.query.new('instance') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + variable.query.refresh.onTime() + + variable.query.generalOptions.withLabel('Instance'), + + local instanceVariable = + if platform == 'Darwin' then + instanceVariablePrototype + + variable.query.queryTypes.withLabelValues( + 'instance', + 'node_uname_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster", sysname="Darwin"}' % config, + ) + else + instanceVariablePrototype + + variable.query.queryTypes.withLabelValues( + 'instance', + 'node_uname_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster", sysname!="Darwin"}' % config, + ), + + local idleCPU = + timeSeriesPanel.new('CPU Usage') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + tsStandardOptions.withUnit('percentunit') + + tsCustom.stacking.withMode('normal') + + tsStandardOptions.withMax(1) + + tsStandardOptions.withMin(0) + + tsOptions.tooltip.withMode('multi') + + tsCustom.withFillOpacity(10) + + tsCustom.withShowPoints('never') + + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + ( + (1 - sum without (mode) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode=~"idle|iowait|steal", instance="$instance", %(clusterLabel)s="$cluster"}[$__rate_interval]))) + / ignoring(cpu) group_left + count without (cpu, mode) (node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle", instance="$instance", %(clusterLabel)s="$cluster"}) + ) + ||| % config, + ) + + prometheus.withLegendFormat('{{cpu}}') + + prometheus.withIntervalFactor(5), + ]), + + local systemLoad = + timeSeriesPanel.new('Load Average') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + tsStandardOptions.withUnit('short') + + tsStandardOptions.withMin(0) + + tsCustom.withFillOpacity(0) + + tsCustom.withShowPoints('never') + + tsOptions.tooltip.withMode('multi') + + tsQueryOptions.withTargets([ + prometheus.new('$datasource', 'node_load1{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('1m load average'), + prometheus.new('$datasource', 'node_load5{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('5m load average'), + prometheus.new('$datasource', 
'node_load15{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('15m load average'), + prometheus.new('$datasource', 'count(node_cpu_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", mode="idle"})' % config) + prometheus.withLegendFormat('logical cores'), + ]), + + local memoryGraphPanelPrototype = + timeSeriesPanel.new('Memory Usage') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + tsStandardOptions.withUnit('bytes') + + tsStandardOptions.withMin(0) + + tsOptions.tooltip.withMode('multi') + + tsCustom.withFillOpacity(10) + + tsCustom.withShowPoints('never'), + + local memoryGraph = + if platform == 'Linux' then + memoryGraphPanelPrototype + + tsCustom.stacking.withMode('normal') + + tsQueryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + ( + node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} + - + node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} + - + node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} + - + node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} + ) + ||| % config, + ) + prometheus.withLegendFormat('memory used'), + prometheus.new('$datasource', 'node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('memory buffers'), + prometheus.new('$datasource', 'node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('memory cached'), + prometheus.new('$datasource', 'node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('memory free'), + ]) + else if platform == 'Darwin' then + // not useful to stack + memoryGraphPanelPrototype + + tsCustom.stacking.withMode('none') + + tsQueryOptions.withTargets([ + prometheus.new('$datasource', 'node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('Physical Memory'), + prometheus.new( + '$datasource', + ||| + ( + node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} - + node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} + + node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} + + node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} + ) + ||| % config + ) + prometheus.withLegendFormat( + 'Memory Used' + ), + prometheus.new( + '$datasource', + ||| + ( + node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} - + node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} + ) + ||| % config + ) + prometheus.withLegendFormat( + 'App Memory' + ), + prometheus.new('$datasource', 'node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('Wired Memory'), + prometheus.new('$datasource', 'node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance", 
%(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('Compressed'), + ]) + + else if platform == 'AIX' then + memoryGraphPanelPrototype + + tsCustom.stacking.withMode('none') + + tsQueryOptions.withTargets([ + prometheus.new('$datasource', 'node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('Physical Memory'), + prometheus.new( + '$datasource', + ||| + ( + node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} - + node_memory_available_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} + ) + ||| % config + ) + prometheus.withLegendFormat('Memory Used'), + ]), + + + // NOTE: avg() is used to circumvent a label change caused by a node_exporter rollout. + local memoryGaugePanelPrototype = + gaugePanel.new('Memory Usage') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + gaugePanel.standardOptions.thresholds.withSteps([ + gaugeStep.withColor('rgba(50, 172, 45, 0.97)'), + gaugeStep.withColor('rgba(237, 129, 40, 0.89)') + gaugeStep.withValue(80), + gaugeStep.withColor('rgba(245, 54, 54, 0.9)') + gaugeStep.withValue(90), + ]) + + gaugePanel.standardOptions.withMax(100) + + gaugePanel.standardOptions.withMin(0) + + gaugePanel.standardOptions.withUnit('percent'), + + local memoryGauge = + if platform == 'Linux' then + memoryGaugePanelPrototype + + gaugePanel.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + 100 - + ( + avg(node_memory_MemAvailable_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}) / + avg(node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}) + * 100 + ) + ||| % config, + ), + ]) + + else if platform == 'Darwin' then + memoryGaugePanelPrototype + + gaugePanel.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + ( + ( + avg(node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}) - + avg(node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}) + + avg(node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}) + + avg(node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}) + ) / + avg(node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}) + ) + * + 100 + ||| % config + ), + ]) + + else if platform == 'AIX' then + memoryGaugePanelPrototype + + gaugePanel.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + 100 - + ( + avg(node_memory_available_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}) / + avg(node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}) + * 100 + ) + ||| % config + ), + ]), + + + local diskIO = + timeSeriesPanel.new('Disk I/O') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + tsStandardOptions.withMin(0) + + tsCustom.withFillOpacity(0) + + tsCustom.withShowPoints('never') + + tsOptions.tooltip.withMode('multi') + + tsQueryOptions.withTargets([ + // TODO: Does it make sense to have those three in the same panel? 
+ prometheus.new('$datasource', 'rate(node_disk_read_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(diskDeviceSelector)s}[$__rate_interval])' % config) + + prometheus.withLegendFormat('{{device}} read') + + prometheus.withIntervalFactor(1), + prometheus.new('$datasource', 'rate(node_disk_written_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(diskDeviceSelector)s}[$__rate_interval])' % config) + + prometheus.withLegendFormat('{{device}} written') + + prometheus.withIntervalFactor(1), + prometheus.new('$datasource', 'rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(diskDeviceSelector)s}[$__rate_interval])' % config) + + prometheus.withLegendFormat('{{device}} io time') + + prometheus.withIntervalFactor(1), + ]) + + tsStandardOptions.withOverrides( + [ + tsStandardOptions.override.byRegexp.new('/ read| written/') + + tsStandardOptions.override.byRegexp.withPropertiesFromOptions( + tsStandardOptions.withUnit('Bps') + ), + tsStandardOptions.override.byRegexp.new('/ io time/') + + tsStandardOptions.override.byRegexp.withPropertiesFromOptions(tsStandardOptions.withUnit('percentunit')), + ] + ), + + local diskSpaceUsage = + table.new('Disk Space Usage') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + table.standardOptions.withUnit('decbytes') + + table.standardOptions.thresholds.withSteps( + [ + tableStep.withColor('green'), + tableStep.withColor('yellow') + gaugeStep.withValue(0.8), + tableStep.withColor('red') + gaugeStep.withValue(0.9), + ] + ) + + table.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + max by (mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(fsSelector)s, %(fsMountpointSelector)s}) + ||| % config + ) + + prometheus.withLegendFormat('') + + prometheus.withInstant() + + prometheus.withFormat('table'), + prometheus.new( + '$datasource', + ||| + max by (mountpoint) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(fsSelector)s, %(fsMountpointSelector)s}) + ||| % config + ) + + prometheus.withLegendFormat('') + + prometheus.withInstant() + + prometheus.withFormat('table'), + ]) + + table.standardOptions.withOverrides([ + tableOverride.byName.new('Mounted on') + + tableOverride.byName.withProperty('custom.width', 260), + tableOverride.byName.new('Size') + + tableOverride.byName.withProperty('custom.width', 93), + tableOverride.byName.new('Used') + + tableOverride.byName.withProperty('custom.width', 72), + tableOverride.byName.new('Available') + + tableOverride.byName.withProperty('custom.width', 88), + tableOverride.byName.new('Used, %') + + tableOverride.byName.withProperty('unit', 'percentunit') + + tableOverride.byName.withPropertiesFromOptions( + table.fieldConfig.defaults.custom.withCellOptions( + { type: 'gauge' }, + ) + ) + + tableOverride.byName.withProperty('max', 1) + + tableOverride.byName.withProperty('min', 0), + ]) + + table.queryOptions.withTransformations([ + tableTransformation.withId('groupBy') + + tableTransformation.withOptions( + { + fields: { + 'Value #A': { + aggregations: [ + 'lastNotNull', + ], + operation: 'aggregate', + }, + 'Value #B': { + aggregations: [ + 'lastNotNull', + ], + operation: 'aggregate', + }, + mountpoint: { + aggregations: [], + operation: 'groupby', + }, + }, + } + ), + tableTransformation.withId('merge'), + 
tableTransformation.withId('calculateField') + + tableTransformation.withOptions( + { + alias: 'Used', + binary: { + left: 'Value #A (lastNotNull)', + operator: '-', + reducer: 'sum', + right: 'Value #B (lastNotNull)', + }, + mode: 'binary', + reduce: { + reducer: 'sum', + }, + } + ), + tableTransformation.withId('calculateField') + + tableTransformation.withOptions( + { + alias: 'Used, %', + binary: { + left: 'Used', + operator: '/', + reducer: 'sum', + right: 'Value #A (lastNotNull)', + }, + mode: 'binary', + reduce: { + reducer: 'sum', + }, + } + ), + tableTransformation.withId('organize') + + tableTransformation.withOptions( + { + excludeByName: {}, + indexByName: {}, + renameByName: { + 'Value #A (lastNotNull)': 'Size', + 'Value #B (lastNotNull)': 'Available', + mountpoint: 'Mounted on', + }, + } + ), + tableTransformation.withId('sortBy') + + tableTransformation.withOptions( + { + fields: {}, + sort: [ + { + field: 'Mounted on', + }, + ], + } + ), + + ]), + + local networkReceived = + timeSeriesPanel.new('Network Received') + + timeSeriesPanel.panelOptions.withDescription('Network received (bits/s)') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + tsStandardOptions.withUnit('bps') + + tsStandardOptions.withMin(0) + + tsCustom.withFillOpacity(0) + + tsCustom.withShowPoints('never') + + tsOptions.tooltip.withMode('multi') + + tsQueryOptions.withTargets([ + prometheus.new('$datasource', 'rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", device!="lo"}[$__rate_interval]) * 8' % config) + + prometheus.withLegendFormat('{{device}}') + + prometheus.withIntervalFactor(1), + ]), + + local networkTransmitted = + timeSeriesPanel.new('Network Transmitted') + + timeSeriesPanel.panelOptions.withDescription('Network transmitted (bits/s)') + + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable) + + tsStandardOptions.withUnit('bps') + + tsStandardOptions.withMin(0) + + tsCustom.withFillOpacity(0) + + tsOptions.tooltip.withMode('multi') + + tsQueryOptions.withTargets([ + prometheus.new('$datasource', 'rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", device!="lo"}[$__rate_interval]) * 8' % config) + + prometheus.withLegendFormat('{{device}}') + + prometheus.withIntervalFactor(1), + ]), + + local cpuRow = + row.new('CPU') + + row.withPanels([ + idleCPU, + systemLoad, + ]), + + local memoryRow = [ + row.new('Memory') + row.gridPos.withY(8), + memoryGraph + row.gridPos.withX(0) + row.gridPos.withY(9) + row.gridPos.withH(7) + row.gridPos.withW(18), + memoryGauge + row.gridPos.withX(18) + row.gridPos.withY(9) + row.gridPos.withH(7) + row.gridPos.withW(6), + ], + + local diskRow = + row.new('Disk') + + row.withPanels([ + diskIO, + diskSpaceUsage, + ]), + + local networkRow = + row.new('Network') + + row.withPanels([ + networkReceived, + networkTransmitted, + ]), + + local panels = + grafana.util.grid.makeGrid([ + cpuRow, + ], panelWidth=12, panelHeight=7) + + memoryRow + + grafana.util.grid.makeGrid([ + diskRow, + networkRow, + ], panelWidth=12, panelHeight=7, startY=18), + + local variables = + [ + prometheusDatasourceVariable, + clusterVariable, + instanceVariable, + ], + + dashboard: if platform == 'Linux' then + dashboard.new( + '%sNodes' % config.dashboardNamePrefix, + ) + + dashboard.time.withFrom('now-1h') + + dashboard.withTags(config.dashboardTags) + + dashboard.withTimezone('utc') + + dashboard.withRefresh('30s') + + 
dashboard.withUid(std.md5(uid)) + + dashboard.graphTooltip.withSharedCrosshair() + + dashboard.withVariables(variables) + + dashboard.withPanels(panels) + else if platform == 'Darwin' then + dashboard.new( + '%sMacOS' % config.dashboardNamePrefix, + ) + + dashboard.time.withFrom('now-1h') + + dashboard.withTags(config.dashboardTags) + + dashboard.withTimezone('utc') + + dashboard.withRefresh('30s') + + dashboard.withUid(std.md5(uid)) + + dashboard.graphTooltip.withSharedCrosshair() + + dashboard.withVariables(variables) + + dashboard.withPanels(panels) + else if platform == 'AIX' then + dashboard.new( + '%sAIX' % config.dashboardNamePrefix, + ) + + dashboard.time.withFrom('now-1h') + + dashboard.withTags(config.dashboardTags) + + dashboard.withTimezone('utc') + + dashboard.withRefresh('30s') + + dashboard.withUid(std.md5(uid)) + + dashboard.graphTooltip.withSharedCrosshair() + + dashboard.withVariables(variables) + + dashboard.withPanels(panels), + + }, +} diff --git a/docs/node-mixin/rules/rules.libsonnet b/docs/node-mixin/rules/rules.libsonnet index 6b396e3bd3..9c8eb90dd1 100644 --- a/docs/node-mixin/rules/rules.libsonnet +++ b/docs/node-mixin/rules/rules.libsonnet @@ -8,19 +8,17 @@ // This rule gives the number of CPUs per node. record: 'instance:node_num_cpu:sum', expr: ||| - count without (cpu) ( - count without (mode) ( - node_cpu_seconds_total{%(nodeExporterSelector)s} - ) + count without (cpu, mode) ( + node_cpu_seconds_total{%(nodeExporterSelector)s,mode="idle"} ) ||| % $._config, }, { - // CPU utilisation is % CPU is not idle. - record: 'instance:node_cpu_utilisation:rate1m', + // CPU utilisation is % CPU without {idle,iowait,steal}. + record: 'instance:node_cpu_utilisation:rate%(rateInterval)s' % $._config, expr: ||| - 1 - avg without (cpu, mode) ( - rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle"}[1m]) + 1 - avg without (cpu) ( + sum without (mode) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode=~"idle|iowait|steal"}[%(rateInterval)s])) ) ||| % $._config, }, @@ -43,62 +41,74 @@ record: 'instance:node_memory_utilisation:ratio', expr: ||| 1 - ( - node_memory_MemAvailable_bytes{%(nodeExporterSelector)s} + ( + node_memory_MemAvailable_bytes{%(nodeExporterSelector)s} + or + ( + node_memory_Buffers_bytes{%(nodeExporterSelector)s} + + + node_memory_Cached_bytes{%(nodeExporterSelector)s} + + + node_memory_MemFree_bytes{%(nodeExporterSelector)s} + + + node_memory_Slab_bytes{%(nodeExporterSelector)s} + ) + ) / node_memory_MemTotal_bytes{%(nodeExporterSelector)s} ) ||| % $._config, }, { - record: 'instance:node_vmstat_pgmajfault:rate1m', + record: 'instance:node_vmstat_pgmajfault:rate%(rateInterval)s' % $._config, expr: ||| - rate(node_vmstat_pgmajfault{%(nodeExporterSelector)s}[1m]) + rate(node_vmstat_pgmajfault{%(nodeExporterSelector)s}[%(rateInterval)s]) ||| % $._config, }, { // Disk utilisation (seconds spent, 1 second rate). - record: 'instance_device:node_disk_io_time_seconds:rate1m', + record: 'instance_device:node_disk_io_time_seconds:rate%(rateInterval)s' % $._config, expr: ||| - rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[1m]) + rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[%(rateInterval)s]) ||| % $._config, }, { // Disk saturation (weighted seconds spent, 1 second rate). 
- record: 'instance_device:node_disk_io_time_weighted_seconds:rate1m', + record: 'instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s' % $._config, expr: ||| - rate(node_disk_io_time_weighted_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[1m]) + rate(node_disk_io_time_weighted_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[%(rateInterval)s]) ||| % $._config, }, { - record: 'instance:node_network_receive_bytes_excluding_lo:rate1m', + record: 'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s' % $._config, expr: ||| sum without (device) ( - rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, device!="lo"}[1m]) + rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) ) ||| % $._config, }, { - record: 'instance:node_network_transmit_bytes_excluding_lo:rate1m', + record: 'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s' % $._config, expr: ||| sum without (device) ( - rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, device!="lo"}[1m]) + rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) ) ||| % $._config, }, // TODO: Find out if those drops ever happen on modern switched networks. { - record: 'instance:node_network_receive_drop_excluding_lo:rate1m', + record: 'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s' % $._config, expr: ||| sum without (device) ( - rate(node_network_receive_drop_total{%(nodeExporterSelector)s, device!="lo"}[1m]) + rate(node_network_receive_drop_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) ) ||| % $._config, }, { - record: 'instance:node_network_transmit_drop_excluding_lo:rate1m', + record: 'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s' % $._config, expr: ||| sum without (device) ( - rate(node_network_transmit_drop_total{%(nodeExporterSelector)s, device!="lo"}[1m]) + rate(node_network_transmit_drop_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) ) ||| % $._config, }, diff --git a/end-to-end-test.sh b/end-to-end-test.sh index 9214584e0c..c343579508 100755 --- a/end-to-end-test.sh +++ b/end-to-end-test.sh @@ -2,24 +2,62 @@ set -euf -o pipefail +# Allow setting GOHOSTOS for debugging purposes. +GOHOSTOS=${GOHOSTOS:-$(go env GOHOSTOS 2>/dev/null || uname -s | tr '[:upper:]' '[:lower:]')} + +# Allow setting arch for debugging purposes. 
+arch=${arch:-$(uname -m)} + +maybe_flag_search_scope() { + local collector=$1 + os_aux_os="" + if [[ $GOHOSTOS =~ ^(freebsd|openbsd|netbsd|solaris|dragonfly)$ ]]; then + os_aux_os=" ${collector}_bsd.go" + fi + echo "${collector}_common.go ${collector}.go ${collector}_${GOHOSTOS}.go ${collector}_${GOHOSTOS}_${arch}.go${os_aux_os}" +} + +supported_collectors() { + local collectors=$1 + local supported="" + for collector in ${collectors}; do + for filename in $(maybe_flag_search_scope "${collector}"); do + file="collector/${filename}" + if ./tools/tools match ${file} > /dev/null 2>&1; then + if grep -h -E -o -- "registerCollector\(" ${file} > /dev/null 2>&1; then + supported="${supported} ${collector}" + fi + break + fi + done + done + echo "${supported}" | tr ' ' '\n' | sort | uniq +} + enabled_collectors=$(cat << COLLECTORS arp bcache + bonding btrfs buddyinfo + cgroups conntrack cpu cpufreq + cpu_vulnerabilities diskstats + dmi drbd edac entropy + fibrechannel filefd hwmon infiniband interrupts ipvs ksmd + lnstat loadavg mdadm meminfo @@ -29,49 +67,74 @@ enabled_collectors=$(cat << COLLECTORS netstat nfs nfsd + pcidevice pressure + processes qdisc rapl schedstat + slabinfo sockstat + softirqs stat - thermal_zone + sysctl textfile - bonding - udp_queues + thermal_zone + udp_queues vmstat + watchdog wifi + xfrm xfs zfs - processes + zoneinfo COLLECTORS ) +supported_enabled_collectors=$(supported_collectors "${enabled_collectors}") + disabled_collectors=$(cat << COLLECTORS + selinux filesystem - time timex uname COLLECTORS ) +supported_disabled_collectors=$(supported_collectors "${disabled_collectors}") + cd "$(dirname $0)" port="$((10000 + (RANDOM % 10000)))" tmpdir=$(mktemp -d /tmp/node_exporter_e2e_test.XXXXXX) unix_socket="${tmpdir}/node_exporter.socket" -skip_re="^(go_|node_exporter_build_info|node_scrape_collector_duration_seconds|process_|node_textfile_mtime_seconds)" +skip_re="^(go_|node_exporter_build_info|node_scrape_collector_duration_seconds|process_|node_textfile_mtime_seconds|node_time_(zone|seconds)|node_network_(receive|transmit)_(bytes|packets)_total)" -arch="$(uname -m)" +case "${arch}" in + aarch64|ppc64le) fixture_metrics='collector/fixtures/e2e-64k-page-output.txt' ;; + *) fixture_metrics='collector/fixtures/e2e-output.txt' ;; +esac +# Only test CPU info collection on x86_64. 
case "${arch}" in - aarch64|ppc64le) fixture='collector/fixtures/e2e-64k-page-output.txt' ;; - *) fixture='collector/fixtures/e2e-output.txt' ;; + x86_64) + cpu_info_collector='--collector.cpu.info' + cpu_info_bugs='^(cpu_meltdown|spectre_.*|mds)$' + cpu_info_flags='^(aes|avx.?|constant_tsc)$' + ;; + *) + cpu_info_collector='--no-collector.cpu.info' + cpu_info_bugs='' + cpu_info_flags='' + ;; esac -keep=0; update=0; verbose=0; socket=0; -while getopts 'hkuvs' opt +extra_flags=""; keep=0; update=0; verbose=0; socket=0 +while getopts 'e:hkuvs' opt do case "$opt" in + e) + extra_flags="${OPTARG}" + ;; k) keep=1 ;; @@ -86,9 +149,9 @@ do set -x ;; *) - echo "Usage: $0 [-k] [-u] [-v]" + echo "Usage: $0 [-k] [-u] [-v] [-s]" echo " -k: keep temporary files and leave node_exporter running" - echo " -u: update fixture" + echo " -u: update fixture_metrics" echo " -v: verbose output" echo " -s: use unix socket" exit 1 @@ -102,6 +165,108 @@ then exit 1 fi +collector_flags=$(cat << FLAGS + ${extra_flags} + ${cpu_info_collector} + --collector.arp.device-exclude=nope + --collector.bcache.priorityStats + --collector.cpu.info.bugs-include=${cpu_info_bugs} + --collector.cpu.info.flags-include=${cpu_info_flags} + --collector.hwmon.chip-include=(applesmc|coretemp|hwmon4|nct6779) + --collector.netclass.ignore-invalid-speed + --collector.netclass.ignored-devices=(dmz|int) + --collector.netdev.device-include=lo + --collector.qdisc.device-include=(wlan0|eth0) + --collector.qdisc.fixtures=collector/fixtures/qdisc/ + --collector.stat.softirq + --collector.sysctl.include-info=kernel.seccomp.actions_avail + --collector.sysctl.include=fs.file-nr + --collector.sysctl.include=fs.file-nr:total,current,max + --collector.sysctl.include=kernel.threads-max + --collector.textfile.directory=collector/fixtures/textfile/two_metric_files/ + --collector.wifi.fixtures=collector/fixtures/wifi + --no-collector.arp.netlink +FLAGS +) + +# Handle supported --[no-]collector. flags. These are not hardcoded. +_filtered_collector_flags="" +for flag in ${collector_flags}; do + collector=$(echo "${flag}" | cut -d"." -f2) + # If the flag is associated with an enabled-by-default collector, include it. + enabled_by_default=0 + for filename in $(maybe_flag_search_scope "${collector}") ; do + file="collector/${filename}" + if grep -h -E -o -- "registerCollector\(.*, defaultEnabled" ${file} > /dev/null 2>&1; then + _filtered_collector_flags="${_filtered_collector_flags} ${flag}" + enabled_by_default=1 + break + fi + done + if [ ${enabled_by_default} -eq 1 ]; then + continue + fi + # If the flag is associated with an enabled-list collector, include it. + if echo "${supported_enabled_collectors} ${supported_disabled_collectors}" | grep -q -w "${collector}"; then + _filtered_collector_flags="${_filtered_collector_flags} ${flag}" + fi +done + +# Handle supported --[no-]collector.. flags. These are hardcoded and matched by the expression below. +filtered_collector_flags="" +# Check flags of all supported collectors further down their sub-collectors (beyond the 2nd "."). +for flag in ${_filtered_collector_flags}; do + # Iterate through all possible files where the flag may be defined. + flag_collector="$(echo "${flag}" | cut -d"." -f2)" + for filename in $(maybe_flag_search_scope "${flag_collector}") ; do + file="collector/${filename}" + # Move to next iteration if the current file is not included under the build context. + if ! ./tools/tools match "$file" > /dev/null 2>&1; then + continue + fi + # Flag has the format: --[no-]collector... 
+ if [ -n "$(echo ${flag} | cut -d"." -f3)" ]; then + # Check if the flag is used in the file. + trimmed_flag=$(echo "${flag}" | tr -d "\"' " | cut -d"=" -f1 | cut -c 3-) + if [[ $trimmed_flag =~ ^no- ]]; then + trimmed_flag=$(echo $trimmed_flag | cut -c 4-) + fi + if grep -h -E -o -- "kingpin.Flag\(\"${trimmed_flag}" ${file} > /dev/null 2>&1; then + filtered_collector_flags="${filtered_collector_flags} ${flag}" + else + continue + fi + # Flag has the format: --[no-]collector.. + else + # Flag is supported by the host. + filtered_collector_flags="${filtered_collector_flags} ${flag}" + fi + done +done + +# Check for ignored flags. +ignored_flags="" +for flag in ${collector_flags}; do + flag=$(echo "${flag}" | tr -d " ") + if ! echo "${filtered_collector_flags}" | grep -q -F -- "${flag}" > /dev/null 2>&1; then + ignored_flags="${ignored_flags} ${flag}" + fi +done + +cat << EOF > "${tmpdir}/config.log" +ENABLED COLLECTORS======= +$(echo "${supported_enabled_collectors:1}" | tr ' ' '\n' | sort) +========================= + +DISABLED COLLECTORS====== +$(echo "${supported_disabled_collectors:1}" | tr ' ' '\n' | sort) +========================= + +IGNORED FLAGS============ +$(echo "${ignored_flags:1}"| tr ' ' '\n' | sort | uniq) +========================= +EOF + if [ ${socket} -ne 0 ]; then touch "${unix_socket}" connection_params="--web.socket-path=${unix_socket}" @@ -109,26 +274,34 @@ else connection_params="--web.listen-address=127.0.0.1:${port}" fi - ./node_exporter \ + --path.rootfs="collector/fixtures" \ --path.procfs="collector/fixtures/proc" \ --path.sysfs="collector/fixtures/sys" \ - $(for c in ${enabled_collectors}; do echo --collector.${c} ; done) \ - $(for c in ${disabled_collectors}; do echo --no-collector.${c} ; done) \ - --collector.textfile.directory="collector/fixtures/textfile/two_metric_files/" \ - --collector.wifi.fixtures="collector/fixtures/wifi" \ - --collector.qdisc.fixtures="collector/fixtures/qdisc/" \ - --collector.netclass.ignored-devices="(bond0|dmz|int)" \ - --collector.cpu.info \ + --path.udev.data="collector/fixtures/udev/data" \ + $(for c in ${supported_enabled_collectors}; do echo --collector.${c} ; done) \ + $(for c in ${supported_disabled_collectors}; do echo --no-collector.${c} ; done) \ + ${filtered_collector_flags} \ ${connection_params} \ --log.level="debug" > "${tmpdir}/node_exporter.log" 2>&1 & echo $! > "${tmpdir}/node_exporter.pid" +generated_metrics="${tmpdir}/e2e-output.txt" +for os in freebsd openbsd netbsd solaris dragonfly darwin; do + if [ "${GOHOSTOS}" = "${os}" ]; then + generated_metrics="${tmpdir}/e2e-output-${GOHOSTOS}.txt" + fixture_metrics="${fixture_metrics::-4}-${GOHOSTOS}.txt" + fi +done + finish() { if [ $? -ne 0 -o ${verbose} -ne 0 ] then cat << EOF >&2 +CONFIG ================== +$(cat "${tmpdir}/config.log") +========================= LOG ===================== $(cat "${tmpdir}/node_exporter.log") ========================= @@ -137,15 +310,25 @@ EOF if [ ${update} -ne 0 ] then - cp "${tmpdir}/e2e-output.txt" "${fixture}" + cp "${generated_metrics}" "${fixture_metrics}" fi if [ ${keep} -eq 0 ] then - kill "$(cat ${tmpdir}/node_exporter.pid)" - # This silences the "Killed" message - set +e - wait "$(cat ${tmpdir}/node_exporter.pid)" > /dev/null 2>&1 + pid=$(cat "${tmpdir}/node_exporter.pid") + # Disown to prevent shell from printing "Terminated" message + disown "$pid" 2>/dev/null || true + kill "$pid" 2>/dev/null || true + # Wait for the process to exit gracefully to allow socket cleanup + for i in {1..100}; do + if ! 
kill -0 "$pid" 2>/dev/null; then + break + fi + sleep 0.1 + done + # Force kill if it's still running + kill -9 "$pid" 2>/dev/null || true + rc=0 if [ ${socket} -ne 0 ]; then if ls -l "${unix_socket}" &> /dev/null; then @@ -154,9 +337,9 @@ EOF fi fi rm -rf "${tmpdir}" - # We should exit with non-zero code, - # if node exporter didn't remove the socket file. - exit $rc + if [ $rc -ne 0 ]; then + exit $rc + fi fi } @@ -176,14 +359,98 @@ get() { } sleep 1 + ( if [ ${socket} -ne 0 ]; then - curl -s -X GET --unix-socket "${unix_socket}" ./metrics + curl -s -X GET --unix-socket "${unix_socket}" http://localhost/metrics else get "127.0.0.1:${port}/metrics" fi -) | grep -E -v "${skip_re}" > "${tmpdir}/e2e-output.txt" +) | grep --text -E -v "${skip_re}" > "${generated_metrics}" + +# The following ignore-list is only applicable to the VMs used to run E2E tests on platforms for which containerized environments are not available. +# However, owing to this, there are some non-deterministic metrics that end up generating samples, unlike their containerized counterparts, for e.g., node_network_receive_bytes_total. +non_deterministic_metrics=$(cat << METRICS + node_boot_time_seconds + node_cpu_frequency_hertz + node_cpu_frequency_max_hertz + node_cpu_seconds_total + node_disk_io_time_seconds_total + node_disk_read_bytes_total + node_disk_read_sectors_total + node_disk_read_time_seconds_total + node_disk_reads_completed_total + node_disk_write_time_seconds_total + node_disk_writes_completed_total + node_disk_written_bytes_total + node_disk_written_sectors_total + node_exec_context_switches_total + node_exec_device_interrupts_total + node_exec_forks_total + node_exec_software_interrupts_total + node_exec_system_calls_total + node_exec_traps_total + node_interrupts_total + node_load1 + node_load15 + node_load5 + node_memory_active_bytes + node_memory_buffer_bytes + node_memory_cache_bytes + node_memory_compressed_bytes + node_memory_free_bytes + node_memory_inactive_bytes + node_memory_internal_bytes + node_memory_laundry_bytes + node_memory_purgeable_bytes + node_memory_size_bytes + node_memory_swapped_in_bytes_total + node_memory_swapped_out_bytes_total + node_memory_wired_bytes + node_netstat_tcp_receive_packets_total + node_netstat_tcp_transmit_packets_total + node_network_receive_bytes_total + node_network_receive_multicast_total + node_network_transmit_multicast_total + node_zfs_abdstats_linear_count_total + node_zfs_abdstats_linear_data_bytes + node_zfs_abdstats_scatter_chunk_waste_bytes + node_zfs_abdstats_scatter_count_total + node_zfs_abdstats_scatter_data_bytes + node_zfs_abdstats_struct_bytes + node_zfs_arcstats_anon_bytes + node_zfs_arcstats_c_bytes + node_zfs_arcstats_c_max_bytes + node_zfs_arcstats_data_bytes + node_zfs_arcstats_demand_data_hits_total + node_zfs_arcstats_demand_data_misses_total + node_zfs_arcstats_demand_metadata_hits_total + node_zfs_arcstats_demand_metadata_misses_total + node_zfs_arcstats_hdr_bytes + node_zfs_arcstats_hits_total + node_zfs_arcstats_meta_bytes + node_zfs_arcstats_mfu_bytes + node_zfs_arcstats_mfu_ghost_hits_total + node_zfs_arcstats_misses_total + node_zfs_arcstats_mru_bytes + node_zfs_arcstats_mru_ghost_bytes + node_zfs_arcstats_other_bytes + node_zfs_arcstats_pd_bytes + node_zfs_arcstats_size_bytes + node_zfs_zfetchstats_hits_total + node_zfs_zfetchstats_misses_total +METRICS +) + +# Remove non-deterministic metrics from the generated metrics file (as we run their workflows in VMs). 
+for os in freebsd openbsd netbsd solaris dragonfly darwin; do + if [ "${GOHOSTOS}" = "${os}" ]; then + for metric in ${non_deterministic_metrics}; do + sed -i "/${metric}/d" "${generated_metrics}" + done + fi +done diff -u \ - "${fixture}" \ - "${tmpdir}/e2e-output.txt" + "${fixture_metrics}" \ + "${generated_metrics}" diff --git a/example-rules.yml b/example-rules.yml index 5ca5c4cb26..d9c125dab3 100644 --- a/example-rules.yml +++ b/example-rules.yml @@ -1,18 +1,18 @@ groups: -- name: example-node-exporter-rules - rules: - # The count of CPUs per node, useful for getting CPU time as a percent of total. - - record: instance:node_cpus:count - expr: count(node_cpu_seconds_total{mode="idle"}) without (cpu,mode) + - name: example-node-exporter-rules + rules: + # The count of CPUs per node, useful for getting CPU time as a percent of total. + - record: instance:node_cpus:count + expr: count(node_cpu_seconds_total{mode="idle"}) without (cpu,mode) - # CPU in use by CPU. - - record: instance_cpu:node_cpu_seconds_not_idle:rate5m - expr: sum(rate(node_cpu_seconds_total{mode!="idle"}[5m])) without (mode) + # CPU in use by CPU. + - record: instance_cpu:node_cpu_seconds_not_idle:rate5m + expr: sum(rate(node_cpu_seconds_total{mode!="idle"}[5m])) without (mode) - # CPU in use by mode. - - record: instance_mode:node_cpu_seconds:rate5m - expr: sum(rate(node_cpu_seconds_total[5m])) without (cpu) + # CPU in use by mode. + - record: instance_mode:node_cpu_seconds:rate5m + expr: sum(rate(node_cpu_seconds_total[5m])) without (cpu) - # CPU in use ratio. - - record: instance:node_cpu_utilization:ratio - expr: sum(instance_mode:node_cpu_seconds:rate5m{mode!="idle"}) without (mode) / instance:node_cpus:count + # CPU in use ratio. + - record: instance:node_cpu_utilization:ratio + expr: sum(instance_mode:node_cpu_seconds:rate5m{mode!="idle"}) without (mode) / instance:node_cpus:count diff --git a/examples/systemd/README.md b/examples/systemd/README.md index 67039bed13..3b3577fa0f 100644 --- a/examples/systemd/README.md +++ b/examples/systemd/README.md @@ -2,7 +2,7 @@ If you are using distribution packages or the copr repository, you don't need to deal with these files! -The unit file in this directory is to be put into `/etc/systemd/system`. +The unit files (`*.service` and `*.socket`) in this directory are to be put into `/etc/systemd/system`. It needs a user named `node_exporter`, whose shell should be `/sbin/nologin` and should not have any special privileges. It needs a sysconfig file in `/etc/sysconfig/node_exporter`. It needs a directory named `/var/lib/node_exporter/textfile_collector`, whose owner should be `node_exporter`:`node_exporter`. 
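As a rough how-to for the setup the README above describes, a minimal sketch (assuming a systemd-based distribution with `useradd` available and the unit files from this directory at hand; distribution packages and the copr repository do all of this automatically):

    # unprivileged service account with a nologin shell
    useradd --system --no-create-home --shell /sbin/nologin node_exporter
    # sysconfig file read by the unit; it may stay empty, the unit falls back to OPTIONS=
    touch /etc/sysconfig/node_exporter
    # textfile collector directory, owned by the service account
    mkdir -p /var/lib/node_exporter/textfile_collector
    chown node_exporter:node_exporter /var/lib/node_exporter/textfile_collector
    # install both unit files and enable socket activation on port 9100
    cp node_exporter.service node_exporter.socket /etc/systemd/system/
    systemctl daemon-reload
    systemctl enable --now node_exporter.socket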
diff --git a/examples/systemd/node_exporter.service b/examples/systemd/node_exporter.service index 64f1405b02..1ef24442d2 100644 --- a/examples/systemd/node_exporter.service +++ b/examples/systemd/node_exporter.service @@ -1,10 +1,13 @@ [Unit] Description=Node Exporter +Requires=node_exporter.socket [Service] User=node_exporter -EnvironmentFile=/etc/sysconfig/node_exporter -ExecStart=/usr/sbin/node_exporter $OPTIONS +# Fallback when environment file does not exist +Environment=OPTIONS= +EnvironmentFile=-/etc/sysconfig/node_exporter +ExecStart=/usr/sbin/node_exporter --web.systemd-socket $OPTIONS [Install] WantedBy=multi-user.target diff --git a/examples/systemd/node_exporter.socket b/examples/systemd/node_exporter.socket new file mode 100644 index 0000000000..c3e5670bfb --- /dev/null +++ b/examples/systemd/node_exporter.socket @@ -0,0 +1,8 @@ +[Unit] +Description=Node Exporter + +[Socket] +ListenStream=9100 + +[Install] +WantedBy=sockets.target diff --git a/go.mod b/go.mod index 9a92e3b923..3aef9ce197 100644 --- a/go.mod +++ b/go.mod @@ -1,38 +1,62 @@ module github.com/prometheus/node_exporter +go 1.24.0 + require ( - github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect - github.com/beevik/ntp v0.3.0 - github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043 - github.com/go-kit/kit v0.10.0 - github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 - github.com/golang/protobuf v1.4.1 // indirect - github.com/hodgesds/perf-utils v0.0.8 - github.com/jpillora/backoff v1.0.0 // indirect - github.com/lufia/iostat v1.1.0 + github.com/alecthomas/kingpin/v2 v2.4.0 + github.com/beevik/ntp v1.5.0 + github.com/coreos/go-systemd/v22 v22.6.0 + github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6 + github.com/ema/qdisc v1.0.0 + github.com/godbus/dbus/v5 v5.1.0 + github.com/hashicorp/go-envparse v0.1.0 + github.com/hodgesds/perf-utils v0.7.0 + github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973 + github.com/jsimonetti/rtnetlink/v2 v2.1.0 + github.com/lufia/iostat v1.2.1 github.com/mattn/go-xmlrpc v0.0.3 - github.com/mdlayher/genetlink v1.0.0 // indirect - github.com/mdlayher/netlink v1.1.0 // indirect - github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee - github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.6.0 - github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.10.0 - github.com/prometheus/procfs v0.1.3 - github.com/siebenmann/go-kstat v0.0.0-20200303194639-4e8294f9e9d5 - github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a - go.uber.org/multierr v1.5.0 // indirect - golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20200513185701-a91f0712d120 // indirect - golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect - golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 - golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97 // indirect - gopkg.in/alecthomas/kingpin.v2 v2.2.6 - gopkg.in/yaml.v2 v2.3.0 - honnef.co/go/tools v0.0.1-2020.1.3 // indirect + github.com/mdlayher/ethtool v0.5.0 + github.com/mdlayher/netlink v1.8.0 + github.com/mdlayher/wifi v0.7.0 + github.com/opencontainers/selinux v1.13.0 + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 + github.com/prometheus-community/go-runit v0.1.0 + github.com/prometheus/client_golang 
v1.23.2 + github.com/prometheus/client_model v0.6.2 + github.com/prometheus/common v0.67.2 + github.com/prometheus/exporter-toolkit v0.15.0 + github.com/prometheus/procfs v0.19.2 + github.com/safchain/ethtool v0.6.2 + golang.org/x/exp v0.0.0-20250911091902-df9299821621 + golang.org/x/sys v0.38.0 + howett.net/plist v1.0.1 ) -go 1.14 +require ( + cyphar.com/go-pathrs v0.2.1 // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cyphar/filepath-securejoin v0.6.0 // indirect + github.com/dennwc/ioctl v1.0.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/mdlayher/genetlink v1.3.2 // indirect + github.com/mdlayher/socket v0.5.1 // indirect + github.com/mdlayher/vsock v1.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.32.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.13.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect +) diff --git a/go.sum b/go.sum index 5799b025c8..1c0d0dad09 100644 --- a/go.sum +++ b/go.sum @@ -1,523 +1,144 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= -github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +cyphar.com/go-pathrs v0.2.1 h1:9nx1vOgwVvX1mNBWDu93+vaceedpbsDqo+XuBGL40b8= +cyphar.com/go-pathrs v0.2.1/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/beevik/ntp v1.5.0 h1:y+uj/JjNwlY2JahivxYvtmv4ehfi3h74fAuABB9ZSM4= +github.com/beevik/ntp v1.5.0/go.mod h1:mJEhBrwT76w9D+IfOEGvuzyuudiW9E52U2BaTrMOYow= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod 
h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cilium/ebpf v0.19.0 h1:Ro/rE64RmFBeA9FGjcTc+KmCeY6jXmryu6FfnzPRIao= +github.com/cilium/ebpf v0.19.0/go.mod h1:fLCgMo3l8tZmAdM3B2XqdFzXBpwkcSTroaVqN08OWVY= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= +github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is= +github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043 h1:I3hLsM87FSASssIrIOGwJCio31dvLkvpYDKn2+r31ec= -github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 h1:s+PDl6lozQ+dEUtUtQnO7+A2iPG3sK1pI4liU+jxn90= -github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod 
h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hodgesds/perf-utils v0.0.8 h1:6BT6cddpouM0G7eHhLFS+XcqtPvhrzWbPreyIvgFEcg= -github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6 h1:fV+JlCY0cCJh3l0jfE7iB3ZmrdfJSgfcjdrCQhPokGg= +github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA= +github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg= +github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0= +github.com/ema/qdisc v1.0.0 h1:EHLG08FVRbWLg8uRICa3xzC9Zm0m7HyMHfXobWFnXYg= +github.com/ema/qdisc v1.0.0/go.mod h1:FhIc0fLYi7f+lK5maMsesDqwYojIOh3VfRs8EVd5YJQ= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/hashicorp/go-envparse v0.1.0 h1:bE++6bhIsNCPLvgDZkYqo3nA+/PFI51pkrHdmPSDFPY= +github.com/hashicorp/go-envparse v0.1.0/go.mod h1:OHheN1GoygLlAkTlXLXvAdnXdZxy8JUweQ1rAXx1xnc= +github.com/hodgesds/perf-utils v0.7.0 h1:7KlHGMuig4FRH5fNw68PV6xLmgTe7jKs9hgAcEAbioU= +github.com/hodgesds/perf-utils v0.7.0/go.mod h1:LAklqfDadNKpkxoAJNHpD5tkY0rkZEVdnCEWN5k4QJY= +github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973 h1:hk4LPqXIY/c9XzRbe7dA6qQxaT6Axcbny0L/G5a4owQ= +github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:PoK3ejP3LJkGTzKqRlpvCIFas3ncU02v8zzWDW+g0FY= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552 h1:Ve/e6edHdAHn+8/24Xco7IhQCv3u5Dab2qZNvR9e5/U= -github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= -github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lufia/iostat v1.1.0 h1:Z1wa4Hhxwi8uSKfgRsFc5RLtt3SuFPIOgkiPGkUtHDY= -github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/jsimonetti/rtnetlink/v2 v2.1.0 h1:3sSPD0k+Qvia3wbv6kZXCN0Dlz6Swv7RHjvvonuOcKE= +github.com/jsimonetti/rtnetlink/v2 v2.1.0/go.mod h1:hPPUTE+ekH3HD+zCEGAGLxzFY9HrJCyD1aN7JJ3SHIY= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lufia/iostat v1.2.1 h1:tnCdZBIglgxD47RyD55kfWQcJMGzO+1QBziSQfesf2k= +github.com/lufia/iostat v1.2.1/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/mattn/go-xmlrpc v0.0.3 h1:Y6WEMLEsqs3RviBrAa1/7qmbGB7DVD3brZIbqMbQdGY= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= 
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= -github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= -github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= -github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09 h1:U2vuol6i4UF6MSpZJclH4HHiLRMoq1NAzxpIpCUJK/Y= -github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -github.com/mdlayher/netlink v1.0.0 h1:vySPY5Oxnn/8lxAPn2cK6kAzcZzYJl3KriSLO46OT18= -github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg= -github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= -github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee h1:hZDujBrW3ye2xxdKNFYT59D4yCH5Q0zLuNBNtysKtok= -github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mdlayher/ethtool v0.5.0 h1:7MpuhvUE574uVQDfkXotePLdfSNetlx3GDikFcdlVQA= +github.com/mdlayher/ethtool v0.5.0/go.mod h1:ROV9hwnETqDdpLv8E8WkCa8FymlkhFEeiB9cg3qzNkk= +github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= +github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= +github.com/mdlayher/netlink v1.8.0 h1:e7XNIYJKD7hUct3Px04RuIGJbBxy1/c4nX7D5YyvvlM= +github.com/mdlayher/netlink v1.8.0/go.mod h1:UhgKXUlDQhzb09DrCl2GuRNEglHmhYoWAHid9HK3594= +github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= +github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= +github.com/mdlayher/vsock v1.2.1 
h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= +github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= +github.com/mdlayher/wifi v0.7.0 h1:0BvMO+gLu06pvOpINs+wVY9KgwBOyEm5TUpyLmy6yF8= +github.com/mdlayher/wifi v0.7.0/go.mod h1:Px0mNl8jXl5uiC2FWgoD6AAGWVhq19sMmppboqR59Gg= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= 
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/opencontainers/selinux v1.13.0 h1:Zza88GWezyT7RLql12URvoxsbLfjFx988+LGaWfbL84= +github.com/opencontainers/selinux v1.13.0/go.mod h1:XxWTed+A/s5NNq4GmYScVy+9jzXhGBVEOAyucdRUY8s= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= -github.com/prometheus/common v0.10.0/go.mod 
h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/siebenmann/go-kstat v0.0.0-20200303194639-4e8294f9e9d5 h1:rRF7gJ7t0E1bfqNLwMqgb59eb273kgi+GgLE/yEiDzs= -github.com/siebenmann/go-kstat v0.0.0-20200303194639-4e8294f9e9d5/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a h1:os5OBNhwOwybXZMNLqT96XqtjdTtwRFw2w08uluvNeI= -github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 
h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus-community/go-runit v0.1.0 h1:uTWEj/Fn2RoLdfg/etSqwzgYNOYPrARx1BHUN052tGA= +github.com/prometheus-community/go-runit v0.1.0/go.mod h1:AvJ9Jo3gAFu2lbM4+qfjdpq30FfiLDJZKbQ015u08IQ= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.67.2 h1:PcBAckGFTIHt2+L3I33uNRTlKTplNzFctXcWhPyAEN8= +github.com/prometheus/common v0.67.2/go.mod h1:63W3KZb1JOKgcjlIr64WW/LvFGAqKPj0atm+knVGEko= +github.com/prometheus/exporter-toolkit v0.15.0 h1:Pcle5sSViwR1x0gdPd0wtYrPQENBieQAM7TmT0qtb2U= +github.com/prometheus/exporter-toolkit v0.15.0/go.mod h1:OyRWd2iTo6Xge9Kedvv0IhCrJSBu36JCfJ2yVniRIYk= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/safchain/ethtool v0.6.2 h1:O3ZPFAKEUEfbtE6J/feEe2Ft7dIJ2Sy8t4SdMRiIMHY= +github.com/safchain/ethtool v0.6.2/go.mod h1:VS7cn+bP3Px3rIq55xImBiZGHVLNyBh5dqG6dDQy8+I= +github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973 h1:GfSdC6wKfTGcgCS7BtzF5694Amne1pGCSTY252WhlEY= +github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod 
h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191007182048-72f939374954 h1:JGZucVF/L/TotR719NbujzadOZ2AgnYlqphQGHDCKaU= -golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120 h1:EZ3cVSzKOlJxAd8e8YAJ7no8nNypTxexh/YE/xW3ZEY= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4= -golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97 h1:DAuln/hGp+aJiHpID1Y1hYzMEPP5WLwtZHPb50mN0OE= -golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr 
v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 
v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM= +howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= diff --git a/https/README.md b/https/README.md deleted file mode 100644 index e6c9896f14..0000000000 --- a/https/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# HTTPS Package for Prometheus - -The `https` directory contains a Go package and a sample configuration file for -running `node_exporter` with HTTPS instead of HTTP. We currently support TLS 1.3 -and TLS 1.2. - -To run a server with TLS, use the flag `--web.config`. - -e.g. `./node_exporter --web.config="web-config.yml"` -If the config is kept within the https directory. - -The config file should be written in YAML format, and is reloaded on each connection to check for new certificates and/or authentication policy. - -## Sample Config - -``` -tls_server_config: - # Certificate and key files for server to use to authenticate to client. - cert_file: - key_file: - - # Server policy for client authentication. Maps to ClientAuth Policies. - # For more detail on clientAuth options: [ClientAuthType](https://golang.org/pkg/crypto/tls/#ClientAuthType) - [ client_auth_type: | default = "NoClientCert" ] - - # CA certificate for client certificate authentication to the server. - [ client_ca_file: ] - - # Minimum TLS version that is acceptable. - [ min_version: | default = "TLS12" ] - - # Maximum TLS version that is acceptable. - [ max_version: | default = "TLS13" ] - - # List of supported cipher suites for TLS versions up to TLS 1.2. If empty, - # Go default cipher suites are used. 
Available cipher suites are documented - # in the go documentation: - # https://golang.org/pkg/crypto/tls/#pkg-constants - [ cipher_suites: - [ - ] ] - - # prefer_server_cipher_suites controls whether the server selects the - # client's most preferred ciphersuite, or the server's most preferred - # ciphersuite. If true then the server's preference, as expressed in - # the order of elements in cipher_suites, is used. - [ prefer_server_cipher_suites: | default = true ] - - # Elliptic curves that will be used in an ECDHE handshake, in preference - # order. Available curves are documented in the go documentation: - # https://golang.org/pkg/crypto/tls/#CurveID - [ curve_preferences: - [ - ] ] - -http_server_config: - # Enable HTTP/2 support. Note that HTTP/2 is only supported with TLS. - # This can not be changed on the fly. - [ http2: | default = true ] - -# Usernames and hashed passwords that have full access to the web -# server via basic authentication. If empty, no basic authentication is -# required. Passwords are hashed with bcrypt. -basic_auth_users: - [ : ... ] -``` - -## About bcrypt - -There are several tools out there to generate bcrypt passwords, e.g. -[htpasswd](https://httpd.apache.org/docs/2.4/programs/htpasswd.html): - -`htpasswd -nBC 10 "" | tr -d ':\n'` - -That command will prompt you for a password and output the hashed password, -which will look something like: -`$2y$10$X0h1gDsPszWURQaxFh.zoubFi6DXncSjhoQNJgRrnGs7EsimhC7zG` - -The cost (10 in the example) influences the time it takes for computing the -hash. A higher cost will en up slowing down the authentication process. -Depending on the machine, a cost of 10 will take about ~70ms where a cost of -18 can take up to a few seconds. That hash will be computed on every -password-protected request. 
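The removed README above explains that the bcrypt cost trades hashing strength against per-request latency, because the hash is re-verified on every password-protected request. As a minimal illustrative sketch (assuming only the golang.org/x/crypto/bcrypt package; the costs 10 and 14 and the sample password are arbitrary, not node_exporter defaults), the same trade-off can be measured directly in Go:

```
package main

import (
	"fmt"
	"time"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	password := []byte("secret")

	// The cost parameter mirrors `htpasswd -nBC <cost>`. Every password-protected
	// request re-runs CompareHashAndPassword, so a higher cost slows each request.
	for _, cost := range []int{10, 14} { // illustrative costs only
		hash, err := bcrypt.GenerateFromPassword(password, cost)
		if err != nil {
			panic(err)
		}

		start := time.Now()
		if err := bcrypt.CompareHashAndPassword(hash, password); err != nil {
			panic(err)
		}
		fmt.Printf("bcrypt cost %d: verification took %v\n", cost, time.Since(start))
	}
}
```

Running the sketch shows verification time growing steeply with the cost parameter, which is consistent with the rough timings quoted in the removed README (on the order of tens of milliseconds at cost 10, seconds at much higher costs, depending on the machine).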
diff --git a/https/testdata/server.crt b/https/testdata/server.crt deleted file mode 100644 index 2ead96984b..0000000000 --- a/https/testdata/server.crt +++ /dev/null @@ -1,96 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1 (0x1) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus TLS CA - Validity - Not Before: Apr 5 08:06:57 2019 GMT - Not After : Mar 26 08:06:57 2059 GMT - Subject: C=US, O=Prometheus, CN=prometheus.example.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) - Modulus: - 00:bd:6c:b6:7f:d1:2f:be:e4:41:eb:5d:ff:50:78: - 03:2b:76:03:da:01:48:20:13:90:66:c9:ce:6e:06: - e5:fa:2d:0d:c0:b0:46:28:44:10:a0:61:79:87:a2: - 98:4c:29:fa:f9:bb:0f:44:c7:90:5c:5c:55:60:cd: - 45:da:b8:e4:dd:28:72:c8:8b:a1:3e:4b:00:09:82: - b0:2c:dc:d6:17:c9:02:f4:cd:26:c7:11:28:f3:77: - b5:97:c2:76:c2:e0:07:d7:34:5b:e0:ed:1a:59:a5: - b4:b7:16:09:3d:35:bd:d9:03:07:9d:7c:3b:f0:63: - bd:5e:02:99:cf:32:e1:ac:4c:7a:3e:4c:b2:8e:98: - 68:07:4f:59:dc:0d:bf:cc:83:04:5c:d8:90:f0:73: - da:2b:08:17:c4:36:a7:d8:94:3d:b6:c0:af:29:0a: - d3:19:5f:eb:7d:cc:4d:05:56:11:0a:ee:b1:f3:d7: - c9:5a:3c:8c:57:16:91:51:14:f8:20:4e:0f:29:9e: - 04:21:e6:f1:e4:e8:44:af:d7:25:92:08:64:fc:2c: - 1c:2e:4f:71:53:91:53:1d:e5:f9:7b:52:0f:21:da: - 5c:dd:19:68:96:ca:70:6a:f1:c4:0d:07:af:f8:65: - 13:92:e9:ef:65:b3:89:86:fd:c0:74:5c:a4:6b:49: - 62:c5 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Key Usage: critical - Digital Signature, Key Encipherment - X509v3 Basic Constraints: - CA:FALSE - X509v3 Extended Key Usage: - TLS Web Server Authentication, TLS Web Client Authentication - X509v3 Subject Key Identifier: - 00:61:01:AD:25:44:8A:EF:E1:2C:EC:83:5A:3A:3B:EA:A0:BD:E1:45 - X509v3 Authority Key Identifier: - keyid:4D:02:BF:71:95:6A:AA:58:C5:9C:B8:83:67:5E:64:16:99:E1:2A:9E - - Authority Information Access: - CA Issuers - URI:http://example.com/ca/tls-ca.cer - - X509v3 CRL Distribution Points: - - Full Name: - URI:http://example.com/ca/tls-ca.crl - - X509v3 Subject Alternative Name: - IP Address:127.0.0.1, IP Address:127.0.0.0, DNS:localhost - Signature Algorithm: sha1WithRSAEncryption - 77:97:e4:ef:db:10:8e:62:50:96:4a:6e:f5:a4:f9:1f:19:3b: - c8:a4:dd:b3:f6:11:41:1a:fb:e3:f8:dd:0e:64:e5:2b:00:b9: - e6:25:9f:2e:e1:d2:9a:cd:b6:f2:41:4d:27:dd:2c:9a:af:97: - 79:e8:cf:61:fb:cf:be:25:c6:e1:19:a0:c8:90:44:a0:76:8a: - 45:d4:37:22:e5:d4:80:b4:b3:0f:a8:33:08:24:ad:21:0b:b7: - 98:46:93:90:8a:ae:77:0c:cb:b8:59:d3:3b:9b:fb:16:5a:22: - ca:c2:97:9d:78:1b:fc:23:fc:a0:42:54:40:de:88:4b:07:2b: - 19:4e:0e:79:bf:c9:9f:01:a6:46:c5:55:fa:9f:c0:0d:8a:a6: - e1:47:16:a6:0e:be:23:c9:e9:58:d6:31:71:8c:80:9c:16:64: - f0:14:08:22:a1:23:7c:98:b9:62:d1:4a:ce:e3:5c:59:fb:41: - 87:a5:3b:36:dd:3d:45:48:b0:b0:77:6f:de:58:2a:27:4d:56: - 20:54:08:20:c8:6d:79:b5:b9:e6:3a:03:24:0f:6d:67:39:20: - 78:10:2f:47:85:83:c1:4d:17:33:79:84:75:27:fa:47:67:59: - 56:cc:33:7b:a5:77:aa:59:9a:98:30:10:1a:78:43:34:8f:ed: - c2:a1:a3:ea ------BEGIN CERTIFICATE----- -MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADBpMQswCQYDVQQGEwJVUzET -MBEGA1UECgwKUHJvbWV0aGV1czEpMCcGA1UECwwgUHJvbWV0aGV1cyBDZXJ0aWZp -Y2F0ZSBBdXRob3JpdHkxGjAYBgNVBAMMEVByb21ldGhldXMgVExTIENBMCAXDTE5 -MDQwNTA4MDY1N1oYDzIwNTkwMzI2MDgwNjU3WjBDMQswCQYDVQQGEwJVUzETMBEG -A1UECgwKUHJvbWV0aGV1czEfMB0GA1UEAwwWcHJvbWV0aGV1cy5leGFtcGxlLmNv -bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL1stn/RL77kQetd/1B4 -Ayt2A9oBSCATkGbJzm4G5fotDcCwRihEEKBheYeimEwp+vm7D0THkFxcVWDNRdq4 
-5N0ocsiLoT5LAAmCsCzc1hfJAvTNJscRKPN3tZfCdsLgB9c0W+DtGlmltLcWCT01 -vdkDB518O/BjvV4Cmc8y4axMej5Mso6YaAdPWdwNv8yDBFzYkPBz2isIF8Q2p9iU -PbbArykK0xlf633MTQVWEQrusfPXyVo8jFcWkVEU+CBODymeBCHm8eToRK/XJZII -ZPwsHC5PcVORUx3l+XtSDyHaXN0ZaJbKcGrxxA0Hr/hlE5Lp72WziYb9wHRcpGtJ -YsUCAwEAAaOCAREwggENMA4GA1UdDwEB/wQEAwIFoDAJBgNVHRMEAjAAMB0GA1Ud -JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUAGEBrSVEiu/hLOyD -Wjo76qC94UUwHwYDVR0jBBgwFoAUTQK/cZVqqljFnLiDZ15kFpnhKp4wPAYIKwYB -BQUHAQEEMDAuMCwGCCsGAQUFBzAChiBodHRwOi8vZXhhbXBsZS5jb20vY2EvdGxz -LWNhLmNlcjAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vZXhhbXBsZS5jb20vY2Ev -dGxzLWNhLmNybDAgBgNVHREEGTAXhwR/AAABhwR/AAAAgglsb2NhbGhvc3QwDQYJ -KoZIhvcNAQEFBQADggEBAHeX5O/bEI5iUJZKbvWk+R8ZO8ik3bP2EUEa++P43Q5k -5SsAueYlny7h0prNtvJBTSfdLJqvl3noz2H7z74lxuEZoMiQRKB2ikXUNyLl1IC0 -sw+oMwgkrSELt5hGk5CKrncMy7hZ0zub+xZaIsrCl514G/wj/KBCVEDeiEsHKxlO -Dnm/yZ8BpkbFVfqfwA2KpuFHFqYOviPJ6VjWMXGMgJwWZPAUCCKhI3yYuWLRSs7j -XFn7QYelOzbdPUVIsLB3b95YKidNViBUCCDIbXm1ueY6AyQPbWc5IHgQL0eFg8FN -FzN5hHUn+kdnWVbMM3uld6pZmpgwEBp4QzSP7cKho+o= ------END CERTIFICATE----- diff --git a/https/testdata/server.key b/https/testdata/server.key deleted file mode 100644 index e1226c0e1f..0000000000 --- a/https/testdata/server.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC9bLZ/0S++5EHr -Xf9QeAMrdgPaAUggE5Bmyc5uBuX6LQ3AsEYoRBCgYXmHophMKfr5uw9Ex5BcXFVg -zUXauOTdKHLIi6E+SwAJgrAs3NYXyQL0zSbHESjzd7WXwnbC4AfXNFvg7RpZpbS3 -Fgk9Nb3ZAwedfDvwY71eApnPMuGsTHo+TLKOmGgHT1ncDb/MgwRc2JDwc9orCBfE -NqfYlD22wK8pCtMZX+t9zE0FVhEK7rHz18laPIxXFpFRFPggTg8pngQh5vHk6ESv -1yWSCGT8LBwuT3FTkVMd5fl7Ug8h2lzdGWiWynBq8cQNB6/4ZROS6e9ls4mG/cB0 -XKRrSWLFAgMBAAECggEAezQ0V1o11dEc1vuiTjJgzWnLA4aF5OcUquZjb8jo2Blp -soR0fUgYEFiV9RRaPl+nr7ptKe0rBgfAOGALKUHNCdN/JNU8oQmjEoyADg3s6jeB -xruQlzWgDwszf2uqVwHj16Nkhx1wYBKZQeQBSmCkBHwl/daKHcahqn3CkLOleKx+ -Qlc3BzWNaGte6qpJMs0It3by1FuxRwVz5VkL8uhzj0WIOYMA84t0gTnFH9gfRO3F -licotxg/Nl5M36wWcfL8Jq++72AtaKcD1jUEwuQpogrVeqflmeHwn/TlL++Hv6Xe -Lq0jt3OCUKUV40eq9c5uEgTmyrVHMDkfFdXzutdMAQKBgQDsSMXk7P4SX6u6uTjV -In9eWw6ZyJ2aL6VB9co/NMsj49GrrFT8VX9d+JPe9P/n6tuGcFbymNep22njRksR -0ItpW1NFRR/R3g0kYe1EhkRpNm6fhY9oIuR9xhcNnPNYkqAKT3T/dxrzbwsNhomi -X8aht/eCz4ZsK/KdOGTkPozxgQKBgQDNOvrclT1Wl4bxONp9pEV5XpRSD/qigfIp -i5wxy7ihX/QY9RToIWJDnzMVLnEYe64RB2WB8/4WwNPOQcuaxXbFUFct/2NdhTnS -ToJPgPe819zW9t1FLTf1fHtsRBpGFtbhdlUDOiOtJiMXYiwlRh2uyWFhjOo8TNUE -qMwai0vLRQKBgQCDH4t6lC4W4jK5x2oLlT5bjWqX2uXjF8e8x/q5gsGspBPKEjOD -aKrq6jSdSRbui73RaGxH6pvb7iBf+LVWKIYFLKIUUdzrqS9f3lw+Z8h1HrjbG9JO -dvaX+aL3cf71S0E3F4sU7fLt3tSiZ+PfUQk424+mbyXox6a2qwIKS9AJgQKBgHCu -dHROYJo9ojKpo5Ueb6K+4jLYYSV+sYZMCBtzHlFETNKzJaJ6SeiU7Ugw8pmdtqnU -5M/gNl8pymFR0MeOqbKWdPdlZJpBfsjQoE2kouEFqFRCwKStui7IBUAheEeJXLv3 -659U+aek69l35oMkp0GDgjs8UpN/H+pp/36Hgrr9AoGAftWU405rpStHEdRVrazP -FibQesT9HOdJgmm1gNIhj+PnFs7lKER9p0Wdl79QnIqjwyhjCXL94TFerzTKLY2c -IRj5dcRHiiT0iK8wq8bzGNYCqV73oQXaUFMiutNAArXwzwuvPFPWNBQsjLzeDLeC -mcOsCcPAk8cLYtVfZo2sP3g= ------END PRIVATE KEY----- diff --git a/https/testdata/tls-ca-chain.pem b/https/testdata/tls-ca-chain.pem deleted file mode 100644 index 722264d89b..0000000000 --- a/https/testdata/tls-ca-chain.pem +++ /dev/null @@ -1,173 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 2 (0x2) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus Root CA - Validity - Not Before: Apr 5 08:00:37 2019 GMT - Not After : Mar 26 08:00:37 2059 GMT - Subject: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus 
TLS CA - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) - Modulus: - 00:aa:d2:34:6b:ed:f1:f4:01:08:e5:00:9f:75:c8: - ba:fc:4b:72:c6:04:93:af:f1:f6:b5:ce:01:0d:c6: - bd:d3:16:98:9d:e5:51:56:12:58:16:ee:18:6e:f0: - 68:a9:42:16:65:cf:e3:31:f5:90:79:9d:13:32:87: - 3b:1f:65:fd:84:88:a4:56:3d:26:54:69:05:27:5a: - ea:89:02:e7:31:9b:7d:7f:76:93:54:70:bc:17:92: - 06:9f:9f:90:4a:8a:cf:82:a7:7b:7c:71:c4:fa:34: - 56:00:32:1a:85:c5:f8:e4:4a:63:43:37:9d:60:84: - 4d:78:6e:87:12:c4:2b:1f:93:a5:fe:cc:5e:f1:df: - c1:97:ff:b7:3e:20:38:1d:71:15:11:ec:6c:7a:cc: - 0e:87:52:31:b1:b9:74:c3:07:1c:42:4b:1e:c1:17: - bc:e4:13:b7:b0:20:2e:c4:07:93:bd:a8:11:f9:da: - a7:d0:df:4a:48:be:9b:6d:65:c3:ae:58:56:c0:9f: - 17:c5:d8:32:b1:04:22:fb:5b:18:f6:20:10:50:ec: - 2d:10:4f:cc:48:8f:f2:75:dd:33:a4:0e:f5:55:da: - 2c:89:a1:3a:52:bb:11:11:0b:97:27:17:73:35:da: - 10:71:b3:9f:a8:42:91:e6:3a:66:00:f9:e5:11:8f: - 5b:57 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Key Usage: critical - Certificate Sign, CRL Sign - X509v3 Basic Constraints: critical - CA:TRUE, pathlen:0 - X509v3 Subject Key Identifier: - 4D:02:BF:71:95:6A:AA:58:C5:9C:B8:83:67:5E:64:16:99:E1:2A:9E - X509v3 Authority Key Identifier: - keyid:3C:1E:A8:C6:4C:05:4D:20:EC:88:DB:29:D4:7B:F9:12:5D:CE:EA:1A - - Authority Information Access: - CA Issuers - URI:https://example.com/ca/root-ca.cer - - X509v3 CRL Distribution Points: - - Full Name: - URI:https://example.com/ca/root-ca.crl - - Signature Algorithm: sha1WithRSAEncryption - 63:fc:ba:30:a5:05:d6:76:14:f1:77:38:b1:41:6f:81:d9:b4: - 02:fd:bc:e5:f6:d9:e6:73:e0:71:cf:4c:fb:13:b5:6b:bd:b9: - c6:f6:28:18:36:e1:8c:d9:93:b3:78:4a:3d:39:1b:f4:fb:69: - 75:24:ae:e1:a0:2f:94:05:bf:10:3c:3e:d2:2b:a8:f3:31:25: - 2e:ed:13:ad:60:5d:22:9a:26:15:20:86:98:73:4c:f6:4b:48: - b8:1f:67:ba:4e:c9:47:ed:85:dc:38:dc:02:0c:fb:54:d5:2e: - 6c:b4:95:18:51:d1:ae:ea:e8:fb:b4:19:50:04:bc:31:7e:51: - 9e:85:29:4d:c8:f7:26:d6:d6:8d:35:2d:9e:e2:06:16:38:e2: - 56:80:ec:f3:a3:34:e3:28:c4:e8:10:d0:8a:a6:6f:20:9a:b9: - dc:b9:90:6b:ba:8a:27:2c:29:72:28:55:e7:59:a6:a7:90:ec: - 32:e8:d0:26:4a:c1:44:dd:20:bf:dc:4d:1e:7e:cc:e5:a2:5b: - e8:df:3d:4b:01:aa:48:56:17:e9:29:d8:71:83:05:36:8c:11: - 4f:77:b8:95:20:b7:c7:21:06:c2:87:97:b4:6b:d3:f7:23:ba: - 4d:5f:15:d1:0c:4d:6e:f1:6a:9d:57:5c:02:6a:d7:31:18:ef: - 5c:fc:f8:04 ------BEGIN CERTIFICATE----- -MIIELTCCAxWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBqMQswCQYDVQQGEwJVUzET -MBEGA1UECgwKUHJvbWV0aGV1czEpMCcGA1UECwwgUHJvbWV0aGV1cyBDZXJ0aWZp -Y2F0ZSBBdXRob3JpdHkxGzAZBgNVBAMMElByb21ldGhldXMgUm9vdCBDQTAgFw0x -OTA0MDUwODAwMzdaGA8yMDU5MDMyNjA4MDAzN1owaTELMAkGA1UEBhMCVVMxEzAR -BgNVBAoMClByb21ldGhldXMxKTAnBgNVBAsMIFByb21ldGhldXMgQ2VydGlmaWNh -dGUgQXV0aG9yaXR5MRowGAYDVQQDDBFQcm9tZXRoZXVzIFRMUyBDQTCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBAKrSNGvt8fQBCOUAn3XIuvxLcsYEk6/x -9rXOAQ3GvdMWmJ3lUVYSWBbuGG7waKlCFmXP4zH1kHmdEzKHOx9l/YSIpFY9JlRp -BSda6okC5zGbfX92k1RwvBeSBp+fkEqKz4Kne3xxxPo0VgAyGoXF+ORKY0M3nWCE -TXhuhxLEKx+Tpf7MXvHfwZf/tz4gOB1xFRHsbHrMDodSMbG5dMMHHEJLHsEXvOQT -t7AgLsQHk72oEfnap9DfSki+m21lw65YVsCfF8XYMrEEIvtbGPYgEFDsLRBPzEiP -8nXdM6QO9VXaLImhOlK7ERELlycXczXaEHGzn6hCkeY6ZgD55RGPW1cCAwEAAaOB -3DCB2TAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4E -FgQUTQK/cZVqqljFnLiDZ15kFpnhKp4wHwYDVR0jBBgwFoAUPB6oxkwFTSDsiNsp -1Hv5El3O6howPgYIKwYBBQUHAQEEMjAwMC4GCCsGAQUFBzAChiJodHRwczovL2V4 -YW1wbGUuY29tL2NhL3Jvb3QtY2EuY2VyMDMGA1UdHwQsMCowKKAmoCSGImh0dHBz -Oi8vZXhhbXBsZS5jb20vY2Evcm9vdC1jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEB -AGP8ujClBdZ2FPF3OLFBb4HZtAL9vOX22eZz4HHPTPsTtWu9ucb2KBg24YzZk7N4 
-Sj05G/T7aXUkruGgL5QFvxA8PtIrqPMxJS7tE61gXSKaJhUghphzTPZLSLgfZ7pO -yUfthdw43AIM+1TVLmy0lRhR0a7q6Pu0GVAEvDF+UZ6FKU3I9ybW1o01LZ7iBhY4 -4laA7POjNOMoxOgQ0IqmbyCaudy5kGu6iicsKXIoVedZpqeQ7DLo0CZKwUTdIL/c -TR5+zOWiW+jfPUsBqkhWF+kp2HGDBTaMEU93uJUgt8chBsKHl7Rr0/cjuk1fFdEM -TW7xap1XXAJq1zEY71z8+AQ= ------END CERTIFICATE----- -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1 (0x1) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus Root CA - Validity - Not Before: Apr 5 07:55:00 2019 GMT - Not After : Mar 26 07:55:00 2059 GMT - Subject: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus Root CA - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) - Modulus: - 00:bf:b9:e2:ab:5f:61:22:e1:4e:cd:ee:da:b0:26: - 2e:bb:b0:7e:1c:ce:10:be:16:29:35:0c:0c:1d:93: - 01:29:2a:f6:f9:c2:6e:5c:10:44:ca:f8:dc:ad:7a: - 06:64:0f:8a:18:ad:b2:a2:94:49:c9:ba:8c:45:94: - 7c:d9:e0:11:45:d8:16:79:a2:20:9f:8c:63:60:72: - 2a:5b:f9:66:80:ac:85:67:01:5a:eb:91:c1:d2:88: - 87:9e:4c:18:c9:f2:f0:7a:18:c0:e6:ab:2c:78:de: - 5f:b2:22:4e:94:9c:f5:cd:e6:e2:33:30:e9:20:10: - a6:a1:75:eb:59:ab:45:a9:f7:3e:54:40:ae:05:25: - be:74:c5:3a:fd:af:73:16:60:45:7c:4a:e0:0e:0d: - a1:15:7f:9a:1f:c2:a7:04:ad:ef:b3:e4:f6:00:2c: - 4e:0b:04:90:49:ee:d3:db:a6:12:c4:91:0b:32:4f: - 11:84:c7:c4:8a:ef:51:66:7a:b0:20:2f:cb:95:8d: - 96:57:60:66:5e:f9:4f:5a:94:9c:71:ad:eb:ca:70: - 3e:62:06:c2:3a:29:f8:9e:86:af:da:07:78:f8:31: - af:42:48:49:9e:4a:df:1b:27:1f:44:35:81:6d:fa: - 7a:c5:6a:0a:35:23:c7:c4:d5:fe:c9:9e:61:c9:30: - cd:1f - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Key Usage: critical - Certificate Sign, CRL Sign - X509v3 Basic Constraints: critical - CA:TRUE - X509v3 Subject Key Identifier: - 3C:1E:A8:C6:4C:05:4D:20:EC:88:DB:29:D4:7B:F9:12:5D:CE:EA:1A - X509v3 Authority Key Identifier: - keyid:3C:1E:A8:C6:4C:05:4D:20:EC:88:DB:29:D4:7B:F9:12:5D:CE:EA:1A - - Signature Algorithm: sha1WithRSAEncryption - 56:2f:79:e5:12:91:f5:19:a7:d1:32:28:fd:e3:9d:8f:e1:3c: - bb:a3:a5:f2:55:8a:03:ad:2c:1d:18:82:e1:7f:19:75:d9:47: - 5b:e7:7c:e4:a5:e0:eb:dc:7e:24:a3:7d:99:1a:cf:39:ba:a5: - b4:b8:45:68:83:cf:70:ad:56:f2:34:73:65:fc:6c:b0:53:9a: - 79:04:f7:3e:7e:4b:22:1b:e7:76:23:20:bc:9c:05:a2:5d:01: - d2:f0:09:49:17:b2:61:74:1a:5b:f4:e0:fd:ce:11:ba:13:4a: - e6:07:11:7d:30:e2:11:87:ee:33:1a:68:de:67:f4:ac:b5:58: - 1a:ac:cf:7a:2d:fd:c3:44:5b:4b:cd:6c:ff:f6:49:b4:55:4a: - 09:a0:92:2d:57:3b:69:85:54:3e:e9:ec:ef:b2:a5:7a:29:75: - 2b:f8:eb:4b:d4:cf:68:ee:3e:c8:63:7e:12:eb:e4:2f:63:a3: - a7:c8:0f:e9:39:ff:5c:29:65:7f:25:f0:42:bf:07:ba:06:b8: - 5e:d6:56:ba:f8:67:56:1b:42:aa:b3:04:d8:6e:88:10:a5:70: - b5:81:04:a4:90:a3:f0:83:4d:0c:6b:12:5d:a4:4c:83:5a:ff: - a8:7a:86:61:ff:0f:4c:e5:0f:17:d1:64:3c:bd:d9:22:7e:b7: - fa:9b:83:ba ------BEGIN CERTIFICATE----- -MIIDtDCCApygAwIBAgIBATANBgkqhkiG9w0BAQUFADBqMQswCQYDVQQGEwJVUzET -MBEGA1UECgwKUHJvbWV0aGV1czEpMCcGA1UECwwgUHJvbWV0aGV1cyBDZXJ0aWZp -Y2F0ZSBBdXRob3JpdHkxGzAZBgNVBAMMElByb21ldGhldXMgUm9vdCBDQTAgFw0x -OTA0MDUwNzU1MDBaGA8yMDU5MDMyNjA3NTUwMFowajELMAkGA1UEBhMCVVMxEzAR -BgNVBAoMClByb21ldGhldXMxKTAnBgNVBAsMIFByb21ldGhldXMgQ2VydGlmaWNh -dGUgQXV0aG9yaXR5MRswGQYDVQQDDBJQcm9tZXRoZXVzIFJvb3QgQ0EwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC/ueKrX2Ei4U7N7tqwJi67sH4czhC+ -Fik1DAwdkwEpKvb5wm5cEETK+NytegZkD4oYrbKilEnJuoxFlHzZ4BFF2BZ5oiCf -jGNgcipb+WaArIVnAVrrkcHSiIeeTBjJ8vB6GMDmqyx43l+yIk6UnPXN5uIzMOkg -EKahdetZq0Wp9z5UQK4FJb50xTr9r3MWYEV8SuAODaEVf5ofwqcEre+z5PYALE4L 
-BJBJ7tPbphLEkQsyTxGEx8SK71FmerAgL8uVjZZXYGZe+U9alJxxrevKcD5iBsI6 -Kfiehq/aB3j4Ma9CSEmeSt8bJx9ENYFt+nrFago1I8fE1f7JnmHJMM0fAgMBAAGj -YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQ8 -HqjGTAVNIOyI2ynUe/kSXc7qGjAfBgNVHSMEGDAWgBQ8HqjGTAVNIOyI2ynUe/kS -Xc7qGjANBgkqhkiG9w0BAQUFAAOCAQEAVi955RKR9Rmn0TIo/eOdj+E8u6Ol8lWK -A60sHRiC4X8ZddlHW+d85KXg69x+JKN9mRrPObqltLhFaIPPcK1W8jRzZfxssFOa -eQT3Pn5LIhvndiMgvJwFol0B0vAJSReyYXQaW/Tg/c4RuhNK5gcRfTDiEYfuMxpo -3mf0rLVYGqzPei39w0RbS81s//ZJtFVKCaCSLVc7aYVUPuns77Kleil1K/jrS9TP -aO4+yGN+EuvkL2Ojp8gP6Tn/XCllfyXwQr8Huga4XtZWuvhnVhtCqrME2G6IEKVw -tYEEpJCj8INNDGsSXaRMg1r/qHqGYf8PTOUPF9FkPL3ZIn63+puDug== ------END CERTIFICATE----- diff --git a/https/testdata/tls_config_auth_clientCAs_invalid.bad.yml b/https/testdata/tls_config_auth_clientCAs_invalid.bad.yml deleted file mode 100644 index 91ec706862..0000000000 --- a/https/testdata/tls_config_auth_clientCAs_invalid.bad.yml +++ /dev/null @@ -1,4 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_ca_file : "somefile" \ No newline at end of file diff --git a/https/testdata/tls_config_auth_clientCAs_missing.bad.yml b/https/testdata/tls_config_auth_clientCAs_missing.bad.yml deleted file mode 100644 index fea2a67f89..0000000000 --- a/https/testdata/tls_config_auth_clientCAs_missing.bad.yml +++ /dev/null @@ -1,4 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "RequireAndVerifyClientCert" \ No newline at end of file diff --git a/https/testdata/tls_config_auth_user_list_invalid.bad.yml b/https/testdata/tls_config_auth_user_list_invalid.bad.yml deleted file mode 100644 index 7324573175..0000000000 --- a/https/testdata/tls_config_auth_user_list_invalid.bad.yml +++ /dev/null @@ -1,5 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" -basic_auth_users: - john: doe diff --git a/https/testdata/tls_config_empty.yml b/https/testdata/tls_config_empty.yml deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/https/testdata/tls_config_junk.yml b/https/testdata/tls_config_junk.yml deleted file mode 100644 index 568a7c4041..0000000000 --- a/https/testdata/tls_config_junk.yml +++ /dev/null @@ -1,20 +0,0 @@ -hWkNKCp3fvIx3jKnsaBI -TuEjdwNS8A2vYdFbiKqr -ay3RiOtykgt4m6m3KOol -ZreGpJRGmpDSVV9cioiF -r7kDOHhHU2frvv0nLcY2 -uQMQM4XgqFkCG6gFAIJZ -g99tTkrZhN9b6pkJ6J2y -rzdt729HrA2RblDGYfjs -MW7GxrBdlCnliYJGPhfr -g9kaXxMXcDwsw0C0rv0u -637ZmfRGElb6VBVOtgqn -RG0MRezjLYCJQBMUdRDE -RzO4VicAzj7asVZAT3oo -nPw267UONk7h7KBYRgch -Alj38foWqjV3heXXdahm -TrMzMgl6JIQ1x4OZB5i4 -qlrXFJoeV6Pr77nuiEh9 -3yE5vMnnKHm2nImEfzMG -bI01UDObHRSaoJLC0vTD -G9tlcKU883NkQ6nsxJ8Y diff --git a/https/testdata/tls_config_junk_key.yml b/https/testdata/tls_config_junk_key.yml deleted file mode 100644 index 77f553469f..0000000000 --- a/https/testdata/tls_config_junk_key.yml +++ /dev/null @@ -1,2 +0,0 @@ -tls_server_config : - cert_filse: "testdata/server.crt" diff --git a/https/testdata/tls_config_noAuth.bad.yml b/https/testdata/tls_config_noAuth.bad.yml deleted file mode 100644 index f309180681..0000000000 --- a/https/testdata/tls_config_noAuth.bad.yml +++ /dev/null @@ -1,4 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_ca_file : "testdata/tls-ca-chain.pem" diff --git a/https/testdata/tls_config_noAuth.good.blocking.yml b/https/testdata/tls_config_noAuth.good.blocking.yml deleted file mode 100644 index 
43e47ca817..0000000000 --- a/https/testdata/tls_config_noAuth.good.blocking.yml +++ /dev/null @@ -1,5 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "RequireAndVerifyClientCert" - client_ca_file: "testdata/tls-ca-chain.pem" \ No newline at end of file diff --git a/https/testdata/tls_config_noAuth.good.yml b/https/testdata/tls_config_noAuth.good.yml deleted file mode 100644 index 33b6a680d9..0000000000 --- a/https/testdata/tls_config_noAuth.good.yml +++ /dev/null @@ -1,5 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" diff --git a/https/testdata/tls_config_noAuth_allCiphers.good.yml b/https/testdata/tls_config_noAuth_allCiphers.good.yml deleted file mode 100644 index e16aec15af..0000000000 --- a/https/testdata/tls_config_noAuth_allCiphers.good.yml +++ /dev/null @@ -1,26 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - cipher_suites: - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA - - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA - - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA - - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA - - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA - - TLS_RSA_WITH_3DES_EDE_CBC_SHA - - TLS_RSA_WITH_AES_128_CBC_SHA - - TLS_RSA_WITH_AES_256_CBC_SHA - - TLS_RSA_WITH_AES_128_GCM_SHA256 - - TLS_RSA_WITH_AES_256_GCM_SHA384 - diff --git a/https/testdata/tls_config_noAuth_allCurves.good.yml b/https/testdata/tls_config_noAuth_allCurves.good.yml deleted file mode 100644 index e727402a5f..0000000000 --- a/https/testdata/tls_config_noAuth_allCurves.good.yml +++ /dev/null @@ -1,10 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - curve_preferences: - - CurveP256 - - CurveP384 - - CurveP521 - - X25519 diff --git a/https/testdata/tls_config_noAuth_certPath_empty.bad.yml b/https/testdata/tls_config_noAuth_certPath_empty.bad.yml deleted file mode 100644 index b9739c04f1..0000000000 --- a/https/testdata/tls_config_noAuth_certPath_empty.bad.yml +++ /dev/null @@ -1,3 +0,0 @@ -tls_server_config : - cert_file : "" - key_file : "testdata/server.key" \ No newline at end of file diff --git a/https/testdata/tls_config_noAuth_certPath_invalid.bad.yml b/https/testdata/tls_config_noAuth_certPath_invalid.bad.yml deleted file mode 100644 index b2f46d93e3..0000000000 --- a/https/testdata/tls_config_noAuth_certPath_invalid.bad.yml +++ /dev/null @@ -1,3 +0,0 @@ -tls_server_config : - cert_file : "somefile" - key_file : "testdata/server.key" \ No newline at end of file diff --git a/https/testdata/tls_config_noAuth_certPath_keyPath_empty.bad.yml b/https/testdata/tls_config_noAuth_certPath_keyPath_empty.bad.yml deleted file mode 100644 index 4e366adfc4..0000000000 --- a/https/testdata/tls_config_noAuth_certPath_keyPath_empty.bad.yml +++ /dev/null @@ -1,4 +0,0 @@ -tls_server_config : - 
cert_file : "" - key_file : "" - client_auth_type: "x" diff --git a/https/testdata/tls_config_noAuth_certPath_keyPath_invalid.bad.yml b/https/testdata/tls_config_noAuth_certPath_keyPath_invalid.bad.yml deleted file mode 100644 index ab0a262e31..0000000000 --- a/https/testdata/tls_config_noAuth_certPath_keyPath_invalid.bad.yml +++ /dev/null @@ -1,3 +0,0 @@ -tls_server_config : - cert_file : "somefile" - key_file : "somefile" \ No newline at end of file diff --git a/https/testdata/tls_config_noAuth_inventedCiphers.bad.yml b/https/testdata/tls_config_noAuth_inventedCiphers.bad.yml deleted file mode 100644 index 1c5b28e1bb..0000000000 --- a/https/testdata/tls_config_noAuth_inventedCiphers.bad.yml +++ /dev/null @@ -1,8 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - cipher_suites: - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA2048 - diff --git a/https/testdata/tls_config_noAuth_inventedCurves.bad.yml b/https/testdata/tls_config_noAuth_inventedCurves.bad.yml deleted file mode 100644 index 16de7381e0..0000000000 --- a/https/testdata/tls_config_noAuth_inventedCurves.bad.yml +++ /dev/null @@ -1,7 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - curve_preferences: - - CurveP257 diff --git a/https/testdata/tls_config_noAuth_keyPath_empty.bad.yml b/https/testdata/tls_config_noAuth_keyPath_empty.bad.yml deleted file mode 100644 index d9970298d5..0000000000 --- a/https/testdata/tls_config_noAuth_keyPath_empty.bad.yml +++ /dev/null @@ -1,3 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "" \ No newline at end of file diff --git a/https/testdata/tls_config_noAuth_keyPath_invalid.bad.yml b/https/testdata/tls_config_noAuth_keyPath_invalid.bad.yml deleted file mode 100644 index 2b9d37f787..0000000000 --- a/https/testdata/tls_config_noAuth_keyPath_invalid.bad.yml +++ /dev/null @@ -1,3 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.cert" - key_file : "somefile" \ No newline at end of file diff --git a/https/testdata/tls_config_noAuth_noHTTP2.good.yml b/https/testdata/tls_config_noAuth_noHTTP2.good.yml deleted file mode 100644 index d962c3d07c..0000000000 --- a/https/testdata/tls_config_noAuth_noHTTP2.good.yml +++ /dev/null @@ -1,10 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - cipher_suites: - - TLS_RSA_WITH_AES_128_CBC_SHA - max_version: TLS12 -http_server_config: - http2: false diff --git a/https/testdata/tls_config_noAuth_noHTTP2Cipher.bad.yml b/https/testdata/tls_config_noAuth_noHTTP2Cipher.bad.yml deleted file mode 100644 index 2d6723a721..0000000000 --- a/https/testdata/tls_config_noAuth_noHTTP2Cipher.bad.yml +++ /dev/null @@ -1,8 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - cipher_suites: - - TLS_RSA_WITH_AES_128_CBC_SHA - max_version: TLS12 diff --git a/https/testdata/tls_config_noAuth_someCiphers.good.yml b/https/testdata/tls_config_noAuth_someCiphers.good.yml deleted file mode 100644 index aae1e65896..0000000000 --- a/https/testdata/tls_config_noAuth_someCiphers.good.yml +++ 
/dev/null @@ -1,10 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - cipher_suites: - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - min_version: TLS12 - max_version: TLS12 diff --git a/https/testdata/tls_config_noAuth_someCiphers_noOrder.good.yml b/https/testdata/tls_config_noAuth_someCiphers_noOrder.good.yml deleted file mode 100644 index d21c6be070..0000000000 --- a/https/testdata/tls_config_noAuth_someCiphers_noOrder.good.yml +++ /dev/null @@ -1,11 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - cipher_suites: - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - prefer_server_cipher_suites: false - min_version: TLS12 - max_version: TLS12 diff --git a/https/testdata/tls_config_noAuth_someCurves.good.yml b/https/testdata/tls_config_noAuth_someCurves.good.yml deleted file mode 100644 index 2e860fc8db..0000000000 --- a/https/testdata/tls_config_noAuth_someCurves.good.yml +++ /dev/null @@ -1,8 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - min_version: TLS13 - curve_preferences: - - CurveP521 diff --git a/https/testdata/tls_config_noAuth_wrongTLSVersion.bad.yml b/https/testdata/tls_config_noAuth_wrongTLSVersion.bad.yml deleted file mode 100644 index 51a0d6a60d..0000000000 --- a/https/testdata/tls_config_noAuth_wrongTLSVersion.bad.yml +++ /dev/null @@ -1,6 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" - client_auth_type : "VerifyClientCertIfGiven" - client_ca_file : "testdata/tls-ca-chain.pem" - min_version: TLS111 diff --git a/https/testdata/tls_config_users.good.yml b/https/testdata/tls_config_users.good.yml deleted file mode 100644 index 8c686fc152..0000000000 --- a/https/testdata/tls_config_users.good.yml +++ /dev/null @@ -1,8 +0,0 @@ -tls_server_config : - cert_file : "testdata/server.crt" - key_file : "testdata/server.key" -basic_auth_users: - alice: $2y$12$1DpfPeqF9HzHJt.EWswy1exHluGfbhnn3yXhR7Xes6m3WJqFg0Wby - bob: $2y$18$4VeFDzXIoPHKnKTU3O3GH.N.vZu06CVqczYZ8WvfzrddFU6tGqjR. - carol: $2y$10$qRTBuFoULoYNA7AQ/F3ck.trZBPyjV64.oA4ZsSBCIWvXuvQlQTuu - dave: $2y$10$2UXri9cIDdgeKjBo4Rlpx.U3ZLDV8X1IxKmsfOvhcM5oXQt/mLmXq diff --git a/https/testdata/tls_config_users_noTLS.good.yml b/https/testdata/tls_config_users_noTLS.good.yml deleted file mode 100644 index d3a7987167..0000000000 --- a/https/testdata/tls_config_users_noTLS.good.yml +++ /dev/null @@ -1,5 +0,0 @@ -basic_auth_users: - alice: $2y$12$1DpfPeqF9HzHJt.EWswy1exHluGfbhnn3yXhR7Xes6m3WJqFg0Wby - bob: $2y$18$4VeFDzXIoPHKnKTU3O3GH.N.vZu06CVqczYZ8WvfzrddFU6tGqjR. - carol: $2y$10$qRTBuFoULoYNA7AQ/F3ck.trZBPyjV64.oA4ZsSBCIWvXuvQlQTuu - dave: $2y$10$2UXri9cIDdgeKjBo4Rlpx.U3ZLDV8X1IxKmsfOvhcM5oXQt/mLmXq diff --git a/https/tls_config.go b/https/tls_config.go deleted file mode 100644 index e7cc6321b1..0000000000 --- a/https/tls_config.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package https allows the implementation of TLS. -package https - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/pkg/errors" - config_util "github.com/prometheus/common/config" - "gopkg.in/yaml.v2" -) - -var ( - errNoTLSConfig = errors.New("TLS config is not present") -) - -type Config struct { - TLSConfig TLSStruct `yaml:"tls_server_config"` - HTTPConfig HTTPStruct `yaml:"http_server_config"` - Users map[string]config_util.Secret `yaml:"basic_auth_users"` -} - -type TLSStruct struct { - TLSCertPath string `yaml:"cert_file"` - TLSKeyPath string `yaml:"key_file"` - ClientAuth string `yaml:"client_auth_type"` - ClientCAs string `yaml:"client_ca_file"` - CipherSuites []cipher `yaml:"cipher_suites"` - CurvePreferences []curve `yaml:"curve_preferences"` - MinVersion tlsVersion `yaml:"min_version"` - MaxVersion tlsVersion `yaml:"max_version"` - PreferServerCipherSuites bool `yaml:"prefer_server_cipher_suites"` -} - -type HTTPStruct struct { - HTTP2 bool `yaml:"http2"` -} - -func getConfig(configPath string) (*Config, error) { - content, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, err - } - c := &Config{ - TLSConfig: TLSStruct{ - MinVersion: tls.VersionTLS12, - MaxVersion: tls.VersionTLS13, - PreferServerCipherSuites: true, - }, - HTTPConfig: HTTPStruct{HTTP2: true}, - } - err = yaml.UnmarshalStrict(content, c) - return c, err -} - -func getTLSConfig(configPath string) (*tls.Config, error) { - c, err := getConfig(configPath) - if err != nil { - return nil, err - } - return ConfigToTLSConfig(&c.TLSConfig) -} - -// ConfigToTLSConfig generates the golang tls.Config from the TLSStruct config. -func ConfigToTLSConfig(c *TLSStruct) (*tls.Config, error) { - if c.TLSCertPath == "" && c.TLSKeyPath == "" && c.ClientAuth == "" && c.ClientCAs == "" { - return nil, errNoTLSConfig - } - - if c.TLSCertPath == "" { - return nil, errors.New("missing cert_file") - } - - if c.TLSKeyPath == "" { - return nil, errors.New("missing key_file") - } - - loadCert := func() (*tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(c.TLSCertPath, c.TLSKeyPath) - if err != nil { - return nil, errors.Wrap(err, "failed to load X509KeyPair") - } - return &cert, nil - } - - // Confirm that certificate and key paths are valid. 
- if _, err := loadCert(); err != nil { - return nil, err - } - - cfg := &tls.Config{ - MinVersion: (uint16)(c.MinVersion), - MaxVersion: (uint16)(c.MaxVersion), - PreferServerCipherSuites: c.PreferServerCipherSuites, - } - - cfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) { - return loadCert() - } - - var cf []uint16 - for _, c := range c.CipherSuites { - cf = append(cf, (uint16)(c)) - } - if len(cf) > 0 { - cfg.CipherSuites = cf - } - - var cp []tls.CurveID - for _, c := range c.CurvePreferences { - cp = append(cp, (tls.CurveID)(c)) - } - if len(cp) > 0 { - cfg.CurvePreferences = cp - } - - if c.ClientCAs != "" { - clientCAPool := x509.NewCertPool() - clientCAFile, err := ioutil.ReadFile(c.ClientCAs) - if err != nil { - return nil, err - } - clientCAPool.AppendCertsFromPEM(clientCAFile) - cfg.ClientCAs = clientCAPool - } - - switch c.ClientAuth { - case "RequestClientCert": - cfg.ClientAuth = tls.RequestClientCert - case "RequireClientCert": - cfg.ClientAuth = tls.RequireAnyClientCert - case "VerifyClientCertIfGiven": - cfg.ClientAuth = tls.VerifyClientCertIfGiven - case "RequireAndVerifyClientCert": - cfg.ClientAuth = tls.RequireAndVerifyClientCert - case "", "NoClientCert": - cfg.ClientAuth = tls.NoClientCert - default: - return nil, errors.New("Invalid ClientAuth: " + c.ClientAuth) - } - - if c.ClientCAs != "" && cfg.ClientAuth == tls.NoClientCert { - return nil, errors.New("Client CA's have been configured without a Client Auth Policy") - } - - return cfg, nil -} - -// Listen starts the server on the given address. If tlsConfigPath isn't empty the server connection will be started using TLS. -func Listen(server *http.Server, tlsConfigPath string, logger log.Logger) error { - if tlsConfigPath == "" { - level.Info(logger).Log("msg", "TLS is disabled and it cannot be enabled on the fly.", "http2", false) - return server.ListenAndServe() - } - - if err := validateUsers(tlsConfigPath); err != nil { - return err - } - - // Setup basic authentication. - var handler http.Handler = http.DefaultServeMux - if server.Handler != nil { - handler = server.Handler - } - server.Handler = &userAuthRoundtrip{ - tlsConfigPath: tlsConfigPath, - logger: logger, - handler: handler, - } - - c, err := getConfig(tlsConfigPath) - if err != nil { - return err - } - config, err := ConfigToTLSConfig(&c.TLSConfig) - switch err { - case nil: - if !c.HTTPConfig.HTTP2 { - server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) - } - // Valid TLS config. - level.Info(logger).Log("msg", "TLS is enabled and it cannot be disabled on the fly.", "http2", c.HTTPConfig.HTTP2) - case errNoTLSConfig: - // No TLS config, back to plain HTTP. - level.Info(logger).Log("msg", "TLS is disabled and it cannot be enabled on the fly.", "http2", false) - return server.ListenAndServe() - default: - // Invalid TLS config. - return err - } - - server.TLSConfig = config - - // Set the GetConfigForClient method of the HTTPS server so that the config - // and certs are reloaded on new connections. 
- server.TLSConfig.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { - return getTLSConfig(tlsConfigPath) - } - return server.ListenAndServeTLS("", "") -} - -type cipher uint16 - -func (c *cipher) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - err := unmarshal((*string)(&s)) - if err != nil { - return err - } - for _, cs := range tls.CipherSuites() { - if cs.Name == s { - *c = (cipher)(cs.ID) - return nil - } - } - return errors.New("unknown cipher: " + s) -} - -func (c cipher) MarshalYAML() (interface{}, error) { - return tls.CipherSuiteName((uint16)(c)), nil -} - -type curve tls.CurveID - -var curves = map[string]curve{ - "CurveP256": (curve)(tls.CurveP256), - "CurveP384": (curve)(tls.CurveP384), - "CurveP521": (curve)(tls.CurveP521), - "X25519": (curve)(tls.X25519), -} - -func (c *curve) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - err := unmarshal((*string)(&s)) - if err != nil { - return err - } - if curveid, ok := curves[s]; ok { - *c = curveid - return nil - } - return errors.New("unknown curve: " + s) -} - -func (c *curve) MarshalYAML() (interface{}, error) { - for s, curveid := range curves { - if *c == curveid { - return s, nil - } - } - return fmt.Sprintf("%v", c), nil -} - -type tlsVersion uint16 - -var tlsVersions = map[string]tlsVersion{ - "TLS13": (tlsVersion)(tls.VersionTLS13), - "TLS12": (tlsVersion)(tls.VersionTLS12), - "TLS11": (tlsVersion)(tls.VersionTLS11), - "TLS10": (tlsVersion)(tls.VersionTLS10), -} - -func (tv *tlsVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - err := unmarshal((*string)(&s)) - if err != nil { - return err - } - if v, ok := tlsVersions[s]; ok { - *tv = v - return nil - } - return errors.New("unknown TLS version: " + s) -} - -func (tv *tlsVersion) MarshalYAML() (interface{}, error) { - for s, v := range tlsVersions { - if *tv == v { - return s, nil - } - } - return fmt.Sprintf("%v", tv), nil -} diff --git a/https/tls_config_test.go b/https/tls_config_test.go deleted file mode 100644 index ccc7a992a2..0000000000 --- a/https/tls_config_test.go +++ /dev/null @@ -1,577 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build go1.14 - -package https - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "regexp" - "sync" - "testing" - "time" -) - -var ( - port = getPort() - testlogger = &testLogger{} - - ErrorMap = map[string]*regexp.Regexp{ - "HTTP Response to HTTPS": regexp.MustCompile(`server gave HTTP response to HTTPS client`), - "No such file": regexp.MustCompile(`no such file`), - "Invalid argument": regexp.MustCompile(`invalid argument`), - "YAML error": regexp.MustCompile(`yaml`), - "Invalid ClientAuth": regexp.MustCompile(`invalid ClientAuth`), - "TLS handshake": regexp.MustCompile(`tls`), - "HTTP Request to HTTPS server": regexp.MustCompile(`HTTP`), - "Invalid CertPath": regexp.MustCompile(`missing cert_file`), - "Invalid KeyPath": regexp.MustCompile(`missing key_file`), - "ClientCA set without policy": regexp.MustCompile(`Client CA's have been configured without a Client Auth Policy`), - "Bad password": regexp.MustCompile(`hashedSecret too short to be a bcrypted password`), - "Unauthorized": regexp.MustCompile(`Unauthorized`), - "Forbidden": regexp.MustCompile(`Forbidden`), - "Handshake failure": regexp.MustCompile(`handshake failure`), - "Unknown cipher": regexp.MustCompile(`unknown cipher`), - "Unknown curve": regexp.MustCompile(`unknown curve`), - "Unknown TLS version": regexp.MustCompile(`unknown TLS version`), - "No HTTP2 cipher": regexp.MustCompile(`TLSConfig.CipherSuites is missing an HTTP/2-required`), - "Incompatible TLS version": regexp.MustCompile(`protocol version not supported`), - } -) - -type testLogger struct{} - -func (t *testLogger) Log(keyvals ...interface{}) error { - return nil -} - -func getPort() string { - listener, err := net.Listen("tcp", ":0") - if err != nil { - panic(err) - } - defer listener.Close() - p := listener.Addr().(*net.TCPAddr).Port - return fmt.Sprintf(":%v", p) -} - -type TestInputs struct { - Name string - Server func() *http.Server - UseNilServer bool - YAMLConfigPath string - ExpectedError *regexp.Regexp - UseTLSClient bool - ClientMaxTLSVersion uint16 - CipherSuites []uint16 - ActualCipher uint16 - CurvePreferences []tls.CurveID - Username string - Password string -} - -func TestYAMLFiles(t *testing.T) { - testTables := []*TestInputs{ - { - Name: `path to config yml invalid`, - YAMLConfigPath: "somefile", - ExpectedError: ErrorMap["No such file"], - }, - { - Name: `empty config yml`, - YAMLConfigPath: "testdata/tls_config_empty.yml", - ExpectedError: nil, - }, - { - Name: `invalid config yml (invalid structure)`, - YAMLConfigPath: "testdata/tls_config_junk.yml", - ExpectedError: ErrorMap["YAML error"], - }, - { - Name: `invalid config yml (invalid key)`, - YAMLConfigPath: "testdata/tls_config_junk_key.yml", - ExpectedError: ErrorMap["YAML error"], - }, - { - Name: `invalid config yml (cert path empty)`, - YAMLConfigPath: "testdata/tls_config_noAuth_certPath_empty.bad.yml", - ExpectedError: ErrorMap["Invalid CertPath"], - }, - { - Name: `invalid config yml (key path empty)`, - YAMLConfigPath: "testdata/tls_config_noAuth_keyPath_empty.bad.yml", - ExpectedError: ErrorMap["Invalid KeyPath"], - }, - { - Name: `invalid config yml (cert path and key path empty)`, - YAMLConfigPath: "testdata/tls_config_noAuth_certPath_keyPath_empty.bad.yml", - ExpectedError: ErrorMap["Invalid CertPath"], - }, - { - Name: `invalid config yml (cert path invalid)`, - YAMLConfigPath: "testdata/tls_config_noAuth_certPath_invalid.bad.yml", - ExpectedError: ErrorMap["No such file"], - }, - { - Name: `invalid config yml (key path 
invalid)`, - YAMLConfigPath: "testdata/tls_config_noAuth_keyPath_invalid.bad.yml", - ExpectedError: ErrorMap["No such file"], - }, - { - Name: `invalid config yml (cert path and key path invalid)`, - YAMLConfigPath: "testdata/tls_config_noAuth_certPath_keyPath_invalid.bad.yml", - ExpectedError: ErrorMap["No such file"], - }, - { - Name: `invalid config yml (invalid ClientAuth)`, - YAMLConfigPath: "testdata/tls_config_noAuth.bad.yml", - ExpectedError: ErrorMap["ClientCA set without policy"], - }, - { - Name: `invalid config yml (invalid ClientCAs filepath)`, - YAMLConfigPath: "testdata/tls_config_auth_clientCAs_invalid.bad.yml", - ExpectedError: ErrorMap["No such file"], - }, - { - Name: `invalid config yml (invalid user list)`, - YAMLConfigPath: "testdata/tls_config_auth_user_list_invalid.bad.yml", - ExpectedError: ErrorMap["Bad password"], - }, - { - Name: `invalid config yml (bad cipher)`, - YAMLConfigPath: "testdata/tls_config_noAuth_inventedCiphers.bad.yml", - ExpectedError: ErrorMap["Unknown cipher"], - }, - { - Name: `invalid config yml (bad curves)`, - YAMLConfigPath: "testdata/tls_config_noAuth_inventedCurves.bad.yml", - ExpectedError: ErrorMap["Unknown curve"], - }, - { - Name: `invalid config yml (bad TLS version)`, - YAMLConfigPath: "testdata/tls_config_noAuth_wrongTLSVersion.bad.yml", - ExpectedError: ErrorMap["Unknown TLS version"], - }, - } - for _, testInputs := range testTables { - t.Run(testInputs.Name, testInputs.Test) - } -} - -func TestServerBehaviour(t *testing.T) { - testTables := []*TestInputs{ - { - Name: `empty string YAMLConfigPath and default client`, - YAMLConfigPath: "", - ExpectedError: nil, - }, - { - Name: `empty string YAMLConfigPath and TLS client`, - YAMLConfigPath: "", - UseTLSClient: true, - ExpectedError: ErrorMap["HTTP Response to HTTPS"], - }, - { - Name: `valid tls config yml and default client`, - YAMLConfigPath: "testdata/tls_config_noAuth.good.yml", - ExpectedError: ErrorMap["HTTP Request to HTTPS server"], - }, - { - Name: `valid tls config yml and tls client`, - YAMLConfigPath: "testdata/tls_config_noAuth.good.yml", - UseTLSClient: true, - ExpectedError: nil, - }, - { - Name: `valid tls config yml with TLS 1.1 client`, - YAMLConfigPath: "testdata/tls_config_noAuth.good.yml", - UseTLSClient: true, - ClientMaxTLSVersion: tls.VersionTLS11, - ExpectedError: ErrorMap["Incompatible TLS version"], - }, - { - Name: `valid tls config yml with all ciphers`, - YAMLConfigPath: "testdata/tls_config_noAuth_allCiphers.good.yml", - UseTLSClient: true, - ExpectedError: nil, - }, - { - Name: `valid tls config yml with some ciphers`, - YAMLConfigPath: "testdata/tls_config_noAuth_someCiphers.good.yml", - UseTLSClient: true, - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, - ExpectedError: nil, - }, - { - Name: `valid tls config yml with no common cipher`, - YAMLConfigPath: "testdata/tls_config_noAuth_someCiphers.good.yml", - UseTLSClient: true, - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA}, - ExpectedError: ErrorMap["Handshake failure"], - }, - { - Name: `valid tls config yml with multiple client ciphers`, - YAMLConfigPath: "testdata/tls_config_noAuth_someCiphers.good.yml", - UseTLSClient: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - }, - ActualCipher: tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - ExpectedError: nil, - }, - { - Name: `valid tls config yml with multiple client ciphers, client chooses cipher`, - YAMLConfigPath: 
"testdata/tls_config_noAuth_someCiphers_noOrder.good.yml", - UseTLSClient: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - }, - ActualCipher: tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - ExpectedError: nil, - }, - { - Name: `valid tls config yml with all curves`, - YAMLConfigPath: "testdata/tls_config_noAuth_allCurves.good.yml", - UseTLSClient: true, - ExpectedError: nil, - }, - { - Name: `valid tls config yml with some curves`, - YAMLConfigPath: "testdata/tls_config_noAuth_someCurves.good.yml", - UseTLSClient: true, - CurvePreferences: []tls.CurveID{tls.CurveP521}, - ExpectedError: nil, - }, - { - Name: `valid tls config yml with no common curves`, - YAMLConfigPath: "testdata/tls_config_noAuth_someCurves.good.yml", - UseTLSClient: true, - CurvePreferences: []tls.CurveID{tls.CurveP384}, - ExpectedError: ErrorMap["Handshake failure"], - }, - { - Name: `valid tls config yml with non-http2 ciphers`, - YAMLConfigPath: "testdata/tls_config_noAuth_noHTTP2.good.yml", - UseTLSClient: true, - ExpectedError: nil, - }, - { - Name: `valid tls config yml with non-http2 ciphers but http2 enabled`, - YAMLConfigPath: "testdata/tls_config_noAuth_noHTTP2Cipher.bad.yml", - UseTLSClient: true, - ExpectedError: ErrorMap["No HTTP2 cipher"], - }, - } - for _, testInputs := range testTables { - t.Run(testInputs.Name, testInputs.Test) - } -} - -func TestConfigReloading(t *testing.T) { - errorChannel := make(chan error, 1) - var once sync.Once - recordConnectionError := func(err error) { - once.Do(func() { - errorChannel <- err - }) - } - defer func() { - if recover() != nil { - recordConnectionError(errors.New("Panic in test function")) - } - }() - - goodYAMLPath := "testdata/tls_config_noAuth.good.yml" - badYAMLPath := "testdata/tls_config_noAuth.good.blocking.yml" - - server := &http.Server{ - Addr: port, - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Hello World!")) - }), - } - defer func() { - server.Close() - }() - - go func() { - defer func() { - if recover() != nil { - recordConnectionError(errors.New("Panic starting server")) - } - }() - err := Listen(server, badYAMLPath, testlogger) - recordConnectionError(err) - }() - - client := getTLSClient() - - TestClientConnection := func() error { - time.Sleep(250 * time.Millisecond) - r, err := client.Get("https://localhost" + port) - if err != nil { - return (err) - } - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return (err) - } - if string(body) != "Hello World!" 
{ - return (errors.New(string(body))) - } - return (nil) - } - - err := TestClientConnection() - if err == nil { - recordConnectionError(errors.New("connection accepted but should have failed")) - } else { - swapFileContents(goodYAMLPath, badYAMLPath) - defer swapFileContents(goodYAMLPath, badYAMLPath) - err = TestClientConnection() - if err != nil { - recordConnectionError(errors.New("connection failed but should have been accepted")) - } else { - - recordConnectionError(nil) - } - } - - err = <-errorChannel - if err != nil { - t.Errorf(" *** Failed test: %s *** Returned error: %v", "TestConfigReloading", err) - } -} - -func (test *TestInputs) Test(t *testing.T) { - errorChannel := make(chan error, 1) - var once sync.Once - recordConnectionError := func(err error) { - once.Do(func() { - errorChannel <- err - }) - } - defer func() { - if recover() != nil { - recordConnectionError(errors.New("Panic in test function")) - } - }() - - var server *http.Server - if test.UseNilServer { - server = nil - } else { - server = &http.Server{ - Addr: port, - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Hello World!")) - }), - } - defer func() { - server.Close() - }() - } - go func() { - defer func() { - if recover() != nil { - recordConnectionError(errors.New("Panic starting server")) - } - }() - err := Listen(server, test.YAMLConfigPath, testlogger) - recordConnectionError(err) - }() - - ClientConnection := func() (*http.Response, error) { - var client *http.Client - var proto string - if test.UseTLSClient { - client = getTLSClient() - t := client.Transport.(*http.Transport) - t.TLSClientConfig.MaxVersion = test.ClientMaxTLSVersion - if len(test.CipherSuites) > 0 { - t.TLSClientConfig.CipherSuites = test.CipherSuites - } - if len(test.CurvePreferences) > 0 { - t.TLSClientConfig.CurvePreferences = test.CurvePreferences - } - proto = "https" - } else { - client = http.DefaultClient - proto = "http" - } - req, err := http.NewRequest("GET", proto+"://localhost"+port, nil) - if err != nil { - t.Error(err) - } - if test.Username != "" { - req.SetBasicAuth(test.Username, test.Password) - } - return client.Do(req) - } - go func() { - time.Sleep(250 * time.Millisecond) - r, err := ClientConnection() - if err != nil { - recordConnectionError(err) - return - } - - if test.ActualCipher != 0 { - if r.TLS.CipherSuite != test.ActualCipher { - recordConnectionError( - fmt.Errorf("bad cipher suite selected. Expected: %s, got: %s", - tls.CipherSuiteName(r.TLS.CipherSuite), - tls.CipherSuiteName(test.ActualCipher), - ), - ) - } - } - - body, err := ioutil.ReadAll(r.Body) - if err != nil { - recordConnectionError(err) - return - } - if string(body) != "Hello World!" 
{ - recordConnectionError(errors.New(string(body))) - return - } - recordConnectionError(nil) - }() - err := <-errorChannel - if test.isCorrectError(err) == false { - if test.ExpectedError == nil { - t.Logf("Expected no error, got error: %v", err) - } else { - t.Logf("Expected error matching regular expression: %v", test.ExpectedError) - t.Logf("Got: %v", err) - } - t.Fail() - } -} - -func (test *TestInputs) isCorrectError(returnedError error) bool { - switch { - case returnedError == nil && test.ExpectedError == nil: - case returnedError != nil && test.ExpectedError != nil && test.ExpectedError.MatchString(returnedError.Error()): - default: - return false - } - return true -} - -func getTLSClient() *http.Client { - cert, err := ioutil.ReadFile("testdata/tls-ca-chain.pem") - if err != nil { - panic("Unable to start TLS client. Check cert path") - } - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: func() *x509.CertPool { - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(cert) - return caCertPool - }(), - }, - }, - } - return client -} - -func swapFileContents(file1, file2 string) error { - content1, err := ioutil.ReadFile(file1) - if err != nil { - return err - } - content2, err := ioutil.ReadFile(file2) - if err != nil { - return err - } - err = ioutil.WriteFile(file1, content2, 0644) - if err != nil { - return err - } - err = ioutil.WriteFile(file2, content1, 0644) - if err != nil { - return err - } - return nil -} - -func TestUsers(t *testing.T) { - testTables := []*TestInputs{ - { - Name: `without basic auth`, - YAMLConfigPath: "testdata/tls_config_users_noTLS.good.yml", - ExpectedError: ErrorMap["Unauthorized"], - }, - { - Name: `with correct basic auth`, - YAMLConfigPath: "testdata/tls_config_users_noTLS.good.yml", - Username: "dave", - Password: "dave123", - ExpectedError: nil, - }, - { - Name: `without basic auth and TLS`, - YAMLConfigPath: "testdata/tls_config_users.good.yml", - UseTLSClient: true, - ExpectedError: ErrorMap["Unauthorized"], - }, - { - Name: `with correct basic auth and TLS`, - YAMLConfigPath: "testdata/tls_config_users.good.yml", - UseTLSClient: true, - Username: "dave", - Password: "dave123", - ExpectedError: nil, - }, - { - Name: `with another correct basic auth and TLS`, - YAMLConfigPath: "testdata/tls_config_users.good.yml", - UseTLSClient: true, - Username: "carol", - Password: "carol123", - ExpectedError: nil, - }, - { - Name: `with bad password and TLS`, - YAMLConfigPath: "testdata/tls_config_users.good.yml", - UseTLSClient: true, - Username: "dave", - Password: "bad", - ExpectedError: ErrorMap["Forbidden"], - }, - { - Name: `with bad username and TLS`, - YAMLConfigPath: "testdata/tls_config_users.good.yml", - UseTLSClient: true, - Username: "nonexistent", - Password: "nonexistent", - ExpectedError: ErrorMap["Forbidden"], - }, - } - for _, testInputs := range testTables { - t.Run(testInputs.Name, testInputs.Test) - } -} diff --git a/https/users.go b/https/users.go deleted file mode 100644 index 170c87bc73..0000000000 --- a/https/users.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package https - -import ( - "net/http" - - "github.com/go-kit/kit/log" - "golang.org/x/crypto/bcrypt" -) - -func validateUsers(configPath string) error { - c, err := getConfig(configPath) - if err != nil { - return err - } - - for _, p := range c.Users { - _, err = bcrypt.Cost([]byte(p)) - if err != nil { - return err - } - } - - return nil -} - -type userAuthRoundtrip struct { - tlsConfigPath string - handler http.Handler - logger log.Logger -} - -func (u *userAuthRoundtrip) ServeHTTP(w http.ResponseWriter, r *http.Request) { - c, err := getConfig(u.tlsConfigPath) - if err != nil { - u.logger.Log("msg", "Unable to parse configuration", "err", err) - http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) - return - } - - if len(c.Users) == 0 { - u.handler.ServeHTTP(w, r) - return - } - - user, pass, ok := r.BasicAuth() - if !ok { - w.Header().Set("WWW-Authenticate", "Basic") - http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) - return - } - - if hashedPassword, ok := c.Users[user]; ok { - if err := bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(pass)); err == nil { - u.handler.ServeHTTP(w, r) - return - } - } - - http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) -} diff --git a/https/web-config.yml b/https/web-config.yml deleted file mode 100644 index 7d40d9b708..0000000000 --- a/https/web-config.yml +++ /dev/null @@ -1,6 +0,0 @@ -# Minimal TLS configuration example. Additionally, a certificate and a key file -# are needed. -tls_server_config: - cert_file: server.crt - key_file: server.key - diff --git a/node_exporter.go b/node_exporter.go index f4e8d58517..b4064b9460 100644 --- a/node_exporter.go +++ b/node_exporter.go @@ -15,25 +15,31 @@ package main import ( "fmt" - "github.com/prometheus/node_exporter/https" + "log/slog" "net" "net/http" _ "net/http/pprof" "os" "os/signal" + "os/user" + "runtime" + "slices" "sort" "syscall" - "github.com/prometheus/common/promlog" - "github.com/prometheus/common/promlog/flag" + "github.com/prometheus/common/promslog" + "github.com/prometheus/common/promslog/flag" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" + promcollectors "github.com/prometheus/client_golang/prometheus/collectors" + versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/version" + "github.com/prometheus/exporter-toolkit/web" + "github.com/prometheus/exporter-toolkit/web/kingpinflag" + "github.com/prometheus/node_exporter/collector" - kingpin "gopkg.in/alecthomas/kingpin.v2" ) // handler wraps an unfiltered http.Handler but uses a filtered handler, @@ -41,15 +47,17 @@ import ( // newHandler. type handler struct { unfilteredHandler http.Handler + // enabledCollectors list is used for logging and filtering + enabledCollectors []string // exporterMetricsRegistry is a separate registry for the metrics about // the exporter itself. 
exporterMetricsRegistry *prometheus.Registry includeExporterMetrics bool maxRequests int - logger log.Logger + logger *slog.Logger } -func newHandler(includeExporterMetrics bool, maxRequests int, logger log.Logger) *handler { +func newHandler(includeExporterMetrics bool, maxRequests int, logger *slog.Logger) *handler { h := &handler{ exporterMetricsRegistry: prometheus.NewRegistry(), includeExporterMetrics: includeExporterMetrics, @@ -58,8 +66,8 @@ func newHandler(includeExporterMetrics bool, maxRequests int, logger log.Logger) } if h.includeExporterMetrics { h.exporterMetricsRegistry.MustRegister( - prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), - prometheus.NewGoCollector(), + promcollectors.NewProcessCollector(promcollectors.ProcessCollectorOpts{}), + promcollectors.NewGoCollector(), ) } if innerHandler, err := h.innerHandler(); err != nil { @@ -72,20 +80,43 @@ func newHandler(includeExporterMetrics bool, maxRequests int, logger log.Logger) // ServeHTTP implements http.Handler. func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - filters := r.URL.Query()["collect[]"] - level.Debug(h.logger).Log("msg", "collect query:", "filters", filters) + collects := r.URL.Query()["collect[]"] + h.logger.Debug("collect query:", "collects", collects) - if len(filters) == 0 { + excludes := r.URL.Query()["exclude[]"] + h.logger.Debug("exclude query:", "excludes", excludes) + + if len(collects) == 0 && len(excludes) == 0 { // No filters, use the prepared unfiltered handler. h.unfilteredHandler.ServeHTTP(w, r) return } + + if len(collects) > 0 && len(excludes) > 0 { + h.logger.Debug("rejecting combined collect and exclude queries") + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte("Combined collect and exclude queries are not allowed.")) + return + } + + filters := &collects + if len(excludes) > 0 { + // In exclude mode, filtered collectors = enabled - excludeed. + f := []string{} + for _, c := range h.enabledCollectors { + if (slices.Index(excludes, c)) == -1 { + f = append(f, c) + } + } + filters = &f + } + // To serve filtered metrics, we create a filtering handler on the fly. - filteredHandler, err := h.innerHandler(filters...) + filteredHandler, err := h.innerHandler(*filters...) if err != nil { - level.Warn(h.logger).Log("msg", "Couldn't create filtered metrics handler:", "err", err) + h.logger.Warn("Couldn't create filtered metrics handler:", "err", err) w.WriteHeader(http.StatusBadRequest) - w.Write([]byte(fmt.Sprintf("Couldn't create filtered metrics handler: %s", err))) + fmt.Fprintf(w, "Couldn't create filtered metrics handler: %s", err) return } filteredHandler.ServeHTTP(w, r) @@ -105,45 +136,53 @@ func (h *handler) innerHandler(filters ...string) (http.Handler, error) { // Only log the creation of an unfiltered handler, which should happen // only once upon startup. 
if len(filters) == 0 { - level.Info(h.logger).Log("msg", "Enabled collectors") - collectors := []string{} + h.logger.Info("Enabled collectors") for n := range nc.Collectors { - collectors = append(collectors, n) + h.enabledCollectors = append(h.enabledCollectors, n) } - sort.Strings(collectors) - for _, c := range collectors { - level.Info(h.logger).Log("collector", c) + sort.Strings(h.enabledCollectors) + for _, c := range h.enabledCollectors { + h.logger.Info(c) } } r := prometheus.NewRegistry() - r.MustRegister(version.NewCollector("node_exporter")) + r.MustRegister(versioncollector.NewCollector("node_exporter")) if err := r.Register(nc); err != nil { return nil, fmt.Errorf("couldn't register node collector: %s", err) } - handler := promhttp.HandlerFor( - prometheus.Gatherers{h.exporterMetricsRegistry, r}, - promhttp.HandlerOpts{ - ErrorHandling: promhttp.ContinueOnError, - MaxRequestsInFlight: h.maxRequests, - Registry: h.exporterMetricsRegistry, - }, - ) + + var handler http.Handler if h.includeExporterMetrics { + handler = promhttp.HandlerFor( + prometheus.Gatherers{h.exporterMetricsRegistry, r}, + promhttp.HandlerOpts{ + ErrorLog: slog.NewLogLogger(h.logger.Handler(), slog.LevelError), + ErrorHandling: promhttp.ContinueOnError, + MaxRequestsInFlight: h.maxRequests, + Registry: h.exporterMetricsRegistry, + }, + ) // Note that we have to use h.exporterMetricsRegistry here to // use the same promhttp metrics for all expositions. handler = promhttp.InstrumentMetricHandler( h.exporterMetricsRegistry, handler, ) + } else { + handler = promhttp.HandlerFor( + r, + promhttp.HandlerOpts{ + ErrorLog: slog.NewLogLogger(h.logger.Handler(), slog.LevelError), + ErrorHandling: promhttp.ContinueOnError, + MaxRequestsInFlight: h.maxRequests, + }, + ) } + return handler, nil } func main() { - - done := make(chan os.Signal, 1) - signal.Notify(done, syscall.SIGINT, syscall.SIGTERM) - var ( socketPath = kingpin.Flag( "web.socket-path", @@ -153,10 +192,6 @@ func main() { "web.socket-permissions", "Permissions of unix socket file.", ).Default("0640").Int32() - listenAddress = kingpin.Flag( - "web.listen-address", - "Address on which to expose metrics and web interface.", - ).Default(":9100").String() metricsPath = kingpin.Flag( "web.telemetry-path", "Path under which to expose metrics.", @@ -173,72 +208,86 @@ func main() { "collector.disable-defaults", "Set all collectors to disabled by default.", ).Default("false").Bool() - configFile = kingpin.Flag( - "web.config", - "[EXPERIMENTAL] Path to config yaml file that can enable TLS or authentication.", - ).Default("").String() + maxProcs = kingpin.Flag( + "runtime.gomaxprocs", "The target number of CPUs Go will run on (GOMAXPROCS)", + ).Envar("GOMAXPROCS").Default("1").Int() + toolkitFlags = kingpinflag.AddFlags(kingpin.CommandLine, ":9100") ) - promlogConfig := &promlog.Config{} - flag.AddFlags(kingpin.CommandLine, promlogConfig) + promslogConfig := &promslog.Config{} + flag.AddFlags(kingpin.CommandLine, promslogConfig) kingpin.Version(version.Print("node_exporter")) + kingpin.CommandLine.UsageWriter(os.Stdout) kingpin.HelpFlag.Short('h') kingpin.Parse() - logger := promlog.New(promlogConfig) + logger := promslog.New(promslogConfig) if *disableDefaultCollectors { collector.DisableDefaultCollectors() } - level.Info(logger).Log("msg", "Starting node_exporter", "version", version.Info()) - level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext()) + logger.Info("Starting node_exporter", "version", version.Info()) + 
logger.Info("Build context", "build_context", version.BuildContext()) + if user, err := user.Current(); err == nil && user.Uid == "0" { + logger.Warn("Node Exporter is running as root user. This exporter is designed to run as unprivileged user, root is not required.") + } + runtime.GOMAXPROCS(*maxProcs) + logger.Debug("Go MAXPROCS", "procs", runtime.GOMAXPROCS(0)) http.Handle(*metricsPath, newHandler(!*disableExporterMetrics, *maxRequests, logger)) - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(` - Node Exporter - -

<h1>Node Exporter</h1> - <p><a href="` + *metricsPath + `">Metrics</a></p> - </body> - </html>
- - `)) - }) - - humanAddress := "address " + *listenAddress - if *socketPath != "" { - humanAddress = "path " + *socketPath - } - var server *http.Server - var serve func() error - level.Info(logger).Log("msg", "Listening on", "address", humanAddress) - server = &http.Server{} - if *socketPath == "" { - server = &http.Server{Addr: *listenAddress} - serve = func() error { - return https.Listen(server, *configFile, logger) + if *metricsPath != "/" { + landingConfig := web.LandingConfig{ + Name: "Node Exporter", + Description: "Prometheus Node Exporter", + Version: version.Info(), + Links: []web.LandingLinks{ + { + Address: *metricsPath, + Text: "Metrics", + }, + }, } - } else { + landingPage, err := web.NewLandingPage(landingConfig) + if err != nil { + logger.Error(err.Error()) + os.Exit(1) + } + http.Handle("/", landingPage) + } + + server := &http.Server{} + if *socketPath != "" { os.Remove(*socketPath) unixListener, err := net.Listen("unix", *socketPath) if err != nil { - level.Error(logger).Log("err", err) + logger.Error("Error listening on socket", "err", err) os.Exit(1) } if err := os.Chmod(*socketPath, os.FileMode(*socketPermissions)); err != nil { - level.Error(logger).Log("err", err) + logger.Error("Error changing socket permissions", "err", err) + os.Remove(*socketPath) os.Exit(1) } - serve = func() error { - return server.Serve(unixListener) - } - } - go func() { - if err := serve(); err != nil { - level.Error(logger).Log("err", err) + defer os.Remove(*socketPath) + + go func() { + if err := server.Serve(unixListener); err != nil && err != http.ErrServerClosed { + logger.Error("Error serving", "err", err) + os.Exit(1) + } + }() + + done := make(chan os.Signal, 1) + signal.Notify(done, syscall.SIGINT, syscall.SIGTERM) + logger.Info("Listening on", "path", *socketPath) + <-done + logger.Info("Connection closed on", "path", *socketPath) + server.Close() + os.Remove(*socketPath) + os.Exit(0) + } else { + if err := web.ListenAndServe(server, toolkitFlags, logger); err != nil { + logger.Error(err.Error()) os.Exit(1) } - }() - <-done - level.Info(logger).Log("msg", "Connection closed on", humanAddress) - server.Close() - os.Exit(0) + } } diff --git a/node_exporter.spec b/node_exporter.spec index 905b25a368..9241aadd47 100644 --- a/node_exporter.spec +++ b/node_exporter.spec @@ -4,8 +4,8 @@ Autoreq: 0 %define pkg_version_file %{cl_dir}%{name} Name: cl-node-exporter -Version: 1.1.0 -Release: 3%{dist}.cloudlinux +Version: 1.2.0 +Release: 1%{dist}.cloudlinux Summary: CL Node Exporter tool License: Apache License, Version 2.0 Group: System Environment/Base @@ -38,15 +38,16 @@ This package provides end-to-end tests for Node Exporter tool %build # download new version of Go compiler %ifarch x86_64 amd64 ia32e -curl https://dl.google.com/go/go1.14.4.linux-amd64.tar.gz --output %{_tmppath}/go.tar.gz +curl https://dl.google.com/go/go1.24.0.linux-amd64.tar.gz --output %{_tmppath}/go.tar.gz %else -curl https://dl.google.com/go/go1.14.4.linux-386.tar.gz --output %{_tmppath}/go.tar.gz +curl https://dl.google.com/go/go1.24.0.linux-386.tar.gz --output %{_tmppath}/go.tar.gz %endif tar xzf %{_tmppath}/go.tar.gz -C %{_tmppath} export PATH=$PATH:%{_tmppath}/go/bin export GOROOT=%{_tmppath}/go export GOPATH=%{_tmppath} make build +make tools make test # run cross-testing %ifarch x86_64 amd64 ia32e @@ -64,10 +65,15 @@ rm -rf $RPM_BUILD_ROOT install -D -m 755 node_exporter $RPM_BUILD_ROOT%{_clshare_plus}/node_exporter #install tests -mkdir -p $RPM_BUILD_ROOT/opt/node_exporter_tests/collector -cp -r 
collector/fixtures $RPM_BUILD_ROOT/opt/node_exporter_tests/collector/ +mkdir -p $RPM_BUILD_ROOT/opt/node_exporter_tests/ +cp -r collector $RPM_BUILD_ROOT/opt/node_exporter_tests/ install -D -m 755 end-to-end-test.sh $RPM_BUILD_ROOT/opt/node_exporter_tests/end-to-end-test.sh install -D -m 755 node_exporter $RPM_BUILD_ROOT/opt/node_exporter_tests/node_exporter +mkdir -p $RPM_BUILD_ROOT/opt/node_exporter_tests/tools +install -D -m 755 tools/tools $RPM_BUILD_ROOT/opt/node_exporter_tests/tools/tools + +# remove broken symlinks +find $RPM_BUILD_ROOT/opt/node_exporter_tests/collector/fixtures -xtype l -delete # write package version to file if [[ ! -d "$RPM_BUILD_ROOT%{cl_dir}" ]]; then @@ -88,6 +94,9 @@ exit 0 %changelog +* Mon Dec 01 2025 Ruslan Koliada 1.2.0-1 +- CLPRO-2902: Sync repository with upstream + * Wed Aug 19 2020 Stepan Oksanichenko 1.1.0-2 - CMT-221: Add package versions tags to sentry diff --git a/node_exporter_test.go b/node_exporter_test.go index f10db7c661..0d6b41e4d5 100644 --- a/node_exporter_test.go +++ b/node_exporter_test.go @@ -15,7 +15,7 @@ package main import ( "fmt" - "io/ioutil" + "io" "net/http" "os" "os/exec" @@ -59,7 +59,7 @@ func TestFileDescriptorLeak(t *testing.T) { if err != nil { return err } - for i := 0; i < 5; i++ { + for range 5 { if err := queryExporter(address); err != nil { return err } @@ -84,17 +84,17 @@ func TestHandlingOfDuplicatedMetrics(t *testing.T) { t.Skipf("node_exporter binary not available, try to run `make build` first: %s", err) } - dir, err := ioutil.TempDir("", "node-exporter") + dir, err := os.MkdirTemp("", "node-exporter") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) content := []byte("dummy_metric 1\n") - if err := ioutil.WriteFile(filepath.Join(dir, "a.prom"), content, 0600); err != nil { + if err := os.WriteFile(filepath.Join(dir, "a.prom"), content, 0600); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(dir, "b.prom"), content, 0600); err != nil { + if err := os.WriteFile(filepath.Join(dir, "b.prom"), content, 0600); err != nil { t.Fatal(err) } @@ -113,7 +113,7 @@ func queryExporter(address string) error { if err != nil { return err } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return err } @@ -131,7 +131,7 @@ func runCommandAndTests(cmd *exec.Cmd, address string, fn func(pid int) error) e return fmt.Errorf("failed to start command: %s", err) } time.Sleep(50 * time.Millisecond) - for i := 0; i < 10; i++ { + for i := range 10 { if err := queryExporter(address); err == nil { break } diff --git a/scripts/errcheck_excludes.txt b/scripts/errcheck_excludes.txt deleted file mode 100644 index ed0736fbab..0000000000 --- a/scripts/errcheck_excludes.txt +++ /dev/null @@ -1,4 +0,0 @@ -// Used in HTTP handlers, any error is handled by the server itself. -(net/http.ResponseWriter).Write -// Never check for logger errors. -(github.com/go-kit/kit/log.Logger).Log \ No newline at end of file diff --git a/tools/main.go b/tools/main.go new file mode 100644 index 0000000000..cfd4c1135e --- /dev/null +++ b/tools/main.go @@ -0,0 +1,79 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "flag" + "fmt" + "go/build" + "os" + "path/filepath" + "runtime" +) + +func main() { + printHelpAndDie := func() { + fmt.Println(` +Usage: tools [command]`) + os.Exit(1) + } + if len(os.Args) < 2 { + printHelpAndDie() + } + + // Sub-commands. + matchCmd := flag.NewFlagSet("match", flag.ExitOnError) + switch os.Args[1] { + case "match": + err := matchCmd.Parse(os.Args[2:]) + if err != nil { + fmt.Println("Error parsing flags:", err) + os.Exit(1) + } + if matchCmd.NArg() != 1 { + fmt.Println("Usage: match [file]") + os.Exit(1) + } + file := matchCmd.Arg(0) + + // For debugging purposes, allow overriding these. + goos, found := os.LookupEnv("GOHOSTOS") + if !found { + goos = runtime.GOOS + } + goarch, found := os.LookupEnv("GOARCH") + if !found { + goarch = runtime.GOARCH + } + ctx := build.Context{ + GOOS: goos, + GOARCH: goarch, + } + abs, err := filepath.Abs(file) + if err != nil { + panic(err) + } + match, err := ctx.MatchFile(filepath.Dir(abs), filepath.Base(abs)) + if err != nil { + fmt.Println("Error:", err) + os.Exit(1) + } + if match { + os.Exit(0) + } + os.Exit(1) + default: + printHelpAndDie() + } +} diff --git a/ttar b/ttar index b0171a12b5..e6cade7614 100755 --- a/ttar +++ b/ttar @@ -212,16 +212,16 @@ function extract { local eof_without_newline if [ "$size" -gt 0 ]; then if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceeded by a backslash indicates that the line + # An EOF not preceded by a backslash indicates that the line # does not end with a newline eof_without_newline=1 else eof_without_newline=0 fi # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceeded by backslash + # Replace NULLBYTE with null byte unless preceded by backslash # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceeded by backslash + # Remove EOF unless preceded by backslash # Remove one backslash in front of EOF if [ $USE_PYTHON -eq 1 ]; then echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/github.com/alecthomas/template/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md deleted file mode 100644 index ef6a8ee303..0000000000 --- a/vendor/github.com/alecthomas/template/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Go's `text/template` package with newline elision - -This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline. - -eg. - -``` -{{if true}}\ -hello -{{end}}\ -``` - -Will result in: - -``` -hello\n -``` - -Rather than: - -``` -\n -hello\n -\n -``` diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go deleted file mode 100644 index 223c595c25..0000000000 --- a/vendor/github.com/alecthomas/template/doc.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package template implements data-driven templates for generating textual output. - -To generate HTML output, see package html/template, which has the same interface -as this package but automatically secures HTML output against certain attacks. - -Templates are executed by applying them to a data structure. Annotations in the -template refer to elements of the data structure (typically a field of a struct -or a key in a map) to control execution and derive values to be displayed. -Execution of the template walks the structure and sets the cursor, represented -by a period '.' and called "dot", to the value at the current location in the -structure as execution proceeds. - -The input text for a template is UTF-8-encoded text in any format. -"Actions"--data evaluations or control structures--are delimited by -"{{" and "}}"; all text outside actions is copied to the output unchanged. -Actions may not span newlines, although comments can. - -Once parsed, a template may be executed safely in parallel. - -Here is a trivial example that prints "17 items are made of wool". - - type Inventory struct { - Material string - Count uint - } - sweaters := Inventory{"wool", 17} - tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") - if err != nil { panic(err) } - err = tmpl.Execute(os.Stdout, sweaters) - if err != nil { panic(err) } - -More intricate examples appear below. - -Actions - -Here is the list of actions. "Arguments" and "pipelines" are evaluations of -data, defined in detail below. - -*/ -// {{/* a comment */}} -// A comment; discarded. May contain newlines. -// Comments do not nest and must start and end at the -// delimiters, as shown here. 
-/* - - {{pipeline}} - The default textual representation of the value of the pipeline - is copied to the output. - - {{if pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, T1 is executed. The empty values are false, 0, any - nil pointer or interface value, and any array, slice, map, or - string of length zero. - Dot is unaffected. - - {{if pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, T0 is executed; - otherwise, T1 is executed. Dot is unaffected. - - {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} - To simplify the appearance of if-else chains, the else action - of an if may include another if directly; the effect is exactly - the same as writing - {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} - - {{range pipeline}} T1 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, nothing is output; - otherwise, dot is set to the successive elements of the array, - slice, or map and T1 is executed. If the value is a map and the - keys are of basic type with a defined order ("comparable"), the - elements will be visited in sorted key order. - - {{range pipeline}} T1 {{else}} T0 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, dot is unaffected and - T0 is executed; otherwise, dot is set to the successive elements - of the array, slice, or map and T1 is executed. - - {{template "name"}} - The template with the specified name is executed with nil data. - - {{template "name" pipeline}} - The template with the specified name is executed with dot set - to the value of the pipeline. - - {{with pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, dot is set to the value of the pipeline and T1 is - executed. - - {{with pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, dot is unaffected and T0 - is executed; otherwise, dot is set to the value of the pipeline - and T1 is executed. - -Arguments - -An argument is a simple value, denoted by one of the following. - - - A boolean, string, character, integer, floating-point, imaginary - or complex constant in Go syntax. These behave like Go's untyped - constants, although raw strings may not span newlines. - - The keyword nil, representing an untyped Go nil. - - The character '.' (period): - . - The result is the value of dot. - - A variable name, which is a (possibly empty) alphanumeric string - preceded by a dollar sign, such as - $piOver2 - or - $ - The result is the value of the variable. - Variables are described below. - - The name of a field of the data, which must be a struct, preceded - by a period, such as - .Field - The result is the value of the field. Field invocations may be - chained: - .Field1.Field2 - Fields can also be evaluated on variables, including chaining: - $x.Field1.Field2 - - The name of a key of the data, which must be a map, preceded - by a period, such as - .Key - The result is the map element value indexed by the key. - Key invocations may be chained and combined with fields to any - depth: - .Field1.Key1.Field2.Key2 - Although the key must be an alphanumeric identifier, unlike with - field names they do not need to start with an upper case letter. 
- Keys can also be evaluated on variables, including chaining: - $x.key1.key2 - - The name of a niladic method of the data, preceded by a period, - such as - .Method - The result is the value of invoking the method with dot as the - receiver, dot.Method(). Such a method must have one return value (of - any type) or two return values, the second of which is an error. - If it has two and the returned error is non-nil, execution terminates - and an error is returned to the caller as the value of Execute. - Method invocations may be chained and combined with fields and keys - to any depth: - .Field1.Key1.Method1.Field2.Key2.Method2 - Methods can also be evaluated on variables, including chaining: - $x.Method1.Field - - The name of a niladic function, such as - fun - The result is the value of invoking the function, fun(). The return - types and values behave as in methods. Functions and function - names are described below. - - A parenthesized instance of one the above, for grouping. The result - may be accessed by a field or map key invocation. - print (.F1 arg1) (.F2 arg2) - (.StructValuedMethod "arg").Field - -Arguments may evaluate to any type; if they are pointers the implementation -automatically indirects to the base type when required. -If an evaluation yields a function value, such as a function-valued -field of a struct, the function is not invoked automatically, but it -can be used as a truth value for an if action and the like. To invoke -it, use the call function, defined below. - -A pipeline is a possibly chained sequence of "commands". A command is a simple -value (argument) or a function or method call, possibly with multiple arguments: - - Argument - The result is the value of evaluating the argument. - .Method [Argument...] - The method can be alone or the last element of a chain but, - unlike methods in the middle of a chain, it can take arguments. - The result is the value of calling the method with the - arguments: - dot.Method(Argument1, etc.) - functionName [Argument...] - The result is the value of calling the function associated - with the name: - function(Argument1, etc.) - Functions and function names are described below. - -Pipelines - -A pipeline may be "chained" by separating a sequence of commands with pipeline -characters '|'. In a chained pipeline, the result of the each command is -passed as the last argument of the following command. The output of the final -command in the pipeline is the value of the pipeline. - -The output of a command will be either one value or two values, the second of -which has type error. If that second value is present and evaluates to -non-nil, execution terminates and the error is returned to the caller of -Execute. - -Variables - -A pipeline inside an action may initialize a variable to capture the result. -The initialization has syntax - - $variable := pipeline - -where $variable is the name of the variable. An action that declares a -variable produces no output. - -If a "range" action initializes a variable, the variable is set to the -successive elements of the iteration. Also, a "range" may declare two -variables, separated by a comma: - - range $index, $element := pipeline - -in which case $index and $element are set to the successive values of the -array/slice index or map key and element, respectively. Note that if there is -only one variable, it is assigned the element; this is opposite to the -convention in Go range clauses. 
- -A variable's scope extends to the "end" action of the control structure ("if", -"with", or "range") in which it is declared, or to the end of the template if -there is no such control structure. A template invocation does not inherit -variables from the point of its invocation. - -When execution begins, $ is set to the data argument passed to Execute, that is, -to the starting value of dot. - -Examples - -Here are some example one-line templates demonstrating pipelines and variables. -All produce the quoted word "output": - - {{"\"output\""}} - A string constant. - {{`"output"`}} - A raw string constant. - {{printf "%q" "output"}} - A function call. - {{"output" | printf "%q"}} - A function call whose final argument comes from the previous - command. - {{printf "%q" (print "out" "put")}} - A parenthesized argument. - {{"put" | printf "%s%s" "out" | printf "%q"}} - A more elaborate call. - {{"output" | printf "%s" | printf "%q"}} - A longer chain. - {{with "output"}}{{printf "%q" .}}{{end}} - A with action using dot. - {{with $x := "output" | printf "%q"}}{{$x}}{{end}} - A with action that creates and uses a variable. - {{with $x := "output"}}{{printf "%q" $x}}{{end}} - A with action that uses the variable in another action. - {{with $x := "output"}}{{$x | printf "%q"}}{{end}} - The same, but pipelined. - -Functions - -During execution functions are found in two function maps: first in the -template, then in the global function map. By default, no functions are defined -in the template but the Funcs method can be used to add them. - -Predefined global functions are named as follows. - - and - Returns the boolean AND of its arguments by returning the - first empty argument or the last argument, that is, - "and x y" behaves as "if x then y else x". All the - arguments are evaluated. - call - Returns the result of calling the first argument, which - must be a function, with the remaining arguments as parameters. - Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where - Y is a func-valued field, map entry, or the like. - The first argument must be the result of an evaluation - that yields a value of function type (as distinct from - a predefined function such as print). The function must - return either one or two result values, the second of which - is of type error. If the arguments don't match the function - or the returned error value is non-nil, execution stops. - html - Returns the escaped HTML equivalent of the textual - representation of its arguments. - index - Returns the result of indexing its first argument by the - following arguments. Thus "index x 1 2 3" is, in Go syntax, - x[1][2][3]. Each indexed item must be a map, slice, or array. - js - Returns the escaped JavaScript equivalent of the textual - representation of its arguments. - len - Returns the integer length of its argument. - not - Returns the boolean negation of its single argument. - or - Returns the boolean OR of its arguments by returning the - first non-empty argument or the last argument, that is, - "or x y" behaves as "if x then x else y". All the - arguments are evaluated. - print - An alias for fmt.Sprint - printf - An alias for fmt.Sprintf - println - An alias for fmt.Sprintln - urlquery - Returns the escaped value of the textual representation of - its arguments in a form suitable for embedding in a URL query. - -The boolean functions take any zero value to be false and a non-zero -value to be true. 
- -There is also a set of binary comparison operators defined as -functions: - - eq - Returns the boolean truth of arg1 == arg2 - ne - Returns the boolean truth of arg1 != arg2 - lt - Returns the boolean truth of arg1 < arg2 - le - Returns the boolean truth of arg1 <= arg2 - gt - Returns the boolean truth of arg1 > arg2 - ge - Returns the boolean truth of arg1 >= arg2 - -For simpler multi-way equality tests, eq (only) accepts two or more -arguments and compares the second and subsequent to the first, -returning in effect - - arg1==arg2 || arg1==arg3 || arg1==arg4 ... - -(Unlike with || in Go, however, eq is a function call and all the -arguments will be evaluated.) - -The comparison functions work on basic types only (or named basic -types, such as "type Celsius float32"). They implement the Go rules -for comparison of values, except that size and exact type are -ignored, so any integer value, signed or unsigned, may be compared -with any other integer value. (The arithmetic value is compared, -not the bit pattern, so all negative integers are less than all -unsigned integers.) However, as usual, one may not compare an int -with a float32 and so on. - -Associated templates - -Each template is named by a string specified when it is created. Also, each -template is associated with zero or more other templates that it may invoke by -name; such associations are transitive and form a name space of templates. - -A template may use a template invocation to instantiate another associated -template; see the explanation of the "template" action above. The name must be -that of a template associated with the template that contains the invocation. - -Nested template definitions - -When parsing a template, another template may be defined and associated with the -template being parsed. Template definitions must appear at the top level of the -template, much like global variables in a Go program. - -The syntax of such definitions is to surround each template declaration with a -"define" and "end" action. - -The define action names the template being created by providing a string -constant. Here is a simple example: - - `{{define "T1"}}ONE{{end}} - {{define "T2"}}TWO{{end}} - {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} - {{template "T3"}}` - -This defines two templates, T1 and T2, and a third T3 that invokes the other two -when it is executed. Finally it invokes T3. If executed this template will -produce the text - - ONE TWO - -By construction, a template may reside in only one association. If it's -necessary to have a template addressable from multiple associations, the -template definition must be parsed multiple times to create distinct *Template -values, or must be copied with the Clone or AddParseTree method. - -Parse may be called multiple times to assemble the various associated templates; -see the ParseFiles and ParseGlob functions and methods for simple ways to parse -related templates stored in files. - -A template may be executed directly or through ExecuteTemplate, which executes -an associated template identified by name. 
To invoke our example above, we -might write, - - err := tmpl.Execute(os.Stdout, "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -or to invoke a particular template explicitly by name, - - err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -*/ -package template diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go deleted file mode 100644 index c3078e5d0c..0000000000 --- a/vendor/github.com/alecthomas/template/exec.go +++ /dev/null @@ -1,845 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "fmt" - "io" - "reflect" - "runtime" - "sort" - "strings" - - "github.com/alecthomas/template/parse" -) - -// state represents the state of an execution. It's not part of the -// template so that multiple executions of the same template -// can execute in parallel. -type state struct { - tmpl *Template - wr io.Writer - node parse.Node // current node, for errors - vars []variable // push-down stack of variable values. -} - -// variable holds the dynamic value of a variable such as $, $x etc. -type variable struct { - name string - value reflect.Value -} - -// push pushes a new variable on the stack. -func (s *state) push(name string, value reflect.Value) { - s.vars = append(s.vars, variable{name, value}) -} - -// mark returns the length of the variable stack. -func (s *state) mark() int { - return len(s.vars) -} - -// pop pops the variable stack up to the mark. -func (s *state) pop(mark int) { - s.vars = s.vars[0:mark] -} - -// setVar overwrites the top-nth variable on the stack. Used by range iterations. -func (s *state) setVar(n int, value reflect.Value) { - s.vars[len(s.vars)-n].value = value -} - -// varValue returns the value of the named variable. -func (s *state) varValue(name string) reflect.Value { - for i := s.mark() - 1; i >= 0; i-- { - if s.vars[i].name == name { - return s.vars[i].value - } - } - s.errorf("undefined variable: %s", name) - return zero -} - -var zero reflect.Value - -// at marks the state to be on node n, for error reporting. -func (s *state) at(node parse.Node) { - s.node = node -} - -// doublePercent returns the string with %'s replaced by %%, if necessary, -// so it can be used safely inside a Printf format string. -func doublePercent(str string) string { - if strings.Contains(str, "%") { - str = strings.Replace(str, "%", "%%", -1) - } - return str -} - -// errorf formats the error and terminates processing. -func (s *state) errorf(format string, args ...interface{}) { - name := doublePercent(s.tmpl.Name()) - if s.node == nil { - format = fmt.Sprintf("template: %s: %s", name, format) - } else { - location, context := s.tmpl.ErrorContext(s.node) - format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) - } - panic(fmt.Errorf(format, args...)) -} - -// errRecover is the handler that turns panics into returns from the top -// level of Parse. -func errRecover(errp *error) { - e := recover() - if e != nil { - switch err := e.(type) { - case runtime.Error: - panic(e) - case error: - *errp = err - default: - panic(e) - } - } -} - -// ExecuteTemplate applies the template associated with t that has the given name -// to the specified data object and writes the output to wr. 
-// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. -func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { - tmpl := t.tmpl[name] - if tmpl == nil { - return fmt.Errorf("template: no template %q associated with template %q", name, t.name) - } - return tmpl.Execute(wr, data) -} - -// Execute applies a parsed template to the specified data object, -// and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. -func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { - defer errRecover(&err) - value := reflect.ValueOf(data) - state := &state{ - tmpl: t, - wr: wr, - vars: []variable{{"$", value}}, - } - t.init() - if t.Tree == nil || t.Root == nil { - var b bytes.Buffer - for name, tmpl := range t.tmpl { - if tmpl.Tree == nil || tmpl.Root == nil { - continue - } - if b.Len() > 0 { - b.WriteString(", ") - } - fmt.Fprintf(&b, "%q", name) - } - var s string - if b.Len() > 0 { - s = "; defined templates are: " + b.String() - } - state.errorf("%q is an incomplete or empty template%s", t.Name(), s) - } - state.walk(value, t.Root) - return -} - -// Walk functions step through the major pieces of the template structure, -// generating output as they go. -func (s *state) walk(dot reflect.Value, node parse.Node) { - s.at(node) - switch node := node.(type) { - case *parse.ActionNode: - // Do not pop variables so they persist until next end. - // Also, if the action declares variables, don't print the result. - val := s.evalPipeline(dot, node.Pipe) - if len(node.Pipe.Decl) == 0 { - s.printValue(node, val) - } - case *parse.IfNode: - s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) - case *parse.ListNode: - for _, node := range node.Nodes { - s.walk(dot, node) - } - case *parse.RangeNode: - s.walkRange(dot, node) - case *parse.TemplateNode: - s.walkTemplate(dot, node) - case *parse.TextNode: - if _, err := s.wr.Write(node.Text); err != nil { - s.errorf("%s", err) - } - case *parse.WithNode: - s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) - default: - s.errorf("unknown node: %s", node) - } -} - -// walkIfOrWith walks an 'if' or 'with' node. The two control structures -// are identical in behavior except that 'with' sets dot. -func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { - defer s.pop(s.mark()) - val := s.evalPipeline(dot, pipe) - truth, ok := isTrue(val) - if !ok { - s.errorf("if/with can't use %v", val) - } - if truth { - if typ == parse.NodeWith { - s.walk(val, list) - } else { - s.walk(dot, list) - } - } else if elseList != nil { - s.walk(dot, elseList) - } -} - -// isTrue reports whether the value is 'true', in the sense of not the zero of its type, -// and whether the value has a meaningful truth value. -func isTrue(val reflect.Value) (truth, ok bool) { - if !val.IsValid() { - // Something like var x interface{}, never set. It's a form of nil. 
- return false, true - } - switch val.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - truth = val.Len() > 0 - case reflect.Bool: - truth = val.Bool() - case reflect.Complex64, reflect.Complex128: - truth = val.Complex() != 0 - case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: - truth = !val.IsNil() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - truth = val.Int() != 0 - case reflect.Float32, reflect.Float64: - truth = val.Float() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - truth = val.Uint() != 0 - case reflect.Struct: - truth = true // Struct values are always true. - default: - return - } - return truth, true -} - -func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { - s.at(r) - defer s.pop(s.mark()) - val, _ := indirect(s.evalPipeline(dot, r.Pipe)) - // mark top of stack before any variables in the body are pushed. - mark := s.mark() - oneIteration := func(index, elem reflect.Value) { - // Set top var (lexically the second if there are two) to the element. - if len(r.Pipe.Decl) > 0 { - s.setVar(1, elem) - } - // Set next var (lexically the first if there are two) to the index. - if len(r.Pipe.Decl) > 1 { - s.setVar(2, index) - } - s.walk(elem, r.List) - s.pop(mark) - } - switch val.Kind() { - case reflect.Array, reflect.Slice: - if val.Len() == 0 { - break - } - for i := 0; i < val.Len(); i++ { - oneIteration(reflect.ValueOf(i), val.Index(i)) - } - return - case reflect.Map: - if val.Len() == 0 { - break - } - for _, key := range sortKeys(val.MapKeys()) { - oneIteration(key, val.MapIndex(key)) - } - return - case reflect.Chan: - if val.IsNil() { - break - } - i := 0 - for ; ; i++ { - elem, ok := val.Recv() - if !ok { - break - } - oneIteration(reflect.ValueOf(i), elem) - } - if i == 0 { - break - } - return - case reflect.Invalid: - break // An invalid value is likely a nil map, etc. and acts like an empty map. - default: - s.errorf("range can't iterate over %v", val) - } - if r.ElseList != nil { - s.walk(dot, r.ElseList) - } -} - -func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { - s.at(t) - tmpl := s.tmpl.tmpl[t.Name] - if tmpl == nil { - s.errorf("template %q not defined", t.Name) - } - // Variables declared by the pipeline persist. - dot = s.evalPipeline(dot, t.Pipe) - newState := *s - newState.tmpl = tmpl - // No dynamic scoping: template invocations inherit no variables. - newState.vars = []variable{{"$", dot}} - newState.walk(dot, tmpl.Root) -} - -// Eval functions evaluate pipelines, commands, and their elements and extract -// values from the data structure by examining fields, calling methods, and so on. -// The printing of those values happens only through walk functions. - -// evalPipeline returns the value acquired by evaluating a pipeline. If the -// pipeline has a variable declaration, the variable will be pushed on the -// stack. Callers should therefore pop the stack after they are finished -// executing commands depending on the pipeline value. -func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { - if pipe == nil { - return - } - s.at(pipe) - for _, cmd := range pipe.Cmds { - value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. - // If the object has type interface{}, dig down one level to the thing inside. 
- if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { - value = reflect.ValueOf(value.Interface()) // lovely! - } - } - for _, variable := range pipe.Decl { - s.push(variable.Ident[0], value) - } - return value -} - -func (s *state) notAFunction(args []parse.Node, final reflect.Value) { - if len(args) > 1 || final.IsValid() { - s.errorf("can't give argument to non-function %s", args[0]) - } -} - -func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { - firstWord := cmd.Args[0] - switch n := firstWord.(type) { - case *parse.FieldNode: - return s.evalFieldNode(dot, n, cmd.Args, final) - case *parse.ChainNode: - return s.evalChainNode(dot, n, cmd.Args, final) - case *parse.IdentifierNode: - // Must be a function. - return s.evalFunction(dot, n, cmd, cmd.Args, final) - case *parse.PipeNode: - // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. - return s.evalPipeline(dot, n) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, cmd.Args, final) - } - s.at(firstWord) - s.notAFunction(cmd.Args, final) - switch word := firstWord.(type) { - case *parse.BoolNode: - return reflect.ValueOf(word.True) - case *parse.DotNode: - return dot - case *parse.NilNode: - s.errorf("nil is not a command") - case *parse.NumberNode: - return s.idealConstant(word) - case *parse.StringNode: - return reflect.ValueOf(word.Text) - } - s.errorf("can't evaluate command %q", firstWord) - panic("not reached") -} - -// idealConstant is called to return the value of a number in a context where -// we don't know the type. In that case, the syntax of the number tells us -// its type, and we use Go rules to resolve. Note there is no such thing as -// a uint ideal constant in this situation - the value must be of int type. -func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { - // These are ideal constants but we don't know the type - // and we have no context. (If it was a method argument, - // we'd know what we need.) The syntax guides us to some extent. - s.at(constant) - switch { - case constant.IsComplex: - return reflect.ValueOf(constant.Complex128) // incontrovertible. - case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: - return reflect.ValueOf(constant.Float64) - case constant.IsInt: - n := int(constant.Int64) - if int64(n) != constant.Int64 { - s.errorf("%s overflows int", constant.Text) - } - return reflect.ValueOf(n) - case constant.IsUint: - s.errorf("%s overflows int", constant.Text) - } - return zero -} - -func isHexConstant(s string) bool { - return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') -} - -func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(field) - return s.evalFieldChain(dot, dot, field, field.Ident, args, final) -} - -func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(chain) - // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. 
- pipe := s.evalArg(dot, nil, chain.Node) - if len(chain.Field) == 0 { - s.errorf("internal error: no fields in evalChainNode") - } - return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) -} - -func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { - // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. - s.at(variable) - value := s.varValue(variable.Ident[0]) - if len(variable.Ident) == 1 { - s.notAFunction(args, final) - return value - } - return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) -} - -// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. -// dot is the environment in which to evaluate arguments, while -// receiver is the value being walked along the chain. -func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { - n := len(ident) - for i := 0; i < n-1; i++ { - receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) - } - // Now if it's a method, it gets the arguments. - return s.evalField(dot, ident[n-1], node, args, final, receiver) -} - -func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { - s.at(node) - name := node.Ident - function, ok := findFunction(name, s.tmpl) - if !ok { - s.errorf("%q is not a defined function", name) - } - return s.evalCall(dot, function, cmd, name, args, final) -} - -// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). -// The 'final' argument represents the return value from the preceding -// value of the pipeline, if any. -func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { - if !receiver.IsValid() { - return zero - } - typ := receiver.Type() - receiver, _ = indirect(receiver) - // Unless it's an interface, need to get to a value of type *T to guarantee - // we see all methods of T and *T. - ptr := receiver - if ptr.Kind() != reflect.Interface && ptr.CanAddr() { - ptr = ptr.Addr() - } - if method := ptr.MethodByName(fieldName); method.IsValid() { - return s.evalCall(dot, method, node, fieldName, args, final) - } - hasArgs := len(args) > 1 || final.IsValid() - // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. - receiver, isNil := indirect(receiver) - if isNil { - s.errorf("nil pointer evaluating %s.%s", typ, fieldName) - } - switch receiver.Kind() { - case reflect.Struct: - tField, ok := receiver.Type().FieldByName(fieldName) - if ok { - field := receiver.FieldByIndex(tField.Index) - if tField.PkgPath != "" { // field is unexported - s.errorf("%s is an unexported field of struct type %s", fieldName, typ) - } - // If it's a function, we must call it. - if hasArgs { - s.errorf("%s has arguments but cannot be invoked as function", fieldName) - } - return field - } - s.errorf("%s is not a field of struct type %s", fieldName, typ) - case reflect.Map: - // If it's a map, attempt to use the field name as a key. 
- nameVal := reflect.ValueOf(fieldName) - if nameVal.Type().AssignableTo(receiver.Type().Key()) { - if hasArgs { - s.errorf("%s is not a method but has arguments", fieldName) - } - return receiver.MapIndex(nameVal) - } - } - s.errorf("can't evaluate field %s in type %s", fieldName, typ) - panic("not reached") -} - -var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -) - -// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so -// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] -// as the function itself. -func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { - if args != nil { - args = args[1:] // Zeroth arg is function name/node; not passed to function. - } - typ := fun.Type() - numIn := len(args) - if final.IsValid() { - numIn++ - } - numFixed := len(args) - if typ.IsVariadic() { - numFixed = typ.NumIn() - 1 // last arg is the variadic one. - if numIn < numFixed { - s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) - } - } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { - s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) - } - if !goodFunc(typ) { - // TODO: This could still be a confusing error; maybe goodFunc should provide info. - s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) - } - // Build the arg list. - argv := make([]reflect.Value, numIn) - // Args must be evaluated. Fixed args first. - i := 0 - for ; i < numFixed && i < len(args); i++ { - argv[i] = s.evalArg(dot, typ.In(i), args[i]) - } - // Now the ... args. - if typ.IsVariadic() { - argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. - for ; i < len(args); i++ { - argv[i] = s.evalArg(dot, argType, args[i]) - } - } - // Add final value if necessary. - if final.IsValid() { - t := typ.In(typ.NumIn() - 1) - if typ.IsVariadic() { - t = t.Elem() - } - argv[i] = s.validateType(final, t) - } - result := fun.Call(argv) - // If we have an error that is not nil, stop execution and return that error to the caller. - if len(result) == 2 && !result[1].IsNil() { - s.at(node) - s.errorf("error calling %s: %s", name, result[1].Interface().(error)) - } - return result[0] -} - -// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. -func canBeNil(typ reflect.Type) bool { - switch typ.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -// validateType guarantees that the value is valid and assignable to the type. -func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { - if !value.IsValid() { - if typ == nil || canBeNil(typ) { - // An untyped nil interface{}. Accept as a proper nil value. - return reflect.Zero(typ) - } - s.errorf("invalid value; expected %s", typ) - } - if typ != nil && !value.Type().AssignableTo(typ) { - if value.Kind() == reflect.Interface && !value.IsNil() { - value = value.Elem() - if value.Type().AssignableTo(typ) { - return value - } - // fallthrough - } - // Does one dereference or indirection work? We could do more, as we - // do with method receivers, but that gets messy and method receivers - // are much more constrained, so it makes more sense there than here. 
- // Besides, one is almost always all you need. - switch { - case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): - value = value.Elem() - if !value.IsValid() { - s.errorf("dereference of nil pointer of type %s", typ) - } - case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): - value = value.Addr() - default: - s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) - } - } - return value -} - -func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - switch arg := n.(type) { - case *parse.DotNode: - return s.validateType(dot, typ) - case *parse.NilNode: - if canBeNil(typ) { - return reflect.Zero(typ) - } - s.errorf("cannot assign nil to %s", typ) - case *parse.FieldNode: - return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) - case *parse.VariableNode: - return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) - case *parse.PipeNode: - return s.validateType(s.evalPipeline(dot, arg), typ) - case *parse.IdentifierNode: - return s.evalFunction(dot, arg, arg, nil, zero) - case *parse.ChainNode: - return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) - } - switch typ.Kind() { - case reflect.Bool: - return s.evalBool(typ, n) - case reflect.Complex64, reflect.Complex128: - return s.evalComplex(typ, n) - case reflect.Float32, reflect.Float64: - return s.evalFloat(typ, n) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return s.evalInteger(typ, n) - case reflect.Interface: - if typ.NumMethod() == 0 { - return s.evalEmptyInterface(dot, n) - } - case reflect.String: - return s.evalString(typ, n) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return s.evalUnsignedInteger(typ, n) - } - s.errorf("can't handle %s for arg of type %s", n, typ) - panic("not reached") -} - -func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.BoolNode); ok { - value := reflect.New(typ).Elem() - value.SetBool(n.True) - return value - } - s.errorf("expected bool; found %s", n) - panic("not reached") -} - -func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.StringNode); ok { - value := reflect.New(typ).Elem() - value.SetString(n.Text) - return value - } - s.errorf("expected string; found %s", n) - panic("not reached") -} - -func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsInt { - value := reflect.New(typ).Elem() - value.SetInt(n.Int64) - return value - } - s.errorf("expected integer; found %s", n) - panic("not reached") -} - -func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsUint { - value := reflect.New(typ).Elem() - value.SetUint(n.Uint64) - return value - } - s.errorf("expected unsigned integer; found %s", n) - panic("not reached") -} - -func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { - value := reflect.New(typ).Elem() - value.SetFloat(n.Float64) - return value - } - s.errorf("expected float; found %s", n) - panic("not reached") -} - -func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { - if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { - value := reflect.New(typ).Elem() - 
value.SetComplex(n.Complex128) - return value - } - s.errorf("expected complex; found %s", n) - panic("not reached") -} - -func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { - s.at(n) - switch n := n.(type) { - case *parse.BoolNode: - return reflect.ValueOf(n.True) - case *parse.DotNode: - return dot - case *parse.FieldNode: - return s.evalFieldNode(dot, n, nil, zero) - case *parse.IdentifierNode: - return s.evalFunction(dot, n, n, nil, zero) - case *parse.NilNode: - // NilNode is handled in evalArg, the only place that calls here. - s.errorf("evalEmptyInterface: nil (can't happen)") - case *parse.NumberNode: - return s.idealConstant(n) - case *parse.StringNode: - return reflect.ValueOf(n.Text) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, nil, zero) - case *parse.PipeNode: - return s.evalPipeline(dot, n) - } - s.errorf("can't handle assignment of %s to empty interface argument", n) - panic("not reached") -} - -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. -func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// printValue writes the textual representation of the value to the output of -// the template. -func (s *state) printValue(n parse.Node, v reflect.Value) { - s.at(n) - iface, ok := printableValue(v) - if !ok { - s.errorf("can't print %s of type %s", n, v.Type()) - } - fmt.Fprint(s.wr, iface) -} - -// printableValue returns the, possibly indirected, interface value inside v that -// is best for a call to formatted printer. -func printableValue(v reflect.Value) (interface{}, bool) { - if v.Kind() == reflect.Ptr { - v, _ = indirect(v) // fmt.Fprint handles nil. - } - if !v.IsValid() { - return "", true - } - - if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { - if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { - v = v.Addr() - } else { - switch v.Kind() { - case reflect.Chan, reflect.Func: - return nil, false - } - } - } - return v.Interface(), true -} - -// Types to help sort the keys in a map for reproducible output. - -type rvs []reflect.Value - -func (x rvs) Len() int { return len(x) } -func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -type rvInts struct{ rvs } - -func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } - -type rvUints struct{ rvs } - -func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } - -type rvFloats struct{ rvs } - -func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } - -type rvStrings struct{ rvs } - -func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } - -// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. 
-func sortKeys(v []reflect.Value) []reflect.Value { - if len(v) <= 1 { - return v - } - switch v[0].Kind() { - case reflect.Float32, reflect.Float64: - sort.Sort(rvFloats{v}) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - sort.Sort(rvInts{v}) - case reflect.String: - sort.Sort(rvStrings{v}) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - sort.Sort(rvUints{v}) - } - return v -} diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go deleted file mode 100644 index 39ee5ed68f..0000000000 --- a/vendor/github.com/alecthomas/template/funcs.go +++ /dev/null @@ -1,598 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/url" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// FuncMap is the type of the map defining the mapping from names to functions. -// Each function must have either a single return value, or two return values of -// which the second has type error. In that case, if the second (error) -// return value evaluates to non-nil during execution, execution terminates and -// Execute returns that error. -type FuncMap map[string]interface{} - -var builtins = FuncMap{ - "and": and, - "call": call, - "html": HTMLEscaper, - "index": index, - "js": JSEscaper, - "len": length, - "not": not, - "or": or, - "print": fmt.Sprint, - "printf": fmt.Sprintf, - "println": fmt.Sprintln, - "urlquery": URLQueryEscaper, - - // Comparisons - "eq": eq, // == - "ge": ge, // >= - "gt": gt, // > - "le": le, // <= - "lt": lt, // < - "ne": ne, // != -} - -var builtinFuncs = createValueFuncs(builtins) - -// createValueFuncs turns a FuncMap into a map[string]reflect.Value -func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { - m := make(map[string]reflect.Value) - addValueFuncs(m, funcMap) - return m -} - -// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. -func addValueFuncs(out map[string]reflect.Value, in FuncMap) { - for name, fn := range in { - v := reflect.ValueOf(fn) - if v.Kind() != reflect.Func { - panic("value for " + name + " not a function") - } - if !goodFunc(v.Type()) { - panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) - } - out[name] = v - } -} - -// addFuncs adds to values the functions in funcs. It does no checking of the input - -// call addValueFuncs first. -func addFuncs(out, in FuncMap) { - for name, fn := range in { - out[name] = fn - } -} - -// goodFunc checks that the function or method has the right result signature. -func goodFunc(typ reflect.Type) bool { - // We allow functions with 1 result or 2 results where the second is an error. - switch { - case typ.NumOut() == 1: - return true - case typ.NumOut() == 2 && typ.Out(1) == errorType: - return true - } - return false -} - -// findFunction looks for a function in the template, and global map. -func findFunction(name string, tmpl *Template) (reflect.Value, bool) { - if tmpl != nil && tmpl.common != nil { - if fn := tmpl.execFuncs[name]; fn.IsValid() { - return fn, true - } - } - if fn := builtinFuncs[name]; fn.IsValid() { - return fn, true - } - return reflect.Value{}, false -} - -// Indexing. - -// index returns the result of indexing its first argument by the following -// arguments. 
Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. -func index(item interface{}, indices ...interface{}) (interface{}, error) { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return nil, fmt.Errorf("index of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) - } - if x < 0 || x >= int64(v.Len()) { - return nil, fmt.Errorf("index out of range: %d", x) - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return nil, fmt.Errorf("can't index item of type %s", v.Type()) - } - } - return v.Interface(), nil -} - -// Length - -// length returns the length of the item, with an error if it has no defined length. -func length(item interface{}) (int, error) { - v, isNil := indirect(reflect.ValueOf(item)) - if isNil { - return 0, fmt.Errorf("len of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len(), nil - } - return 0, fmt.Errorf("len of type %s", v.Type()) -} - -// Function invocation - -// call returns the result of evaluating the first argument as a function. -// The function must return 1 result, or 2 results, the second of which is an error. -func call(fn interface{}, args ...interface{}) (interface{}, error) { - v := reflect.ValueOf(fn) - typ := v.Type() - if typ.Kind() != reflect.Func { - return nil, fmt.Errorf("non-function of type %s", typ) - } - if !goodFunc(typ) { - return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) - } - numIn := typ.NumIn() - var dddType reflect.Type - if typ.IsVariadic() { - if len(args) < numIn-1 { - return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) - } - dddType = typ.In(numIn - 1).Elem() - } else { - if len(args) != numIn { - return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) - } - } - argv := make([]reflect.Value, len(args)) - for i, arg := range args { - value := reflect.ValueOf(arg) - // Compute the expected type. Clumsy because of variadics. - var argType reflect.Type - if !typ.IsVariadic() || i < numIn-1 { - argType = typ.In(i) - } else { - argType = dddType - } - if !value.IsValid() && canBeNil(argType) { - value = reflect.Zero(argType) - } - if !value.Type().AssignableTo(argType) { - return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) - } - argv[i] = value - } - result := v.Call(argv) - if len(result) == 2 && !result[1].IsNil() { - return result[0].Interface(), result[1].Interface().(error) - } - return result[0].Interface(), nil -} - -// Boolean logic. 
- -func truth(a interface{}) bool { - t, _ := isTrue(reflect.ValueOf(a)) - return t -} - -// and computes the Boolean AND of its arguments, returning -// the first false argument it encounters, or the last argument. -func and(arg0 interface{}, args ...interface{}) interface{} { - if !truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if !truth(arg0) { - break - } - } - return arg0 -} - -// or computes the Boolean OR of its arguments, returning -// the first true argument it encounters, or the last argument. -func or(arg0 interface{}, args ...interface{}) interface{} { - if truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if truth(arg0) { - break - } - } - return arg0 -} - -// not returns the Boolean negation of its argument. -func not(arg interface{}) (truth bool) { - truth, _ = isTrue(reflect.ValueOf(arg)) - return !truth -} - -// Comparison. - -// TODO: Perhaps allow comparison between signed and unsigned integers. - -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - integerKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// eq evaluates the comparison a == b || a == c || ... -func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - } - if truth { - return true, nil - } - } - return false, nil -} - -// ne evaluates the comparison a != b. -func ne(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := eq(arg1, arg2) - return !equal, err -} - -// lt evaluates the comparison a < b. 
-func lt(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") - } - } - return truth, nil -} - -// le evaluates the comparison <= b. -func le(arg1, arg2 interface{}) (bool, error) { - // <= is < or ==. - lessThan, err := lt(arg1, arg2) - if lessThan || err != nil { - return lessThan, err - } - return eq(arg1, arg2) -} - -// gt evaluates the comparison a > b. -func gt(arg1, arg2 interface{}) (bool, error) { - // > is the inverse of <=. - lessOrEqual, err := le(arg1, arg2) - if err != nil { - return false, err - } - return !lessOrEqual, nil -} - -// ge evaluates the comparison a >= b. -func ge(arg1, arg2 interface{}) (bool, error) { - // >= is the inverse of <. - lessThan, err := lt(arg1, arg2) - if err != nil { - return false, err - } - return !lessThan, nil -} - -// HTML escaping. - -var ( - htmlQuot = []byte("&#34;") // shorter than "&quot;" - htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5 - htmlAmp = []byte("&amp;") - htmlLt = []byte("&lt;") - htmlGt = []byte("&gt;") -) - -// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. -func HTMLEscape(w io.Writer, b []byte) { - last := 0 - for i, c := range b { - var html []byte - switch c { - case '"': - html = htmlQuot - case '\'': - html = htmlApos - case '&': - html = htmlAmp - case '<': - html = htmlLt - case '>': - html = htmlGt - default: - continue - } - w.Write(b[last:i]) - w.Write(html) - last = i + 1 - } - w.Write(b[last:]) -} - -// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. -func HTMLEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexAny(s, `'"&<>`) < 0 { - return s - } - var b bytes.Buffer - HTMLEscape(&b, []byte(s)) - return b.String() -} - -// HTMLEscaper returns the escaped HTML equivalent of the textual -// representation of its arguments. -func HTMLEscaper(args ...interface{}) string { - return HTMLEscapeString(evalArgs(args)) -} - -// JavaScript escaping. - -var ( - jsLowUni = []byte(`\u00`) - hex = []byte("0123456789ABCDEF") - - jsBackslash = []byte(`\\`) - jsApos = []byte(`\'`) - jsQuot = []byte(`\"`) - jsLt = []byte(`\x3C`) - jsGt = []byte(`\x3E`) -) - -// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. -func JSEscape(w io.Writer, b []byte) { - last := 0 - for i := 0; i < len(b); i++ { - c := b[i] - - if !jsIsSpecial(rune(c)) { - // fast path: nothing to do - continue - } - w.Write(b[last:i]) - - if c < utf8.RuneSelf { - // Quotes, slashes and angle brackets get quoted. - // Control characters get written as \u00XX.
- switch c { - case '\\': - w.Write(jsBackslash) - case '\'': - w.Write(jsApos) - case '"': - w.Write(jsQuot) - case '<': - w.Write(jsLt) - case '>': - w.Write(jsGt) - default: - w.Write(jsLowUni) - t, b := c>>4, c&0x0f - w.Write(hex[t : t+1]) - w.Write(hex[b : b+1]) - } - } else { - // Unicode rune. - r, size := utf8.DecodeRune(b[i:]) - if unicode.IsPrint(r) { - w.Write(b[i : i+size]) - } else { - fmt.Fprintf(w, "\\u%04X", r) - } - i += size - 1 - } - last = i + 1 - } - w.Write(b[last:]) -} - -// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. -func JSEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexFunc(s, jsIsSpecial) < 0 { - return s - } - var b bytes.Buffer - JSEscape(&b, []byte(s)) - return b.String() -} - -func jsIsSpecial(r rune) bool { - switch r { - case '\\', '\'', '"', '<', '>': - return true - } - return r < ' ' || utf8.RuneSelf <= r -} - -// JSEscaper returns the escaped JavaScript equivalent of the textual -// representation of its arguments. -func JSEscaper(args ...interface{}) string { - return JSEscapeString(evalArgs(args)) -} - -// URLQueryEscaper returns the escaped value of the textual representation of -// its arguments in a form suitable for embedding in a URL query. -func URLQueryEscaper(args ...interface{}) string { - return url.QueryEscape(evalArgs(args)) -} - -// evalArgs formats the list of arguments into a string. It is therefore equivalent to -// fmt.Sprint(args...) -// except that each argument is indirected (if a pointer), as required, -// using the same rules as the default string evaluation during template -// execution. -func evalArgs(args []interface{}) string { - ok := false - var s string - // Fast path for simple common case. - if len(args) == 1 { - s, ok = args[0].(string) - } - if !ok { - for i, arg := range args { - a, ok := printableValue(reflect.ValueOf(arg)) - if ok { - args[i] = a - } // else left fmt do its thing - } - s = fmt.Sprint(args...) - } - return s -} diff --git a/vendor/github.com/alecthomas/template/go.mod b/vendor/github.com/alecthomas/template/go.mod deleted file mode 100644 index a70670ae21..0000000000 --- a/vendor/github.com/alecthomas/template/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/alecthomas/template diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go deleted file mode 100644 index 3636fb54d6..0000000000 --- a/vendor/github.com/alecthomas/template/helper.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Helper functions to make constructing templates easier. - -package template - -import ( - "fmt" - "io/ioutil" - "path/filepath" -) - -// Functions and methods to parse templates. - -// Must is a helper that wraps a call to a function returning (*Template, error) -// and panics if the error is non-nil. It is intended for use in variable -// initializations such as -// var t = template.Must(template.New("name").Parse("text")) -func Must(t *Template, err error) *Template { - if err != nil { - panic(err) - } - return t -} - -// ParseFiles creates a new Template and parses the template definitions from -// the named files. The returned template's name will have the (base) name and -// (parsed) contents of the first file. There must be at least one file. -// If an error occurs, parsing stops and the returned *Template is nil. 
-func ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(nil, filenames...) -} - -// ParseFiles parses the named files and associates the resulting templates with -// t. If an error occurs, parsing stops and the returned template is nil; -// otherwise it is t. There must be at least one file. -func (t *Template) ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(t, filenames...) -} - -// parseFiles is the helper for the method and function. If the argument -// template is nil, it is created from the first file. -func parseFiles(t *Template, filenames ...string) (*Template, error) { - if len(filenames) == 0 { - // Not really a problem, but be consistent. - return nil, fmt.Errorf("template: no files named in call to ParseFiles") - } - for _, filename := range filenames { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - s := string(b) - name := filepath.Base(filename) - // First template becomes return value if not already defined, - // and we use that one for subsequent New calls to associate - // all the templates together. Also, if this file has the same name - // as t, this file becomes the contents of t, so - // t, err := New(name).Funcs(xxx).ParseFiles(name) - // works. Otherwise we create a new template associated with t. - var tmpl *Template - if t == nil { - t = New(name) - } - if name == t.Name() { - tmpl = t - } else { - tmpl = t.New(name) - } - _, err = tmpl.Parse(s) - if err != nil { - return nil, err - } - } - return t, nil -} - -// ParseGlob creates a new Template and parses the template definitions from the -// files identified by the pattern, which must match at least one file. The -// returned template will have the (base) name and (parsed) contents of the -// first file matched by the pattern. ParseGlob is equivalent to calling -// ParseFiles with the list of files matched by the pattern. -func ParseGlob(pattern string) (*Template, error) { - return parseGlob(nil, pattern) -} - -// ParseGlob parses the template definitions in the files identified by the -// pattern and associates the resulting templates with t. The pattern is -// processed by filepath.Glob and must match at least one file. ParseGlob is -// equivalent to calling t.ParseFiles with the list of files matched by the -// pattern. -func (t *Template) ParseGlob(pattern string) (*Template, error) { - return parseGlob(t, pattern) -} - -// parseGlob is the implementation of the function and method ParseGlob. -func parseGlob(t *Template, pattern string) (*Template, error) { - filenames, err := filepath.Glob(pattern) - if err != nil { - return nil, err - } - if len(filenames) == 0 { - return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) - } - return parseFiles(t, filenames...) -} diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go deleted file mode 100644 index 55f1c051e8..0000000000 --- a/vendor/github.com/alecthomas/template/parse/lex.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package parse - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos Pos // The starting position, in bytes, of this item in the input string. - val string // The value of this item. 
-} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case i.typ > itemKeyword: - return fmt.Sprintf("<%s>", i.val) - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. -type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemBool // boolean constant - itemChar // printable ASCII character; grab bag for comma etc. - itemCharConstant // character constant - itemComplex // complex constant (1+2i); imaginary is just a number - itemColonEquals // colon-equals (':=') introducing a declaration - itemEOF - itemField // alphanumeric identifier starting with '.' - itemIdentifier // alphanumeric identifier not starting with '.' - itemLeftDelim // left action delimiter - itemLeftParen // '(' inside action - itemNumber // simple number, including imaginary - itemPipe // pipe symbol - itemRawString // raw quoted string (includes quotes) - itemRightDelim // right action delimiter - itemElideNewline // elide newline after right delim - itemRightParen // ')' inside action - itemSpace // run of spaces separating arguments - itemString // quoted string (includes quotes) - itemText // plain text - itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' - // Keywords appear after all the rest. - itemKeyword // used only to delimit the keywords - itemDot // the cursor, spelled '.' - itemDefine // define keyword - itemElse // else keyword - itemEnd // end keyword - itemIf // if keyword - itemNil // the untyped nil constant, easiest to treat as a keyword - itemRange // range keyword - itemTemplate // template keyword - itemWith // with keyword -) - -var key = map[string]itemType{ - ".": itemDot, - "define": itemDefine, - "else": itemElse, - "end": itemEnd, - "if": itemIf, - "range": itemRange, - "nil": itemNil, - "template": itemTemplate, - "with": itemWith, -} - -const eof = -1 - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - name string // the name of the input; used only for error reports - input string // the string being scanned - leftDelim string // start of action - rightDelim string // end of action - state stateFn // the next lexing function to enter - pos Pos // current position in the input - start Pos // start position of this item - width Pos // width of last rune read from input - lastPos Pos // position of most recent item returned by nextItem - items chan item // channel of scanned items - parenDepth int // nesting depth of ( ) exprs -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if int(l.pos) >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = Pos(w) - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - l.items <- item{t, l.start, l.input[l.start:l.pos]} - l.start = l.pos -} - -// ignore skips over the pending input before this point. 
-func (l *lexer) ignore() { - l.start = l.pos -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.IndexRune(valid, l.next()) >= 0 { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.IndexRune(valid, l.next()) >= 0 { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - item := <-l.items - l.lastPos = item.pos - return item -} - -// lex creates a new scanner for the input string. -func lex(name, input, left, right string) *lexer { - if left == "" { - left = leftDelim - } - if right == "" { - right = rightDelim - } - l := &lexer{ - name: name, - input: input, - leftDelim: left, - rightDelim: right, - items: make(chan item), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexText; l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -const ( - leftDelim = "{{" - rightDelim = "}}" - leftComment = "/*" - rightComment = "*/" -) - -// lexText scans until an opening action delimiter, "{{". -func lexText(l *lexer) stateFn { - for { - if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { - if l.pos > l.start { - l.emit(itemText) - } - return lexLeftDelim - } - if l.next() == eof { - break - } - } - // Correctly reached EOF. - if l.pos > l.start { - l.emit(itemText) - } - l.emit(itemEOF) - return nil -} - -// lexLeftDelim scans the left delimiter, which is known to be present. -func lexLeftDelim(l *lexer) stateFn { - l.pos += Pos(len(l.leftDelim)) - if strings.HasPrefix(l.input[l.pos:], leftComment) { - return lexComment - } - l.emit(itemLeftDelim) - l.parenDepth = 0 - return lexInsideAction -} - -// lexComment scans a comment. The left comment marker is known to be present. -func lexComment(l *lexer) stateFn { - l.pos += Pos(len(leftComment)) - i := strings.Index(l.input[l.pos:], rightComment) - if i < 0 { - return l.errorf("unclosed comment") - } - l.pos += Pos(i + len(rightComment)) - if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - return l.errorf("comment ends before closing delimiter") - - } - l.pos += Pos(len(l.rightDelim)) - l.ignore() - return lexText -} - -// lexRightDelim scans the right delimiter, which is known to be present. -func lexRightDelim(l *lexer) stateFn { - l.pos += Pos(len(l.rightDelim)) - l.emit(itemRightDelim) - if l.peek() == '\\' { - l.pos++ - l.emit(itemElideNewline) - } - return lexText -} - -// lexInsideAction scans the elements inside action delimiters. -func lexInsideAction(l *lexer) stateFn { - // Either number, quoted string, or identifier. - // Spaces separate arguments; runs of spaces turn into itemSpace. - // Pipe symbols separate and are emitted. 
- if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - if l.parenDepth == 0 { - return lexRightDelim - } - return l.errorf("unclosed left paren") - } - switch r := l.next(); { - case r == eof || isEndOfLine(r): - return l.errorf("unclosed action") - case isSpace(r): - return lexSpace - case r == ':': - if l.next() != '=' { - return l.errorf("expected :=") - } - l.emit(itemColonEquals) - case r == '|': - l.emit(itemPipe) - case r == '"': - return lexQuote - case r == '`': - return lexRawQuote - case r == '$': - return lexVariable - case r == '\'': - return lexChar - case r == '.': - // special look-ahead for ".field" so we don't break l.backup(). - if l.pos < Pos(len(l.input)) { - r := l.input[l.pos] - if r < '0' || '9' < r { - return lexField - } - } - fallthrough // '.' can start a number. - case r == '+' || r == '-' || ('0' <= r && r <= '9'): - l.backup() - return lexNumber - case isAlphaNumeric(r): - l.backup() - return lexIdentifier - case r == '(': - l.emit(itemLeftParen) - l.parenDepth++ - return lexInsideAction - case r == ')': - l.emit(itemRightParen) - l.parenDepth-- - if l.parenDepth < 0 { - return l.errorf("unexpected right paren %#U", r) - } - return lexInsideAction - case r <= unicode.MaxASCII && unicode.IsPrint(r): - l.emit(itemChar) - return lexInsideAction - default: - return l.errorf("unrecognized character in action: %#U", r) - } - return lexInsideAction -} - -// lexSpace scans a run of space characters. -// One space has already been seen. -func lexSpace(l *lexer) stateFn { - for isSpace(l.peek()) { - l.next() - } - l.emit(itemSpace) - return lexInsideAction -} - -// lexIdentifier scans an alphanumeric. -func lexIdentifier(l *lexer) stateFn { -Loop: - for { - switch r := l.next(); { - case isAlphaNumeric(r): - // absorb. - default: - l.backup() - word := l.input[l.start:l.pos] - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - switch { - case key[word] > itemKeyword: - l.emit(key[word]) - case word[0] == '.': - l.emit(itemField) - case word == "true", word == "false": - l.emit(itemBool) - default: - l.emit(itemIdentifier) - } - break Loop - } - } - return lexInsideAction -} - -// lexField scans a field: .Alphanumeric. -// The . has been scanned. -func lexField(l *lexer) stateFn { - return lexFieldOrVariable(l, itemField) -} - -// lexVariable scans a Variable: $Alphanumeric. -// The $ has been scanned. -func lexVariable(l *lexer) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "$". - l.emit(itemVariable) - return lexInsideAction - } - return lexFieldOrVariable(l, itemVariable) -} - -// lexVariable scans a field or variable: [.$]Alphanumeric. -// The . or $ has been scanned. -func lexFieldOrVariable(l *lexer, typ itemType) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "." or "$". - if typ == itemVariable { - l.emit(itemVariable) - } else { - l.emit(itemDot) - } - return lexInsideAction - } - var r rune - for { - r = l.next() - if !isAlphaNumeric(r) { - l.backup() - break - } - } - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - l.emit(typ) - return lexInsideAction -} - -// atTerminator reports whether the input is at valid termination character to -// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases -// like "$x+2" not being acceptable without a space, in case we decide one -// day to implement arithmetic. 
-func (l *lexer) atTerminator() bool { - r := l.peek() - if isSpace(r) || isEndOfLine(r) { - return true - } - switch r { - case eof, '.', ',', '|', ':', ')', '(': - return true - } - // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will - // succeed but should fail) but only in extremely rare cases caused by willfully - // bad choice of delimiter. - if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { - return true - } - return false -} - -// lexChar scans a character constant. The initial quote is already -// scanned. Syntax checking is done by the parser. -func lexChar(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated character constant") - case '\'': - break Loop - } - } - l.emit(itemCharConstant) - return lexInsideAction -} - -// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This -// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" -// and "089" - but when it's wrong the input is invalid and the parser (via -// strconv) will notice. -func lexNumber(l *lexer) stateFn { - if !l.scanNumber() { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - if sign := l.peek(); sign == '+' || sign == '-' { - // Complex: 1+2i. No spaces, must end in 'i'. - if !l.scanNumber() || l.input[l.pos-1] != 'i' { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - l.emit(itemComplex) - } else { - l.emit(itemNumber) - } - return lexInsideAction -} - -func (l *lexer) scanNumber() bool { - // Optional leading sign. - l.accept("+-") - // Is it hex? - digits := "0123456789" - if l.accept("0") && l.accept("xX") { - digits = "0123456789abcdefABCDEF" - } - l.acceptRun(digits) - if l.accept(".") { - l.acceptRun(digits) - } - if l.accept("eE") { - l.accept("+-") - l.acceptRun("0123456789") - } - // Is it imaginary? - l.accept("i") - // Next thing mustn't be alphanumeric. - if isAlphaNumeric(l.peek()) { - l.next() - return false - } - return true -} - -// lexQuote scans a quoted string. -func lexQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated quoted string") - case '"': - break Loop - } - } - l.emit(itemString) - return lexInsideAction -} - -// lexRawQuote scans a raw quoted string. -func lexRawQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case eof, '\n': - return l.errorf("unterminated raw quoted string") - case '`': - break Loop - } - } - l.emit(itemRawString) - return lexInsideAction -} - -// isSpace reports whether r is a space character. -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go deleted file mode 100644 index 55c37f6dba..0000000000 --- a/vendor/github.com/alecthomas/template/parse/node.go +++ /dev/null @@ -1,834 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Parse nodes. - -package parse - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -var textFormat = "%s" // Changed to "%q" in tests for better error messages. - -// A Node is an element in the parse tree. The interface is trivial. -// The interface contains an unexported method so that only -// types local to this package can satisfy it. -type Node interface { - Type() NodeType - String() string - // Copy does a deep copy of the Node and all its components. - // To avoid type assertions, some XxxNodes also have specialized - // CopyXxx methods that return *XxxNode. - Copy() Node - Position() Pos // byte position of start of node in full original input string - // tree returns the containing *Tree. - // It is unexported so all implementations of Node are in this package. - tree() *Tree -} - -// NodeType identifies the type of a parse tree node. -type NodeType int - -// Pos represents a byte position in the original input text from which -// this template was parsed. -type Pos int - -func (p Pos) Position() Pos { - return p -} - -// Type returns itself and provides an easy default implementation -// for embedding in a Node. Embedded in all non-trivial Nodes. -func (t NodeType) Type() NodeType { - return t -} - -const ( - NodeText NodeType = iota // Plain text. - NodeAction // A non-control action such as a field evaluation. - NodeBool // A boolean constant. - NodeChain // A sequence of field accesses. - NodeCommand // An element of a pipeline. - NodeDot // The cursor, dot. - nodeElse // An else action. Not added to tree. - nodeEnd // An end action. Not added to tree. - NodeField // A field or method name. - NodeIdentifier // An identifier; always a function name. - NodeIf // An if action. - NodeList // A list of Nodes. - NodeNil // An untyped nil constant. - NodeNumber // A numerical constant. - NodePipe // A pipeline of commands. - NodeRange // A range action. - NodeString // A string constant. - NodeTemplate // A template invocation action. - NodeVariable // A $ variable. - NodeWith // A with action. -) - -// Nodes. - -// ListNode holds a sequence of nodes. -type ListNode struct { - NodeType - Pos - tr *Tree - Nodes []Node // The element nodes in lexical order. -} - -func (t *Tree) newList(pos Pos) *ListNode { - return &ListNode{tr: t, NodeType: NodeList, Pos: pos} -} - -func (l *ListNode) append(n Node) { - l.Nodes = append(l.Nodes, n) -} - -func (l *ListNode) tree() *Tree { - return l.tr -} - -func (l *ListNode) String() string { - b := new(bytes.Buffer) - for _, n := range l.Nodes { - fmt.Fprint(b, n) - } - return b.String() -} - -func (l *ListNode) CopyList() *ListNode { - if l == nil { - return l - } - n := l.tr.newList(l.Pos) - for _, elem := range l.Nodes { - n.append(elem.Copy()) - } - return n -} - -func (l *ListNode) Copy() Node { - return l.CopyList() -} - -// TextNode holds plain text. -type TextNode struct { - NodeType - Pos - tr *Tree - Text []byte // The text; may span newlines. 
-} - -func (t *Tree) newText(pos Pos, text string) *TextNode { - return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} -} - -func (t *TextNode) String() string { - return fmt.Sprintf(textFormat, t.Text) -} - -func (t *TextNode) tree() *Tree { - return t.tr -} - -func (t *TextNode) Copy() Node { - return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} -} - -// PipeNode holds a pipeline with optional declaration -type PipeNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Decl []*VariableNode // Variable declarations in lexical order. - Cmds []*CommandNode // The commands in lexical order. -} - -func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { - return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} -} - -func (p *PipeNode) append(command *CommandNode) { - p.Cmds = append(p.Cmds, command) -} - -func (p *PipeNode) String() string { - s := "" - if len(p.Decl) > 0 { - for i, v := range p.Decl { - if i > 0 { - s += ", " - } - s += v.String() - } - s += " := " - } - for i, c := range p.Cmds { - if i > 0 { - s += " | " - } - s += c.String() - } - return s -} - -func (p *PipeNode) tree() *Tree { - return p.tr -} - -func (p *PipeNode) CopyPipe() *PipeNode { - if p == nil { - return p - } - var decl []*VariableNode - for _, d := range p.Decl { - decl = append(decl, d.Copy().(*VariableNode)) - } - n := p.tr.newPipeline(p.Pos, p.Line, decl) - for _, c := range p.Cmds { - n.append(c.Copy().(*CommandNode)) - } - return n -} - -func (p *PipeNode) Copy() Node { - return p.CopyPipe() -} - -// ActionNode holds an action (something bounded by delimiters). -// Control actions have their own nodes; ActionNode represents simple -// ones such as field evaluations and parenthesized pipelines. -type ActionNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline in the action. -} - -func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { - return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} -} - -func (a *ActionNode) String() string { - return fmt.Sprintf("{{%s}}", a.Pipe) - -} - -func (a *ActionNode) tree() *Tree { - return a.tr -} - -func (a *ActionNode) Copy() Node { - return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) - -} - -// CommandNode holds a command (a pipeline inside an evaluating action). -type CommandNode struct { - NodeType - Pos - tr *Tree - Args []Node // Arguments in lexical order: Identifier, field, or constant. -} - -func (t *Tree) newCommand(pos Pos) *CommandNode { - return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} -} - -func (c *CommandNode) append(arg Node) { - c.Args = append(c.Args, arg) -} - -func (c *CommandNode) String() string { - s := "" - for i, arg := range c.Args { - if i > 0 { - s += " " - } - if arg, ok := arg.(*PipeNode); ok { - s += "(" + arg.String() + ")" - continue - } - s += arg.String() - } - return s -} - -func (c *CommandNode) tree() *Tree { - return c.tr -} - -func (c *CommandNode) Copy() Node { - if c == nil { - return c - } - n := c.tr.newCommand(c.Pos) - for _, c := range c.Args { - n.append(c.Copy()) - } - return n -} - -// IdentifierNode holds an identifier. -type IdentifierNode struct { - NodeType - Pos - tr *Tree - Ident string // The identifier's name. 
-} - -// NewIdentifier returns a new IdentifierNode with the given identifier name. -func NewIdentifier(ident string) *IdentifierNode { - return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} -} - -// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? -func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { - i.Pos = pos - return i -} - -// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? -func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { - i.tr = t - return i -} - -func (i *IdentifierNode) String() string { - return i.Ident -} - -func (i *IdentifierNode) tree() *Tree { - return i.tr -} - -func (i *IdentifierNode) Copy() Node { - return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) -} - -// VariableNode holds a list of variable names, possibly with chained field -// accesses. The dollar sign is part of the (first) name. -type VariableNode struct { - NodeType - Pos - tr *Tree - Ident []string // Variable name and fields in lexical order. -} - -func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { - return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} -} - -func (v *VariableNode) String() string { - s := "" - for i, id := range v.Ident { - if i > 0 { - s += "." - } - s += id - } - return s -} - -func (v *VariableNode) tree() *Tree { - return v.tr -} - -func (v *VariableNode) Copy() Node { - return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} -} - -// DotNode holds the special identifier '.'. -type DotNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newDot(pos Pos) *DotNode { - return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} -} - -func (d *DotNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. - return NodeDot -} - -func (d *DotNode) String() string { - return "." -} - -func (d *DotNode) tree() *Tree { - return d.tr -} - -func (d *DotNode) Copy() Node { - return d.tr.newDot(d.Pos) -} - -// NilNode holds the special identifier 'nil' representing an untyped nil constant. -type NilNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newNil(pos Pos) *NilNode { - return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} -} - -func (n *NilNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. - return NodeNil -} - -func (n *NilNode) String() string { - return "nil" -} - -func (n *NilNode) tree() *Tree { - return n.tr -} - -func (n *NilNode) Copy() Node { - return n.tr.newNil(n.Pos) -} - -// FieldNode holds a field (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The period is dropped from each ident. -type FieldNode struct { - NodeType - Pos - tr *Tree - Ident []string // The identifiers in lexical order. -} - -func (t *Tree) newField(pos Pos, ident string) *FieldNode { - return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period -} - -func (f *FieldNode) String() string { - s := "" - for _, id := range f.Ident { - s += "." 
+ id - } - return s -} - -func (f *FieldNode) tree() *Tree { - return f.tr -} - -func (f *FieldNode) Copy() Node { - return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} -} - -// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The periods are dropped from each ident. -type ChainNode struct { - NodeType - Pos - tr *Tree - Node Node - Field []string // The identifiers in lexical order. -} - -func (t *Tree) newChain(pos Pos, node Node) *ChainNode { - return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} -} - -// Add adds the named field (which should start with a period) to the end of the chain. -func (c *ChainNode) Add(field string) { - if len(field) == 0 || field[0] != '.' { - panic("no dot in field") - } - field = field[1:] // Remove leading dot. - if field == "" { - panic("empty field") - } - c.Field = append(c.Field, field) -} - -func (c *ChainNode) String() string { - s := c.Node.String() - if _, ok := c.Node.(*PipeNode); ok { - s = "(" + s + ")" - } - for _, field := range c.Field { - s += "." + field - } - return s -} - -func (c *ChainNode) tree() *Tree { - return c.tr -} - -func (c *ChainNode) Copy() Node { - return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} -} - -// BoolNode holds a boolean constant. -type BoolNode struct { - NodeType - Pos - tr *Tree - True bool // The value of the boolean constant. -} - -func (t *Tree) newBool(pos Pos, true bool) *BoolNode { - return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} -} - -func (b *BoolNode) String() string { - if b.True { - return "true" - } - return "false" -} - -func (b *BoolNode) tree() *Tree { - return b.tr -} - -func (b *BoolNode) Copy() Node { - return b.tr.newBool(b.Pos, b.True) -} - -// NumberNode holds a number: signed or unsigned integer, float, or complex. -// The value is parsed and stored under all the types that can represent the value. -// This simulates in a small amount of code the behavior of Go's ideal constants. -type NumberNode struct { - NodeType - Pos - tr *Tree - IsInt bool // Number has an integral value. - IsUint bool // Number has an unsigned integral value. - IsFloat bool // Number has a floating-point value. - IsComplex bool // Number is complex. - Int64 int64 // The signed integer value. - Uint64 uint64 // The unsigned integer value. - Float64 float64 // The floating-point value. - Complex128 complex128 // The complex value. - Text string // The original textual representation from the input. -} - -func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { - n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} - switch typ { - case itemCharConstant: - rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) - if err != nil { - return nil, err - } - if tail != "'" { - return nil, fmt.Errorf("malformed character constant: %s", text) - } - n.Int64 = int64(rune) - n.IsInt = true - n.Uint64 = uint64(rune) - n.IsUint = true - n.Float64 = float64(rune) // odd but those are the rules. - n.IsFloat = true - return n, nil - case itemComplex: - // fmt.Sscan can parse the pair, so let it do the work. - if _, err := fmt.Sscan(text, &n.Complex128); err != nil { - return nil, err - } - n.IsComplex = true - n.simplifyComplex() - return n, nil - } - // Imaginary constants can only be complex unless they are zero. 
- if len(text) > 0 && text[len(text)-1] == 'i' { - f, err := strconv.ParseFloat(text[:len(text)-1], 64) - if err == nil { - n.IsComplex = true - n.Complex128 = complex(0, f) - n.simplifyComplex() - return n, nil - } - } - // Do integer test first so we get 0x123 etc. - u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. - if err == nil { - n.IsUint = true - n.Uint64 = u - } - i, err := strconv.ParseInt(text, 0, 64) - if err == nil { - n.IsInt = true - n.Int64 = i - if i == 0 { - n.IsUint = true // in case of -0. - n.Uint64 = u - } - } - // If an integer extraction succeeded, promote the float. - if n.IsInt { - n.IsFloat = true - n.Float64 = float64(n.Int64) - } else if n.IsUint { - n.IsFloat = true - n.Float64 = float64(n.Uint64) - } else { - f, err := strconv.ParseFloat(text, 64) - if err == nil { - n.IsFloat = true - n.Float64 = f - // If a floating-point extraction succeeded, extract the int if needed. - if !n.IsInt && float64(int64(f)) == f { - n.IsInt = true - n.Int64 = int64(f) - } - if !n.IsUint && float64(uint64(f)) == f { - n.IsUint = true - n.Uint64 = uint64(f) - } - } - } - if !n.IsInt && !n.IsUint && !n.IsFloat { - return nil, fmt.Errorf("illegal number syntax: %q", text) - } - return n, nil -} - -// simplifyComplex pulls out any other types that are represented by the complex number. -// These all require that the imaginary part be zero. -func (n *NumberNode) simplifyComplex() { - n.IsFloat = imag(n.Complex128) == 0 - if n.IsFloat { - n.Float64 = real(n.Complex128) - n.IsInt = float64(int64(n.Float64)) == n.Float64 - if n.IsInt { - n.Int64 = int64(n.Float64) - } - n.IsUint = float64(uint64(n.Float64)) == n.Float64 - if n.IsUint { - n.Uint64 = uint64(n.Float64) - } - } -} - -func (n *NumberNode) String() string { - return n.Text -} - -func (n *NumberNode) tree() *Tree { - return n.tr -} - -func (n *NumberNode) Copy() Node { - nn := new(NumberNode) - *nn = *n // Easy, fast, correct. - return nn -} - -// StringNode holds a string constant. The value has been "unquoted". -type StringNode struct { - NodeType - Pos - tr *Tree - Quoted string // The original text of the string, with quotes. - Text string // The string, after quote processing. -} - -func (t *Tree) newString(pos Pos, orig, text string) *StringNode { - return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} -} - -func (s *StringNode) String() string { - return s.Quoted -} - -func (s *StringNode) tree() *Tree { - return s.tr -} - -func (s *StringNode) Copy() Node { - return s.tr.newString(s.Pos, s.Quoted, s.Text) -} - -// endNode represents an {{end}} action. -// It does not appear in the final parse tree. -type endNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newEnd(pos Pos) *endNode { - return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} -} - -func (e *endNode) String() string { - return "{{end}}" -} - -func (e *endNode) tree() *Tree { - return e.tr -} - -func (e *endNode) Copy() Node { - return e.tr.newEnd(e.Pos) -} - -// elseNode represents an {{else}} action. Does not appear in the final tree. 
-type elseNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) -} - -func (t *Tree) newElse(pos Pos, line int) *elseNode { - return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} -} - -func (e *elseNode) Type() NodeType { - return nodeElse -} - -func (e *elseNode) String() string { - return "{{else}}" -} - -func (e *elseNode) tree() *Tree { - return e.tr -} - -func (e *elseNode) Copy() Node { - return e.tr.newElse(e.Pos, e.Line) -} - -// BranchNode is the common representation of if, range, and with. -type BranchNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline to be evaluated. - List *ListNode // What to execute if the value is non-empty. - ElseList *ListNode // What to execute if the value is empty (nil if absent). -} - -func (b *BranchNode) String() string { - name := "" - switch b.NodeType { - case NodeIf: - name = "if" - case NodeRange: - name = "range" - case NodeWith: - name = "with" - default: - panic("unknown branch type") - } - if b.ElseList != nil { - return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) - } - return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) -} - -func (b *BranchNode) tree() *Tree { - return b.tr -} - -func (b *BranchNode) Copy() Node { - switch b.NodeType { - case NodeIf: - return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeRange: - return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeWith: - return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - default: - panic("unknown branch type") - } -} - -// IfNode represents an {{if}} action and its commands. -type IfNode struct { - BranchNode -} - -func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { - return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (i *IfNode) Copy() Node { - return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) -} - -// RangeNode represents a {{range}} action and its commands. -type RangeNode struct { - BranchNode -} - -func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { - return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (r *RangeNode) Copy() Node { - return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) -} - -// WithNode represents a {{with}} action and its commands. -type WithNode struct { - BranchNode -} - -func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { - return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (w *WithNode) Copy() Node { - return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) -} - -// TemplateNode represents a {{template}} action. -type TemplateNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Name string // The name of the template (unquoted). - Pipe *PipeNode // The command to evaluate as dot for the template. 
-} - -func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { - return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} -} - -func (t *TemplateNode) String() string { - if t.Pipe == nil { - return fmt.Sprintf("{{template %q}}", t.Name) - } - return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) -} - -func (t *TemplateNode) tree() *Tree { - return t.tr -} - -func (t *TemplateNode) Copy() Node { - return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) -} diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go deleted file mode 100644 index 0d77ade871..0000000000 --- a/vendor/github.com/alecthomas/template/parse/parse.go +++ /dev/null @@ -1,700 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package parse builds parse trees for templates as defined by text/template -// and html/template. Clients should use those packages to construct templates -// rather than this one, which provides shared internal data structures not -// intended for general use. -package parse - -import ( - "bytes" - "fmt" - "runtime" - "strconv" - "strings" -) - -// Tree is the representation of a single parsed template. -type Tree struct { - Name string // name of the template represented by the tree. - ParseName string // name of the top-level template during parsing, for error messages. - Root *ListNode // top-level root of the tree. - text string // text parsed to create the template (or its parent) - // Parsing only; cleared after parse. - funcs []map[string]interface{} - lex *lexer - token [3]item // three-token lookahead for parser. - peekCount int - vars []string // variables defined at the moment. -} - -// Copy returns a copy of the Tree. Any parsing state is discarded. -func (t *Tree) Copy() *Tree { - if t == nil { - return nil - } - return &Tree{ - Name: t.Name, - ParseName: t.ParseName, - Root: t.Root.CopyList(), - text: t.text, - } -} - -// Parse returns a map from template name to parse.Tree, created by parsing the -// templates described in the argument string. The top-level template will be -// given the specified name. If an error is encountered, parsing stops and an -// empty map is returned with the error. -func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { - treeSet = make(map[string]*Tree) - t := New(name) - t.text = text - _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) - return -} - -// next returns the next token. -func (t *Tree) next() item { - if t.peekCount > 0 { - t.peekCount-- - } else { - t.token[0] = t.lex.nextItem() - } - return t.token[t.peekCount] -} - -// backup backs the input stream up one token. -func (t *Tree) backup() { - t.peekCount++ -} - -// backup2 backs the input stream up two tokens. -// The zeroth token is already there. -func (t *Tree) backup2(t1 item) { - t.token[1] = t1 - t.peekCount = 2 -} - -// backup3 backs the input stream up three tokens -// The zeroth token is already there. -func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. - t.token[1] = t1 - t.token[2] = t2 - t.peekCount = 3 -} - -// peek returns but does not consume the next token. 
-func (t *Tree) peek() item { - if t.peekCount > 0 { - return t.token[t.peekCount-1] - } - t.peekCount = 1 - t.token[0] = t.lex.nextItem() - return t.token[0] -} - -// nextNonSpace returns the next non-space token. -func (t *Tree) nextNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - return token -} - -// peekNonSpace returns but does not consume the next non-space token. -func (t *Tree) peekNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - t.backup() - return token -} - -// Parsing. - -// New allocates a new parse tree with the given name. -func New(name string, funcs ...map[string]interface{}) *Tree { - return &Tree{ - Name: name, - funcs: funcs, - } -} - -// ErrorContext returns a textual representation of the location of the node in the input text. -// The receiver is only used when the node does not have a pointer to the tree inside, -// which can occur in old code. -func (t *Tree) ErrorContext(n Node) (location, context string) { - pos := int(n.Position()) - tree := n.tree() - if tree == nil { - tree = t - } - text := tree.text[:pos] - byteNum := strings.LastIndex(text, "\n") - if byteNum == -1 { - byteNum = pos // On first line. - } else { - byteNum++ // After the newline. - byteNum = pos - byteNum - } - lineNum := 1 + strings.Count(text, "\n") - context = n.String() - if len(context) > 20 { - context = fmt.Sprintf("%.20s...", context) - } - return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context -} - -// errorf formats the error and terminates processing. -func (t *Tree) errorf(format string, args ...interface{}) { - t.Root = nil - format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -// error terminates processing. -func (t *Tree) error(err error) { - t.errorf("%s", err) -} - -// expect consumes the next token and guarantees it has the required type. -func (t *Tree) expect(expected itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected { - t.unexpected(token, context) - } - return token -} - -// expectOneOf consumes the next token and guarantees it has one of the required types. -func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected1 && token.typ != expected2 { - t.unexpected(token, context) - } - return token -} - -// unexpected complains about the token and terminates processing. -func (t *Tree) unexpected(token item, context string) { - t.errorf("unexpected %s in %s", token, context) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (t *Tree) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - if t != nil { - t.stopParse() - } - *errp = e.(error) - } - return -} - -// startParse initializes the parser, using the lexer. -func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { - t.Root = nil - t.lex = lex - t.vars = []string{"$"} - t.funcs = funcs -} - -// stopParse terminates parsing. -func (t *Tree) stopParse() { - t.lex = nil - t.vars = nil - t.funcs = nil -} - -// Parse parses the template definition string to construct a representation of -// the template for execution. If either action delimiter string is empty, the -// default ("{{" or "}}") is used. Embedded template definitions are added to -// the treeSet map. 
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { - defer t.recover(&err) - t.ParseName = t.Name - t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) - t.text = text - t.parse(treeSet) - t.add(treeSet) - t.stopParse() - return t, nil -} - -// add adds tree to the treeSet. -func (t *Tree) add(treeSet map[string]*Tree) { - tree := treeSet[t.Name] - if tree == nil || IsEmptyTree(tree.Root) { - treeSet[t.Name] = t - return - } - if !IsEmptyTree(t.Root) { - t.errorf("template: multiple definition of template %q", t.Name) - } -} - -// IsEmptyTree reports whether this tree (node) is empty of everything but space. -func IsEmptyTree(n Node) bool { - switch n := n.(type) { - case nil: - return true - case *ActionNode: - case *IfNode: - case *ListNode: - for _, node := range n.Nodes { - if !IsEmptyTree(node) { - return false - } - } - return true - case *RangeNode: - case *TemplateNode: - case *TextNode: - return len(bytes.TrimSpace(n.Text)) == 0 - case *WithNode: - default: - panic("unknown node: " + n.String()) - } - return false -} - -// parse is the top-level parser for a template, essentially the same -// as itemList except it also parses {{define}} actions. -// It runs to EOF. -func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { - t.Root = t.newList(t.peek().pos) - for t.peek().typ != itemEOF { - if t.peek().typ == itemLeftDelim { - delim := t.next() - if t.nextNonSpace().typ == itemDefine { - newT := New("definition") // name will be updated once we know it. - newT.text = t.text - newT.ParseName = t.ParseName - newT.startParse(t.funcs, t.lex) - newT.parseDefinition(treeSet) - continue - } - t.backup2(delim) - } - n := t.textOrAction() - if n.Type() == nodeEnd { - t.errorf("unexpected %s", n) - } - t.Root.append(n) - } - return nil -} - -// parseDefinition parses a {{define}} ... {{end}} template definition and -// installs the definition in the treeSet map. The "define" keyword has already -// been scanned. -func (t *Tree) parseDefinition(treeSet map[string]*Tree) { - const context = "define clause" - name := t.expectOneOf(itemString, itemRawString, context) - var err error - t.Name, err = strconv.Unquote(name.val) - if err != nil { - t.error(err) - } - t.expect(itemRightDelim, context) - var end Node - t.Root, end = t.itemList() - if end.Type() != nodeEnd { - t.errorf("unexpected %s in %s", end, context) - } - t.add(treeSet) - t.stopParse() -} - -// itemList: -// textOrAction* -// Terminates at {{end}} or {{else}}, returned separately. -func (t *Tree) itemList() (list *ListNode, next Node) { - list = t.newList(t.peekNonSpace().pos) - for t.peekNonSpace().typ != itemEOF { - n := t.textOrAction() - switch n.Type() { - case nodeEnd, nodeElse: - return list, n - } - list.append(n) - } - t.errorf("unexpected EOF") - return -} - -// textOrAction: -// text | action -func (t *Tree) textOrAction() Node { - switch token := t.nextNonSpace(); token.typ { - case itemElideNewline: - return t.elideNewline() - case itemText: - return t.newText(token.pos, token.val) - case itemLeftDelim: - return t.action() - default: - t.unexpected(token, "input") - } - return nil -} - -// elideNewline: -// Remove newlines trailing rightDelim if \\ is present. 
-func (t *Tree) elideNewline() Node { - token := t.peek() - if token.typ != itemText { - t.unexpected(token, "input") - return nil - } - - t.next() - stripped := strings.TrimLeft(token.val, "\n\r") - diff := len(token.val) - len(stripped) - if diff > 0 { - // This is a bit nasty. We mutate the token in-place to remove - // preceding newlines. - token.pos += Pos(diff) - token.val = stripped - } - return t.newText(token.pos, token.val) -} - -// Action: -// control -// command ("|" command)* -// Left delim is past. Now get actions. -// First word could be a keyword such as range. -func (t *Tree) action() (n Node) { - switch token := t.nextNonSpace(); token.typ { - case itemElse: - return t.elseControl() - case itemEnd: - return t.endControl() - case itemIf: - return t.ifControl() - case itemRange: - return t.rangeControl() - case itemTemplate: - return t.templateControl() - case itemWith: - return t.withControl() - } - t.backup() - // Do not pop variables; they persist until "end". - return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) -} - -// Pipeline: -// declarations? command ('|' command)* -func (t *Tree) pipeline(context string) (pipe *PipeNode) { - var decl []*VariableNode - pos := t.peekNonSpace().pos - // Are there declarations? - for { - if v := t.peekNonSpace(); v.typ == itemVariable { - t.next() - // Since space is a token, we need 3-token look-ahead here in the worst case: - // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an - // argument variable rather than a declaration. So remember the token - // adjacent to the variable so we can push it back if necessary. - tokenAfterVariable := t.peek() - if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") { - t.nextNonSpace() - variable := t.newVariable(v.pos, v.val) - decl = append(decl, variable) - t.vars = append(t.vars, v.val) - if next.typ == itemChar && next.val == "," { - if context == "range" && len(decl) < 2 { - continue - } - t.errorf("too many declarations in %s", context) - } - } else if tokenAfterVariable.typ == itemSpace { - t.backup3(v, tokenAfterVariable) - } else { - t.backup2(v) - } - } - break - } - pipe = t.newPipeline(pos, t.lex.lineNumber(), decl) - for { - switch token := t.nextNonSpace(); token.typ { - case itemRightDelim, itemRightParen: - if len(pipe.Cmds) == 0 { - t.errorf("missing value for %s", context) - } - if token.typ == itemRightParen { - t.backup() - } - return - case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier, - itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen: - t.backup() - pipe.append(t.command()) - default: - t.unexpected(token, context) - } - } -} - -func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { - defer t.popVars(len(t.vars)) - line = t.lex.lineNumber() - pipe = t.pipeline(context) - var next Node - list, next = t.itemList() - switch next.Type() { - case nodeEnd: //done - case nodeElse: - if allowElseIf { - // Special case for "else if". If the "else" is followed immediately by an "if", - // the elseControl will have left the "if" token pending. Treat - // {{if a}}_{{else if b}}_{{end}} - // as - // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}. - // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}} - // is assumed. This technique works even for long if-else-if chains. - // TODO: Should we allow else-if in with and range? 
- if t.peek().typ == itemIf { - t.next() // Consume the "if" token. - elseList = t.newList(next.Position()) - elseList.append(t.ifControl()) - // Do not consume the next item - only one {{end}} required. - break - } - } - elseList, next = t.itemList() - if next.Type() != nodeEnd { - t.errorf("expected end; found %s", next) - } - } - return pipe.Position(), line, pipe, list, elseList -} - -// If: -// {{if pipeline}} itemList {{end}} -// {{if pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) ifControl() Node { - return t.newIf(t.parseControl(true, "if")) -} - -// Range: -// {{range pipeline}} itemList {{end}} -// {{range pipeline}} itemList {{else}} itemList {{end}} -// Range keyword is past. -func (t *Tree) rangeControl() Node { - return t.newRange(t.parseControl(false, "range")) -} - -// With: -// {{with pipeline}} itemList {{end}} -// {{with pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) withControl() Node { - return t.newWith(t.parseControl(false, "with")) -} - -// End: -// {{end}} -// End keyword is past. -func (t *Tree) endControl() Node { - return t.newEnd(t.expect(itemRightDelim, "end").pos) -} - -// Else: -// {{else}} -// Else keyword is past. -func (t *Tree) elseControl() Node { - // Special case for "else if". - peek := t.peekNonSpace() - if peek.typ == itemIf { - // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". - return t.newElse(peek.pos, t.lex.lineNumber()) - } - return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) -} - -// Template: -// {{template stringValue pipeline}} -// Template keyword is past. The name must be something that can evaluate -// to a string. -func (t *Tree) templateControl() Node { - var name string - token := t.nextNonSpace() - switch token.typ { - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - name = s - default: - t.unexpected(token, "template invocation") - } - var pipe *PipeNode - if t.nextNonSpace().typ != itemRightDelim { - t.backup() - // Do not pop variables; they persist until "end". - pipe = t.pipeline("template") - } - return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) -} - -// command: -// operand (space operand)* -// space-separated arguments up to a pipeline character or right delimiter. -// we consume the pipe character but leave the right delim to terminate the action. -func (t *Tree) command() *CommandNode { - cmd := t.newCommand(t.peekNonSpace().pos) - for { - t.peekNonSpace() // skip leading spaces. - operand := t.operand() - if operand != nil { - cmd.append(operand) - } - switch token := t.next(); token.typ { - case itemSpace: - continue - case itemError: - t.errorf("%s", token.val) - case itemRightDelim, itemRightParen: - t.backup() - case itemPipe: - default: - t.errorf("unexpected %s in operand; missing space?", token) - } - break - } - if len(cmd.Args) == 0 { - t.errorf("empty command") - } - return cmd -} - -// operand: -// term .Field* -// An operand is a space-separated component of a command, -// a term possibly followed by field accesses. -// A nil return means the next item is not an operand. 
-func (t *Tree) operand() Node { - node := t.term() - if node == nil { - return nil - } - if t.peek().typ == itemField { - chain := t.newChain(t.peek().pos, node) - for t.peek().typ == itemField { - chain.Add(t.next().val) - } - // Compatibility with original API: If the term is of type NodeField - // or NodeVariable, just put more fields on the original. - // Otherwise, keep the Chain node. - // TODO: Switch to Chains always when we can. - switch node.Type() { - case NodeField: - node = t.newField(chain.Position(), chain.String()) - case NodeVariable: - node = t.newVariable(chain.Position(), chain.String()) - default: - node = chain - } - } - return node -} - -// term: -// literal (number, string, nil, boolean) -// function (identifier) -// . -// .Field -// $ -// '(' pipeline ')' -// A term is a simple "expression". -// A nil return means the next item is not a term. -func (t *Tree) term() Node { - switch token := t.nextNonSpace(); token.typ { - case itemError: - t.errorf("%s", token.val) - case itemIdentifier: - if !t.hasFunction(token.val) { - t.errorf("function %q not defined", token.val) - } - return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) - case itemDot: - return t.newDot(token.pos) - case itemNil: - return t.newNil(token.pos) - case itemVariable: - return t.useVar(token.pos, token.val) - case itemField: - return t.newField(token.pos, token.val) - case itemBool: - return t.newBool(token.pos, token.val == "true") - case itemCharConstant, itemComplex, itemNumber: - number, err := t.newNumber(token.pos, token.val, token.typ) - if err != nil { - t.error(err) - } - return number - case itemLeftParen: - pipe := t.pipeline("parenthesized pipeline") - if token := t.next(); token.typ != itemRightParen { - t.errorf("unclosed right paren: unexpected %s", token) - } - return pipe - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - return t.newString(token.pos, token.val, s) - } - t.backup() - return nil -} - -// hasFunction reports if a function name exists in the Tree's maps. -func (t *Tree) hasFunction(name string) bool { - for _, funcMap := range t.funcs { - if funcMap == nil { - continue - } - if funcMap[name] != nil { - return true - } - } - return false -} - -// popVars trims the variable list to the specified length -func (t *Tree) popVars(n int) { - t.vars = t.vars[:n] -} - -// useVar returns a node for a variable reference. It errors if the -// variable is not defined. -func (t *Tree) useVar(pos Pos, name string) Node { - v := t.newVariable(pos, name) - for _, varName := range t.vars { - if varName == v.Ident[0] { - return v - } - } - t.errorf("undefined variable %q", v.Ident[0]) - return nil -} diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go deleted file mode 100644 index 447ed2abae..0000000000 --- a/vendor/github.com/alecthomas/template/template.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "fmt" - "reflect" - - "github.com/alecthomas/template/parse" -) - -// common holds the information shared by related templates. -type common struct { - tmpl map[string]*Template - // We use two maps, one for parsing and one for execution. - // This separation makes the API cleaner since it doesn't - // expose reflection to the client. 
- parseFuncs FuncMap - execFuncs map[string]reflect.Value -} - -// Template is the representation of a parsed template. The *parse.Tree -// field is exported only for use by html/template and should be treated -// as unexported by all other clients. -type Template struct { - name string - *parse.Tree - *common - leftDelim string - rightDelim string -} - -// New allocates a new template with the given name. -func New(name string) *Template { - return &Template{ - name: name, - } -} - -// Name returns the name of the template. -func (t *Template) Name() string { - return t.name -} - -// New allocates a new template associated with the given one and with the same -// delimiters. The association, which is transitive, allows one template to -// invoke another with a {{template}} action. -func (t *Template) New(name string) *Template { - t.init() - return &Template{ - name: name, - common: t.common, - leftDelim: t.leftDelim, - rightDelim: t.rightDelim, - } -} - -func (t *Template) init() { - if t.common == nil { - t.common = new(common) - t.tmpl = make(map[string]*Template) - t.parseFuncs = make(FuncMap) - t.execFuncs = make(map[string]reflect.Value) - } -} - -// Clone returns a duplicate of the template, including all associated -// templates. The actual representation is not copied, but the name space of -// associated templates is, so further calls to Parse in the copy will add -// templates to the copy but not to the original. Clone can be used to prepare -// common templates and use them with variant definitions for other templates -// by adding the variants after the clone is made. -func (t *Template) Clone() (*Template, error) { - nt := t.copy(nil) - nt.init() - nt.tmpl[t.name] = nt - for k, v := range t.tmpl { - if k == t.name { // Already installed. - continue - } - // The associated templates share nt's common structure. - tmpl := v.copy(nt.common) - nt.tmpl[k] = tmpl - } - for k, v := range t.parseFuncs { - nt.parseFuncs[k] = v - } - for k, v := range t.execFuncs { - nt.execFuncs[k] = v - } - return nt, nil -} - -// copy returns a shallow copy of t, with common set to the argument. -func (t *Template) copy(c *common) *Template { - nt := New(t.name) - nt.Tree = t.Tree - nt.common = c - nt.leftDelim = t.leftDelim - nt.rightDelim = t.rightDelim - return nt -} - -// AddParseTree creates a new template with the name and parse tree -// and associates it with t. -func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { - if t.common != nil && t.tmpl[name] != nil { - return nil, fmt.Errorf("template: redefinition of template %q", name) - } - nt := t.New(name) - nt.Tree = tree - t.tmpl[name] = nt - return nt, nil -} - -// Templates returns a slice of the templates associated with t, including t -// itself. -func (t *Template) Templates() []*Template { - if t.common == nil { - return nil - } - // Return a slice so we don't expose the map. - m := make([]*Template, 0, len(t.tmpl)) - for _, v := range t.tmpl { - m = append(m, v) - } - return m -} - -// Delims sets the action delimiters to the specified strings, to be used in -// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template -// definitions will inherit the settings. An empty delimiter stands for the -// corresponding default: {{ or }}. -// The return value is the template, so calls can be chained. -func (t *Template) Delims(left, right string) *Template { - t.leftDelim = left - t.rightDelim = right - return t -} - -// Funcs adds the elements of the argument map to the template's function map. 
-// It panics if a value in the map is not a function with appropriate return -// type. However, it is legal to overwrite elements of the map. The return -// value is the template, so calls can be chained. -func (t *Template) Funcs(funcMap FuncMap) *Template { - t.init() - addValueFuncs(t.execFuncs, funcMap) - addFuncs(t.parseFuncs, funcMap) - return t -} - -// Lookup returns the template with the given name that is associated with t, -// or nil if there is no such template. -func (t *Template) Lookup(name string) *Template { - if t.common == nil { - return nil - } - return t.tmpl[name] -} - -// Parse parses a string into a template. Nested template definitions will be -// associated with the top-level template t. Parse may be called multiple times -// to parse definitions of templates to associate with t. It is an error if a -// resulting template is non-empty (contains content other than template -// definitions) and would replace a non-empty template with the same name. -// (In multiple calls to Parse with the same receiver template, only one call -// can contain text other than space, comments, and template definitions.) -func (t *Template) Parse(text string) (*Template, error) { - t.init() - trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins) - if err != nil { - return nil, err - } - // Add the newly parsed trees, including the one for t, into our common structure. - for name, tree := range trees { - // If the name we parsed is the name of this template, overwrite this template. - // The associate method checks it's not a redefinition. - tmpl := t - if name != t.name { - tmpl = t.New(name) - } - // Even if t == tmpl, we need to install it in the common.tmpl map. - if replace, err := t.associate(tmpl, tree); err != nil { - return nil, err - } else if replace { - tmpl.Tree = tree - } - tmpl.leftDelim = t.leftDelim - tmpl.rightDelim = t.rightDelim - } - return t, nil -} - -// associate installs the new template into the group of templates associated -// with t. It is an error to reuse a name except to overwrite an empty -// template. The two are already known to share the common structure. -// The boolean return value reports wither to store this tree as t.Tree. -func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) { - if new.common != t.common { - panic("internal error: associate not common") - } - name := new.name - if old := t.tmpl[name]; old != nil { - oldIsEmpty := parse.IsEmptyTree(old.Root) - newIsEmpty := parse.IsEmptyTree(tree.Root) - if newIsEmpty { - // Whether old is empty or not, new is empty; no reason to replace old. 
- return false, nil - } - if !oldIsEmpty { - return false, fmt.Errorf("template: redefinition of template %q", name) - } - } - t.tmpl[name] = new - return true, nil -} diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING deleted file mode 100644 index 2993ec085d..0000000000 --- a/vendor/github.com/alecthomas/units/COPYING +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 Alec Thomas - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md deleted file mode 100644 index bee884e3c1..0000000000 --- a/vendor/github.com/alecthomas/units/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Units - Helpful unit multipliers and functions for Go - -The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. - -It allows for code like this: - -```go -n, err := ParseBase2Bytes("1KB") -// n == 1024 -n = units.Mebibyte * 512 -``` diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go deleted file mode 100644 index 61d0ca479a..0000000000 --- a/vendor/github.com/alecthomas/units/bytes.go +++ /dev/null @@ -1,85 +0,0 @@ -package units - -// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, -// etc.). -type Base2Bytes int64 - -// Base-2 byte units. -const ( - Kibibyte Base2Bytes = 1024 - KiB = Kibibyte - Mebibyte = Kibibyte * 1024 - MiB = Mebibyte - Gibibyte = Mebibyte * 1024 - GiB = Gibibyte - Tebibyte = Gibibyte * 1024 - TiB = Tebibyte - Pebibyte = Tebibyte * 1024 - PiB = Pebibyte - Exbibyte = Pebibyte * 1024 - EiB = Exbibyte -) - -var ( - bytesUnitMap = MakeUnitMap("iB", "B", 1024) - oldBytesUnitMap = MakeUnitMap("B", "B", 1024) -) - -// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB -// and KiB are both 1024. -// However "kB", which is the correct SI spelling of 1000 Bytes, is rejected. -func ParseBase2Bytes(s string) (Base2Bytes, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, oldBytesUnitMap) - } - return Base2Bytes(n), err -} - -func (b Base2Bytes) String() string { - return ToString(int64(b), 1024, "iB", "B") -} - -var ( - metricBytesUnitMap = MakeUnitMap("B", "B", 1000) -) - -// MetricBytes are SI byte units (1000 bytes in a kilobyte). -type MetricBytes SI - -// SI base-10 byte units. 
-const ( - Kilobyte MetricBytes = 1000 - KB = Kilobyte - Megabyte = Kilobyte * 1000 - MB = Megabyte - Gigabyte = Megabyte * 1000 - GB = Gigabyte - Terabyte = Gigabyte * 1000 - TB = Terabyte - Petabyte = Terabyte * 1000 - PB = Petabyte - Exabyte = Petabyte * 1000 - EB = Exabyte -) - -// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. -func ParseMetricBytes(s string) (MetricBytes, error) { - n, err := ParseUnit(s, metricBytesUnitMap) - return MetricBytes(n), err -} - -// TODO: represents 1000B as uppercase "KB", while SI standard requires "kB". -func (m MetricBytes) String() string { - return ToString(int64(m), 1000, "B", "B") -} - -// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, -// respectively. That is, KiB represents 1024 and kB, KB represent 1000. -func ParseStrictBytes(s string) (int64, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, metricBytesUnitMap) - } - return int64(n), err -} diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go deleted file mode 100644 index 156ae38672..0000000000 --- a/vendor/github.com/alecthomas/units/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package units provides helpful unit multipliers and functions for Go. -// -// The goal of this package is to have functionality similar to the time [1] package. -// -// -// [1] http://golang.org/pkg/time/ -// -// It allows for code like this: -// -// n, err := ParseBase2Bytes("1KB") -// // n == 1024 -// n = units.Mebibyte * 512 -package units diff --git a/vendor/github.com/alecthomas/units/go.mod b/vendor/github.com/alecthomas/units/go.mod deleted file mode 100644 index c7fb91f2b2..0000000000 --- a/vendor/github.com/alecthomas/units/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/alecthomas/units - -require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/alecthomas/units/go.sum b/vendor/github.com/alecthomas/units/go.sum deleted file mode 100644 index 8fdee5854f..0000000000 --- a/vendor/github.com/alecthomas/units/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go deleted file mode 100644 index 99b2fa4fcb..0000000000 --- a/vendor/github.com/alecthomas/units/si.go +++ /dev/null @@ -1,50 +0,0 @@ -package units - -// SI units. -type SI int64 - -// SI unit multiples. 
-const ( - Kilo SI = 1000 - Mega = Kilo * 1000 - Giga = Mega * 1000 - Tera = Giga * 1000 - Peta = Tera * 1000 - Exa = Peta * 1000 -) - -func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { - res := map[string]float64{ - shortSuffix: 1, - // see below for "k" / "K" - "M" + suffix: float64(scale * scale), - "G" + suffix: float64(scale * scale * scale), - "T" + suffix: float64(scale * scale * scale * scale), - "P" + suffix: float64(scale * scale * scale * scale * scale), - "E" + suffix: float64(scale * scale * scale * scale * scale * scale), - } - - // Standard SI prefixes use lowercase "k" for kilo = 1000. - // For compatibility, and to be fool-proof, we accept both "k" and "K" in metric mode. - // - // However, official binary prefixes are always capitalized - "KiB" - - // and we specifically never parse "kB" as 1024B because: - // - // (1) people pedantic enough to use lowercase according to SI unlikely to abuse "k" to mean 1024 :-) - // - // (2) Use of capital K for 1024 was an informal tradition predating IEC prefixes: - // "The binary meaning of the kilobyte for 1024 bytes typically uses the symbol KB, with an - // uppercase letter K." - // -- https://en.wikipedia.org/wiki/Kilobyte#Base_2_(1024_bytes) - // "Capitalization of the letter K became the de facto standard for binary notation, although this - // could not be extended to higher powers, and use of the lowercase k did persist.[13][14][15]" - // -- https://en.wikipedia.org/wiki/Binary_prefix#History - // See also the extensive https://en.wikipedia.org/wiki/Timeline_of_binary_prefixes. - if scale == 1024 { - res["K"+suffix] = float64(scale) - } else { - res["k"+suffix] = float64(scale) - res["K"+suffix] = float64(scale) - } - return res -} diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go deleted file mode 100644 index 6527e92d16..0000000000 --- a/vendor/github.com/alecthomas/units/util.go +++ /dev/null @@ -1,138 +0,0 @@ -package units - -import ( - "errors" - "fmt" - "strings" -) - -var ( - siUnits = []string{"", "K", "M", "G", "T", "P", "E"} -) - -func ToString(n int64, scale int64, suffix, baseSuffix string) string { - mn := len(siUnits) - out := make([]string, mn) - for i, m := range siUnits { - if n%scale != 0 || i == 0 && n == 0 { - s := suffix - if i == 0 { - s = baseSuffix - } - out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) - } - n /= scale - if n == 0 { - break - } - } - return strings.Join(out, "") -} - -// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 -var errLeadingInt = errors.New("units: bad [0-9]*") // never printed - -// leadingInt consumes the leading [0-9]* from s. -func leadingInt(s string) (x int64, rem string, err error) { - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c < '0' || c > '9' { - break - } - if x >= (1<<63-10)/10 { - // overflow - return 0, "", errLeadingInt - } - x = x*10 + int64(c) - '0' - } - return x, s[i:], nil -} - -func ParseUnit(s string, unitMap map[string]float64) (int64, error) { - // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ - orig := s - f := float64(0) - neg := false - - // Consume [-+]? - if s != "" { - c := s[0] - if c == '-' || c == '+' { - neg = c == '-' - s = s[1:] - } - } - // Special case: if all that is left is "0", this is zero. 
- if s == "0" { - return 0, nil - } - if s == "" { - return 0, errors.New("units: invalid " + orig) - } - for s != "" { - g := float64(0) // this element of the sequence - - var x int64 - var err error - - // The next character must be [0-9.] - if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { - return 0, errors.New("units: invalid " + orig) - } - // Consume [0-9]* - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - g = float64(x) - pre := pl != len(s) // whether we consumed anything before a period - - // Consume (\.[0-9]*)? - post := false - if s != "" && s[0] == '.' { - s = s[1:] - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - scale := 1.0 - for n := pl - len(s); n > 0; n-- { - scale *= 10 - } - g += float64(x) / scale - post = pl != len(s) - } - if !pre && !post { - // no digits (e.g. ".s" or "-.s") - return 0, errors.New("units: invalid " + orig) - } - - // Consume unit. - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c == '.' || ('0' <= c && c <= '9') { - break - } - } - u := s[:i] - s = s[i:] - unit, ok := unitMap[u] - if !ok { - return 0, errors.New("units: unknown unit " + u + " in " + orig) - } - - f += g * unit - } - - if neg { - f = -f - } - if f < float64(-1<<63) || f > float64(1<<63-1) { - return 0, errors.New("units: overflow parsing unit") - } - return int64(f), nil -} diff --git a/vendor/github.com/beevik/ntp/.travis.yml b/vendor/github.com/beevik/ntp/.travis.yml deleted file mode 100644 index e996c276b8..0000000000 --- a/vendor/github.com/beevik/ntp/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false - -go: - - 1.9.x - - 1.12.x - - tip - -matrix: - allow_failures: - - go: tip - -script: - - go test -v ./... diff --git a/vendor/github.com/beevik/ntp/CONTRIBUTORS b/vendor/github.com/beevik/ntp/CONTRIBUTORS deleted file mode 100644 index 626c12eb59..0000000000 --- a/vendor/github.com/beevik/ntp/CONTRIBUTORS +++ /dev/null @@ -1,7 +0,0 @@ -Brett Vickers (beevik) -Mikhail Salosin (AlphaB) -Anton Tolchanov (knyar) -Christopher Batey (chbatey) -Meng Zhuo (mengzhuo) -Leonid Evdokimov (darkk) -Ask Bjørn Hansen (abh) \ No newline at end of file diff --git a/vendor/github.com/beevik/ntp/LICENSE b/vendor/github.com/beevik/ntp/LICENSE deleted file mode 100644 index 45d3d49591..0000000000 --- a/vendor/github.com/beevik/ntp/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2015-2017 Brett Vickers. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL COPYRIGHT HOLDER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/beevik/ntp/README.md b/vendor/github.com/beevik/ntp/README.md deleted file mode 100644 index f77bb7f0fd..0000000000 --- a/vendor/github.com/beevik/ntp/README.md +++ /dev/null @@ -1,72 +0,0 @@ -[![Build Status](https://travis-ci.org/beevik/ntp.svg?branch=master)](https://travis-ci.org/beevik/ntp) -[![GoDoc](https://godoc.org/github.com/beevik/ntp?status.svg)](https://godoc.org/github.com/beevik/ntp) - -ntp -=== - -The ntp package is an implementation of a Simple NTP (SNTP) client based on -[RFC5905](https://tools.ietf.org/html/rfc5905). It allows you to connect to -a remote NTP server and request information about the current time. - - -## Querying the current time - -If all you care about is the current time according to a remote NTP server, -simply use the `Time` function: -```go -time, err := ntp.Time("0.beevik-ntp.pool.ntp.org") -``` - - -## Querying time metadata - -To obtain the current time as well as some additional metadata about the time, -use the [`Query`](https://godoc.org/github.com/beevik/ntp#Query) function: -```go -response, err := ntp.Query("0.beevik-ntp.pool.ntp.org") -time := time.Now().Add(response.ClockOffset) -``` - -Alternatively, use the [`QueryWithOptions`](https://godoc.org/github.com/beevik/ntp#QueryWithOptions) -function if you want to change the default behavior used by the `Query` -function: -```go -options := ntp.QueryOptions{ Timeout: 30*time.Second, TTL: 5 } -response, err := ntp.QueryWithOptions("0.beevik-ntp.pool.ntp.org", options) -time := time.Now().Add(response.ClockOffset) -``` - -The [`Response`](https://godoc.org/github.com/beevik/ntp#Response) structure -returned by `Query` includes the following information: -* `Time`: The time the server transmitted its response, according to its own clock. -* `ClockOffset`: The estimated offset of the local system clock relative to the server's clock. For a more accurate time reading, you may add this offset to any subsequent system clock reading. -* `RTT`: An estimate of the round-trip-time delay between the client and the server. -* `Precision`: The precision of the server's clock reading. -* `Stratum`: The server's stratum, which indicates the number of hops from the server to the reference clock. A stratum 1 server is directly attached to the reference clock. If the stratum is zero, the server has responded with the "kiss of death". -* `ReferenceID`: A unique identifier for the consulted reference clock. -* `ReferenceTime`: The time at which the server last updated its local clock setting. -* `RootDelay`: The server's aggregate round-trip-time delay to the stratum 1 server. -* `RootDispersion`: The server's estimated maximum measurement error relative to the reference clock. -* `RootDistance`: An estimate of the root synchronization distance between the client and the stratum 1 server. -* `Leap`: The leap second indicator, indicating whether a second should be added to or removed from the current month's last minute. 
-* `MinError`: A lower bound on the clock error between the client and the server. -* `KissCode`: A 4-character string describing the reason for a "kiss of death" response (stratum=0). -* `Poll`: The maximum polling interval between successive messages to the server. - -The `Response` structure's [`Validate`](https://godoc.org/github.com/beevik/ntp#Response.Validate) -method performs additional sanity checks to determine whether the response is -suitable for time synchronization purposes. -```go -err := response.Validate() -if err == nil { - // response data is suitable for synchronization purposes -} -``` - -## Using the NTP pool - -The NTP pool is a shared resource used by people all over the world. -To prevent it from becoming overloaded, please avoid querying the standard -`pool.ntp.org` zone names in your applications. Instead, consider requesting -your own [vendor zone](http://www.pool.ntp.org/en/vendors.html) or [joining -the pool](http://www.pool.ntp.org/join.html). diff --git a/vendor/github.com/beevik/ntp/RELEASE_NOTES.md b/vendor/github.com/beevik/ntp/RELEASE_NOTES.md deleted file mode 100644 index 932f10194f..0000000000 --- a/vendor/github.com/beevik/ntp/RELEASE_NOTES.md +++ /dev/null @@ -1,64 +0,0 @@ -Release v0.3.0 -============== - -There have been no breaking changes or further deprecations since the -previous release. - -**Changes** - -* Fixed a bug in the calculation of NTP timestamps. - -Release v0.2.0 -============== - -There are no breaking changes or further deprecations in this release. - -**Changes** - -* Added `KissCode` to the `Response` structure. - - -Release v0.1.1 -============== - -**Breaking changes** - -* Removed the `MaxStratum` constant. - -**Deprecations** - -* Officially deprecated the `TimeV` function. - -**Internal changes** - -* Removed `minDispersion` from the `RootDistance` calculation, since the value - was arbitrary. -* Moved some validation into main code path so that invalid `TransmitTime` and - `mode` responses trigger an error even when `Response.Validate` is not - called. - - -Release v0.1.0 -============== - -This is the initial release of the `ntp` package. Currently it supports the following features: -* `Time()` to query the current time according to a remote NTP server. -* `Query()` to query multiple pieces of time-related information from a remote NTP server. -* `QueryWithOptions()`, which is like `Query()` but with the ability to override default query options. - -Time-related information returned by the `Query` functions includes: -* `Time`: the time the server transmitted its response, according to the server's clock. -* `ClockOffset`: the estimated offset of the client's clock relative to the server's clock. You may apply this offset to any local system clock reading once the query is complete. -* `RTT`: an estimate of the round-trip-time delay between the client and the server. -* `Precision`: the precision of the server's clock reading. -* `Stratum`: the "stratum" level of the server, where 1 indicates a server directly connected to a reference clock, and values greater than 1 indicating the number of hops from the reference clock. -* `ReferenceID`: A unique identifier for the NTP server that was contacted. -* `ReferenceTime`: The time at which the server last updated its local clock setting. -* `RootDelay`: The server's round-trip delay to the reference clock. -* `RootDispersion`: The server's total dispersion to the referenced clock. -* `RootDistance`: An estimate of the root synchronization distance. 
-* `Leap`: The leap second indicator. -* `MinError`: A lower bound on the clock error between the client and the server. -* `Poll`: the maximum polling interval between successive messages on the server. - -The `Response` structure returned by the `Query` functions also contains a `Response.Validate()` function that returns an error if any of the fields returned by the server are invalid. diff --git a/vendor/github.com/beevik/ntp/ntp.go b/vendor/github.com/beevik/ntp/ntp.go deleted file mode 100644 index d8b4952c9d..0000000000 --- a/vendor/github.com/beevik/ntp/ntp.go +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright 2015-2017 Brett Vickers. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ntp provides an implementation of a Simple NTP (SNTP) client -// capable of querying the current time from a remote NTP server. See -// RFC5905 (https://tools.ietf.org/html/rfc5905) for more details. -// -// This approach grew out of a go-nuts post by Michael Hofmann: -// https://groups.google.com/forum/?fromgroups#!topic/golang-nuts/FlcdMU5fkLQ -package ntp - -import ( - "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "net" - "time" - - "golang.org/x/net/ipv4" -) - -// The LeapIndicator is used to warn if a leap second should be inserted -// or deleted in the last minute of the current month. -type LeapIndicator uint8 - -const ( - // LeapNoWarning indicates no impending leap second. - LeapNoWarning LeapIndicator = 0 - - // LeapAddSecond indicates the last minute of the day has 61 seconds. - LeapAddSecond = 1 - - // LeapDelSecond indicates the last minute of the day has 59 seconds. - LeapDelSecond = 2 - - // LeapNotInSync indicates an unsynchronized leap second. - LeapNotInSync = 3 -) - -// Internal constants -const ( - defaultNtpVersion = 4 - nanoPerSec = 1000000000 - maxStratum = 16 - defaultTimeout = 5 * time.Second - maxPollInterval = (1 << 17) * time.Second - maxDispersion = 16 * time.Second -) - -// Internal variables -var ( - ntpEpoch = time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) -) - -type mode uint8 - -// NTP modes. This package uses only client mode. -const ( - reserved mode = 0 + iota - symmetricActive - symmetricPassive - client - server - broadcast - controlMessage - reservedPrivate -) - -// An ntpTime is a 64-bit fixed-point (Q32.32) representation of the number of -// seconds elapsed. -type ntpTime uint64 - -// Duration interprets the fixed-point ntpTime as a number of elapsed seconds -// and returns the corresponding time.Duration value. -func (t ntpTime) Duration() time.Duration { - sec := (t >> 32) * nanoPerSec - frac := (t & 0xffffffff) * nanoPerSec - nsec := frac >> 32 - if uint32(frac) >= 0x80000000 { - nsec++ - } - return time.Duration(sec + nsec) -} - -// Time interprets the fixed-point ntpTime as an absolute time and returns -// the corresponding time.Time value. -func (t ntpTime) Time() time.Time { - return ntpEpoch.Add(t.Duration()) -} - -// toNtpTime converts the time.Time value t into its 64-bit fixed-point -// ntpTime representation. -func toNtpTime(t time.Time) ntpTime { - nsec := uint64(t.Sub(ntpEpoch)) - sec := nsec / nanoPerSec - nsec = uint64(nsec-sec*nanoPerSec) << 32 - frac := uint64(nsec / nanoPerSec) - if nsec%nanoPerSec >= nanoPerSec/2 { - frac++ - } - return ntpTime(sec<<32 | frac) -} - -// An ntpTimeShort is a 32-bit fixed-point (Q16.16) representation of the -// number of seconds elapsed. 
-type ntpTimeShort uint32 - -// Duration interprets the fixed-point ntpTimeShort as a number of elapsed -// seconds and returns the corresponding time.Duration value. -func (t ntpTimeShort) Duration() time.Duration { - sec := uint64(t>>16) * nanoPerSec - frac := uint64(t&0xffff) * nanoPerSec - nsec := frac >> 16 - if uint16(frac) >= 0x8000 { - nsec++ - } - return time.Duration(sec + nsec) -} - -// msg is an internal representation of an NTP packet. -type msg struct { - LiVnMode uint8 // Leap Indicator (2) + Version (3) + Mode (3) - Stratum uint8 - Poll int8 - Precision int8 - RootDelay ntpTimeShort - RootDispersion ntpTimeShort - ReferenceID uint32 - ReferenceTime ntpTime - OriginTime ntpTime - ReceiveTime ntpTime - TransmitTime ntpTime -} - -// setVersion sets the NTP protocol version on the message. -func (m *msg) setVersion(v int) { - m.LiVnMode = (m.LiVnMode & 0xc7) | uint8(v)<<3 -} - -// setMode sets the NTP protocol mode on the message. -func (m *msg) setMode(md mode) { - m.LiVnMode = (m.LiVnMode & 0xf8) | uint8(md) -} - -// setLeap modifies the leap indicator on the message. -func (m *msg) setLeap(li LeapIndicator) { - m.LiVnMode = (m.LiVnMode & 0x3f) | uint8(li)<<6 -} - -// getVersion returns the version value in the message. -func (m *msg) getVersion() int { - return int((m.LiVnMode >> 3) & 0x07) -} - -// getMode returns the mode value in the message. -func (m *msg) getMode() mode { - return mode(m.LiVnMode & 0x07) -} - -// getLeap returns the leap indicator on the message. -func (m *msg) getLeap() LeapIndicator { - return LeapIndicator((m.LiVnMode >> 6) & 0x03) -} - -// QueryOptions contains the list of configurable options that may be used -// with the QueryWithOptions function. -type QueryOptions struct { - Timeout time.Duration // defaults to 5 seconds - Version int // NTP protocol version, defaults to 4 - LocalAddress string // IP address to use for the client address - Port int // Server port, defaults to 123 - TTL int // IP TTL to use, defaults to system default -} - -// A Response contains time data, some of which is returned by the NTP server -// and some of which is calculated by the client. -type Response struct { - // Time is the transmit time reported by the server just before it - // responded to the client's NTP query. - Time time.Time - - // ClockOffset is the estimated offset of the client clock relative to - // the server. Add this to the client's system clock time to obtain a - // more accurate time. - ClockOffset time.Duration - - // RTT is the measured round-trip-time delay estimate between the client - // and the server. - RTT time.Duration - - // Precision is the reported precision of the server's clock. - Precision time.Duration - - // Stratum is the "stratum level" of the server. The smaller the number, - // the closer the server is to the reference clock. Stratum 1 servers are - // attached directly to the reference clock. A stratum value of 0 - // indicates the "kiss of death," which typically occurs when the client - // issues too many requests to the server in a short period of time. - Stratum uint8 - - // ReferenceID is a 32-bit identifier identifying the server or - // reference clock. - ReferenceID uint32 - - // ReferenceTime is the time when the server's system clock was last - // set or corrected. - ReferenceTime time.Time - - // RootDelay is the server's estimated aggregate round-trip-time delay to - // the stratum 1 server. 
- RootDelay time.Duration - - // RootDispersion is the server's estimated maximum measurement error - // relative to the stratum 1 server. - RootDispersion time.Duration - - // RootDistance is an estimate of the total synchronization distance - // between the client and the stratum 1 server. - RootDistance time.Duration - - // Leap indicates whether a leap second should be added or removed from - // the current month's last minute. - Leap LeapIndicator - - // MinError is a lower bound on the error between the client and server - // clocks. When the client and server are not synchronized to the same - // clock, the reported timestamps may appear to violate the principle of - // causality. In other words, the NTP server's response may indicate - // that a message was received before it was sent. In such cases, the - // minimum error may be useful. - MinError time.Duration - - // KissCode is a 4-character string describing the reason for a - // "kiss of death" response (stratum = 0). For a list of standard kiss - // codes, see https://tools.ietf.org/html/rfc5905#section-7.4. - KissCode string - - // Poll is the maximum interval between successive NTP polling messages. - // It is not relevant for simple NTP clients like this one. - Poll time.Duration -} - -// Validate checks if the response is valid for the purposes of time -// synchronization. -func (r *Response) Validate() error { - // Handle invalid stratum values. - if r.Stratum == 0 { - return fmt.Errorf("kiss of death received: %s", r.KissCode) - } - if r.Stratum >= maxStratum { - return errors.New("invalid stratum in response") - } - - // Handle invalid leap second indicator. - if r.Leap == LeapNotInSync { - return errors.New("invalid leap second") - } - - // Estimate the "freshness" of the time. If it exceeds the maximum - // polling interval (~36 hours), then it cannot be considered "fresh". - freshness := r.Time.Sub(r.ReferenceTime) - if freshness > maxPollInterval { - return errors.New("server clock not fresh") - } - - // Calculate the peer synchronization distance, lambda: - // lambda := RootDelay/2 + RootDispersion - // If this value exceeds MAXDISP (16s), then the time is not suitable - // for synchronization purposes. - // https://tools.ietf.org/html/rfc5905#appendix-A.5.1.1. - lambda := r.RootDelay/2 + r.RootDispersion - if lambda > maxDispersion { - return errors.New("invalid dispersion") - } - - // If the server's transmit time is before its reference time, the - // response is invalid. - if r.Time.Before(r.ReferenceTime) { - return errors.New("invalid time reported") - } - - // nil means the response is valid. - return nil -} - -// Query returns a response from the remote NTP server host. It contains -// the time at which the server transmitted the response as well as other -// useful information about the time and the remote server. -func Query(host string) (*Response, error) { - return QueryWithOptions(host, QueryOptions{}) -} - -// QueryWithOptions performs the same function as Query but allows for the -// customization of several query options. -func QueryWithOptions(host string, opt QueryOptions) (*Response, error) { - m, now, err := getTime(host, opt) - if err != nil { - return nil, err - } - return parseTime(m, now), nil -} - -// TimeV returns the current time using information from a remote NTP server. -// On error, it returns the local system time. The version may be 2, 3, or 4. -// -// Deprecated: TimeV is deprecated. Use QueryWithOptions instead. 
-func TimeV(host string, version int) (time.Time, error) { - m, recvTime, err := getTime(host, QueryOptions{Version: version}) - if err != nil { - return time.Now(), err - } - - r := parseTime(m, recvTime) - err = r.Validate() - if err != nil { - return time.Now(), err - } - - // Use the clock offset to calculate the time. - return time.Now().Add(r.ClockOffset), nil -} - -// Time returns the current time using information from a remote NTP server. -// It uses version 4 of the NTP protocol. On error, it returns the local -// system time. -func Time(host string) (time.Time, error) { - return TimeV(host, defaultNtpVersion) -} - -// getTime performs the NTP server query and returns the response message -// along with the local system time it was received. -func getTime(host string, opt QueryOptions) (*msg, ntpTime, error) { - if opt.Version == 0 { - opt.Version = defaultNtpVersion - } - if opt.Version < 2 || opt.Version > 4 { - return nil, 0, errors.New("invalid protocol version requested") - } - - // Resolve the remote NTP server address. - raddr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, "123")) - if err != nil { - return nil, 0, err - } - - // Resolve the local address if specified as an option. - var laddr *net.UDPAddr - if opt.LocalAddress != "" { - laddr, err = net.ResolveUDPAddr("udp", net.JoinHostPort(opt.LocalAddress, "0")) - if err != nil { - return nil, 0, err - } - } - - // Override the port if requested. - if opt.Port != 0 { - raddr.Port = opt.Port - } - - // Prepare a "connection" to the remote server. - con, err := net.DialUDP("udp", laddr, raddr) - if err != nil { - return nil, 0, err - } - defer con.Close() - - // Set a TTL for the packet if requested. - if opt.TTL != 0 { - ipcon := ipv4.NewConn(con) - err = ipcon.SetTTL(opt.TTL) - if err != nil { - return nil, 0, err - } - } - - // Set a timeout on the connection. - if opt.Timeout == 0 { - opt.Timeout = defaultTimeout - } - con.SetDeadline(time.Now().Add(opt.Timeout)) - - // Allocate a message to hold the response. - recvMsg := new(msg) - - // Allocate a message to hold the query. - xmitMsg := new(msg) - xmitMsg.setMode(client) - xmitMsg.setVersion(opt.Version) - xmitMsg.setLeap(LeapNotInSync) - - // To ensure privacy and prevent spoofing, try to use a random 64-bit - // value for the TransmitTime. If crypto/rand couldn't generate a - // random value, fall back to using the system clock. Keep track of - // when the messsage was actually transmitted. - bits := make([]byte, 8) - _, err = rand.Read(bits) - var xmitTime time.Time - if err == nil { - xmitMsg.TransmitTime = ntpTime(binary.BigEndian.Uint64(bits)) - xmitTime = time.Now() - } else { - xmitTime = time.Now() - xmitMsg.TransmitTime = toNtpTime(xmitTime) - } - - // Transmit the query. - err = binary.Write(con, binary.BigEndian, xmitMsg) - if err != nil { - return nil, 0, err - } - - // Receive the response. - err = binary.Read(con, binary.BigEndian, recvMsg) - if err != nil { - return nil, 0, err - } - - // Keep track of the time the response was received. - delta := time.Since(xmitTime) - if delta < 0 { - // The local system may have had its clock adjusted since it - // sent the query. In go 1.9 and later, time.Since ensures - // that a monotonic clock is used, so delta can never be less - // than zero. In versions before 1.9, a monotonic clock is - // not used, so we have to check. - return nil, 0, errors.New("client clock ticked backwards") - } - recvTime := toNtpTime(xmitTime.Add(delta)) - - // Check for invalid fields. 
- if recvMsg.getMode() != server { - return nil, 0, errors.New("invalid mode in response") - } - if recvMsg.TransmitTime == ntpTime(0) { - return nil, 0, errors.New("invalid transmit time in response") - } - if recvMsg.OriginTime != xmitMsg.TransmitTime { - return nil, 0, errors.New("server response mismatch") - } - if recvMsg.ReceiveTime > recvMsg.TransmitTime { - return nil, 0, errors.New("server clock ticked backwards") - } - - // Correct the received message's origin time using the actual - // transmit time. - recvMsg.OriginTime = toNtpTime(xmitTime) - - return recvMsg, recvTime, nil -} - -// parseTime parses the NTP packet along with the packet receive time to -// generate a Response record. -func parseTime(m *msg, recvTime ntpTime) *Response { - r := &Response{ - Time: m.TransmitTime.Time(), - ClockOffset: offset(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), - RTT: rtt(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), - Precision: toInterval(m.Precision), - Stratum: m.Stratum, - ReferenceID: m.ReferenceID, - ReferenceTime: m.ReferenceTime.Time(), - RootDelay: m.RootDelay.Duration(), - RootDispersion: m.RootDispersion.Duration(), - Leap: m.getLeap(), - MinError: minError(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), - Poll: toInterval(m.Poll), - } - - // Calculate values depending on other calculated values - r.RootDistance = rootDistance(r.RTT, r.RootDelay, r.RootDispersion) - - // If a kiss of death was received, interpret the reference ID as - // a kiss code. - if r.Stratum == 0 { - r.KissCode = kissCode(r.ReferenceID) - } - - return r -} - -// The following helper functions calculate additional metadata about the -// timestamps received from an NTP server. The timestamps returned by -// the server are given the following variable names: -// -// org = Origin Timestamp (client send time) -// rec = Receive Timestamp (server receive time) -// xmt = Transmit Timestamp (server reply time) -// dst = Destination Timestamp (client receive time) - -func rtt(org, rec, xmt, dst ntpTime) time.Duration { - // round trip delay time - // rtt = (dst-org) - (xmt-rec) - a := dst.Time().Sub(org.Time()) - b := xmt.Time().Sub(rec.Time()) - rtt := a - b - if rtt < 0 { - rtt = 0 - } - return rtt -} - -func offset(org, rec, xmt, dst ntpTime) time.Duration { - // local clock offset - // offset = ((rec-org) + (xmt-dst)) / 2 - a := rec.Time().Sub(org.Time()) - b := xmt.Time().Sub(dst.Time()) - return (a + b) / time.Duration(2) -} - -func minError(org, rec, xmt, dst ntpTime) time.Duration { - // Each NTP response contains two pairs of send/receive timestamps. - // When either pair indicates a "causality violation", we calculate the - // error as the difference in time between them. The minimum error is - // the greater of the two causality violations. - var error0, error1 ntpTime - if org >= rec { - error0 = org - rec - } - if xmt >= dst { - error1 = xmt - dst - } - if error0 > error1 { - return error0.Duration() - } - return error1.Duration() -} - -func rootDistance(rtt, rootDelay, rootDisp time.Duration) time.Duration { - // The root distance is: - // the maximum error due to all causes of the local clock - // relative to the primary server. It is defined as half the - // total delay plus total dispersion plus peer jitter. 
- // (https://tools.ietf.org/html/rfc5905#appendix-A.5.5.2) - // - // In the reference implementation, it is calculated as follows: - // rootDist = max(MINDISP, rootDelay + rtt)/2 + rootDisp - // + peerDisp + PHI * (uptime - peerUptime) - // + peerJitter - // For an SNTP client which sends only a single packet, most of these - // terms are irrelevant and become 0. - totalDelay := rtt + rootDelay - return totalDelay/2 + rootDisp -} - -func toInterval(t int8) time.Duration { - switch { - case t > 0: - return time.Duration(uint64(time.Second) << uint(t)) - case t < 0: - return time.Duration(uint64(time.Second) >> uint(-t)) - default: - return time.Second - } -} - -func kissCode(id uint32) string { - isPrintable := func(ch byte) bool { return ch >= 32 && ch <= 126 } - - b := []byte{ - byte(id >> 24), - byte(id >> 16), - byte(id >> 8), - byte(id), - } - for _, ch := range b { - if !isPrintable(ch) { - return "" - } - } - return string(b) -} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be66..0000000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
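The vendored `beevik/ntp` client removed above derives the clock offset and round-trip delay from the four NTP timestamps (origin, receive, transmit, destination) using the formulas documented in its `offset` and `rtt` helpers. The following is a minimal standalone sketch of those two formulas only, using hypothetical `time.Time` values rather than the package's internal `ntpTime` type:

```go
package main

import (
	"fmt"
	"time"
)

// offsetAndRTT applies the formulas documented in the removed ntp.go:
//   org = client send time, rec = server receive time,
//   xmt = server transmit time, dst = client receive time
//   offset = ((rec - org) + (xmt - dst)) / 2
//   rtt    = (dst - org) - (xmt - rec)
func offsetAndRTT(org, rec, xmt, dst time.Time) (offset, rtt time.Duration) {
	offset = (rec.Sub(org) + xmt.Sub(dst)) / 2
	rtt = dst.Sub(org) - xmt.Sub(rec)
	if rtt < 0 {
		rtt = 0 // clamp negative values, as the removed rtt helper does
	}
	return offset, rtt
}

func main() {
	// Hypothetical timestamps: 40 ms one-way delay in each direction,
	// server clock running 20 ms ahead of the client clock.
	org := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	rec := org.Add(60 * time.Millisecond) // 40 ms transit + 20 ms skew
	xmt := rec.Add(1 * time.Millisecond)  // server answers 1 ms later
	dst := org.Add(81 * time.Millisecond) // reply arrives back at the client
	offset, rtt := offsetAndRTT(org, rec, xmt, dst)
	fmt.Println(offset, rtt) // 20ms 80ms
}
```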
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7c..0000000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 
-10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 
-12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8eb6..0000000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. - targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. 
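For reference while reviewing this removal, the Stream API deleted above (NewTargeted, Insert, Query, Count) is normally consumed like the minimal sketch below. This is illustrative only and not part of the diff; the target quantiles, epsilons, and random input data are made-up example values.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with absolute errors of
	// 0.05 and 0.001 respectively (Quantile±Epsilon, per NewTargeted).
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.99: 0.001,
	})

	// Feed the stream some observations; here just uniform random values.
	for i := 0; i < 10000; i++ {
		s.Insert(rand.Float64())
	}

	fmt.Println("count:", s.Count())
	fmt.Println("p50 ~", s.Query(0.50))
	fmt.Println("p99 ~", s.Query(0.99))
}
```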
-func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea88da..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt deleted file mode 100644 index 24b53065f4..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md deleted file mode 100644 index 2fd8693c21..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. 
- -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod deleted file mode 100644 index 49f67608bf..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/cespare/xxhash/v2 - -go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go deleted file mode 100644 index db0b35fbe3..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. 
- copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - b = b[len(d.mem):] - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go deleted file mode 100644 index ad14b807f4..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s deleted file mode 100644 index d580e32aed..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ /dev/null @@ -1,215 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. 
-blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) - - RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go deleted file mode 100644 index 4a5a821603..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. 
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go deleted file mode 100644 index fc9bea7a31..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go deleted file mode 100644 index 53bf76efbc..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Notes: -// -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. -// -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. -// -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -func Sum64String(s string) uint64 { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} - -// WriteString adds more data to d. It always returns len(s), nil. -// It may be faster than Write([]byte(s)) by avoiding a copy. 
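The API summarized in the removed README and xxhash.go above (Sum64, Sum64String, and the streaming Digest type) can be exercised with a short sketch like the one below; the input strings are arbitrary examples, not anything node_exporter hashes.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Printf("%x\n", xxhash.Sum64([]byte("node_exporter")))
	fmt.Printf("%x\n", xxhash.Sum64String("node_exporter"))

	// Streaming: Digest implements hash.Hash64, so data can be written
	// incrementally and yields the same digest as the one-shot calls.
	d := xxhash.New()
	d.WriteString("node_")
	d.WriteString("exporter")
	fmt.Printf("%x\n", d.Sum64())
}
```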
-func (d *Digest) WriteString(s string) (n int, err error) { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return d.Write(b) -} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/vendor/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." 
- -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. 
-You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. 
We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE deleted file mode 100644 index 23a0ada2fb..0000000000 --- a/vendor/github.com/coreos/go-systemd/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go deleted file mode 100644 index f652582e65..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/dbus.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ -package dbus - -import ( - "encoding/hex" - "fmt" - "os" - "strconv" - "strings" - "sync" - - "github.com/godbus/dbus" -) - -const ( - alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` - num = `0123456789` - alphanum = alpha + num - signalBuffer = 100 -) - -// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped -func needsEscape(i int, b byte) bool { - // Escape everything that is not a-z-A-Z-0-9 - // Also escape 0-9 if it's the first character - return strings.IndexByte(alphanum, b) == -1 || - (i == 0 && strings.IndexByte(num, b) != -1) -} - -// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the -// rules that systemd uses for serializing special characters. -func PathBusEscape(path string) string { - // Special case the empty string - if len(path) == 0 { - return "_" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if needsEscape(i, c) { - e := fmt.Sprintf("_%x", c) - n = append(n, []byte(e)...) - } else { - n = append(n, c) - } - } - return string(n) -} - -// pathBusUnescape is the inverse of PathBusEscape. -func pathBusUnescape(path string) string { - if path == "_" { - return "" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if c == '_' && i+2 < len(path) { - res, err := hex.DecodeString(path[i+1 : i+3]) - if err == nil { - n = append(n, res...) 
- } - i += 2 - } else { - n = append(n, c) - } - } - return string(n) -} - -// Conn is a connection to systemd's dbus endpoint. -type Conn struct { - // sysconn/sysobj are only used to call dbus methods - sysconn *dbus.Conn - sysobj dbus.BusObject - - // sigconn/sigobj are only used to receive dbus signals - sigconn *dbus.Conn - sigobj dbus.BusObject - - jobListener struct { - jobs map[dbus.ObjectPath]chan<- string - sync.Mutex - } - subStateSubscriber struct { - updateCh chan<- *SubStateUpdate - errCh chan<- error - sync.Mutex - ignore map[dbus.ObjectPath]int64 - cleanIgnore int64 - } - propertiesSubscriber struct { - updateCh chan<- *PropertiesUpdate - errCh chan<- error - sync.Mutex - } -} - -// New establishes a connection to any available bus and authenticates. -// Callers should call Close() when done with the connection. -func New() (*Conn, error) { - conn, err := NewSystemConnection() - if err != nil && os.Geteuid() == 0 { - return NewSystemdConnection() - } - return conn, err -} - -// NewSystemConnection establishes a connection to the system bus and authenticates. -// Callers should call Close() when done with the connection -func NewSystemConnection() (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(dbus.SystemBusPrivate) - }) -} - -// NewUserConnection establishes a connection to the session bus and -// authenticates. This can be used to connect to systemd user instances. -// Callers should call Close() when done with the connection. -func NewUserConnection() (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(dbus.SessionBusPrivate) - }) -} - -// NewSystemdConnection establishes a private, direct connection to systemd. -// This can be used for communicating with systemd without a dbus daemon. -// Callers should call Close() when done with the connection. -func NewSystemdConnection() (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - // We skip Hello when talking directly to systemd. - return dbusAuthConnection(func(opts ...dbus.ConnOption) (*dbus.Conn, error) { - return dbus.Dial("unix:path=/run/systemd/private") - }) - }) -} - -// Close closes an established connection -func (c *Conn) Close() { - c.sysconn.Close() - c.sigconn.Close() -} - -// NewConnection establishes a connection to a bus using a caller-supplied function. -// This allows connecting to remote buses through a user-supplied mechanism. -// The supplied function may be called multiple times, and should return independent connections. -// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, -// and any authentication should be handled by the function. 
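The escaping rules described for PathBusEscape above (escape everything outside a-zA-Z0-9 as `_` plus the hex byte, escape a leading digit, map the empty string to `_`) are easiest to see with concrete values. A worked sketch, with hypothetical unit names chosen purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	fmt.Println(dbus.PathBusEscape(""))               // "_"                  (empty-string special case)
	fmt.Println(dbus.PathBusEscape("cron.service"))   // "cron_2eservice"     ('.' -> _2e)
	fmt.Println(dbus.PathBusEscape("my-app.service")) // "my_2dapp_2eservice" ('-' -> _2d)
	fmt.Println(dbus.PathBusEscape("2fast.service"))  // "_32fast_2eservice"  (leading digit escaped)
}
```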
-func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { - sysconn, err := dialBus() - if err != nil { - return nil, err - } - - sigconn, err := dialBus() - if err != nil { - sysconn.Close() - return nil, err - } - - c := &Conn{ - sysconn: sysconn, - sysobj: systemdObject(sysconn), - sigconn: sigconn, - sigobj: systemdObject(sigconn), - } - - c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) - c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) - - // Setup the listeners on jobs so that we can get completions - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") - - c.dispatch() - return c, nil -} - -// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager -// interface. The value is returned in its string representation, as defined at -// https://developer.gnome.org/glib/unstable/gvariant-text.html -func (c *Conn) GetManagerProperty(prop string) (string, error) { - variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) - if err != nil { - return "", err - } - return variant.String(), nil -} - -func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := createBus() - if err != nil { - return nil, err - } - - // Only use EXTERNAL method, and hardcode the uid (not username) - // to avoid a username lookup (which requires a dynamically linked - // libc) - methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} - - err = conn.Auth(methods) - if err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func dbusAuthHelloConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := dbusAuthConnection(createBus) - if err != nil { - return nil, err - } - - if err = conn.Hello(); err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func systemdObject(conn *dbus.Conn) dbus.BusObject { - return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go deleted file mode 100644 index 5859583eb2..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/methods.go +++ /dev/null @@ -1,600 +0,0 @@ -// Copyright 2015, 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
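For orientation, the connection helpers removed above (New, Close, GetManagerProperty) are typically used as in this minimal sketch. It is not part of the diff; querying the "Version" manager property is just an example, and the call requires a running systemd reachable over D-Bus.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	// New tries the system bus first and, when running as root and that
	// fails, falls back to systemd's private socket.
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Read one property of org.freedesktop.systemd1.Manager.
	version, err := conn.GetManagerProperty("Version")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("systemd version:", version)
}
```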
- -package dbus - -import ( - "errors" - "fmt" - "path" - "strconv" - - "github.com/godbus/dbus" -) - -func (c *Conn) jobComplete(signal *dbus.Signal) { - var id uint32 - var job dbus.ObjectPath - var unit string - var result string - dbus.Store(signal.Body, &id, &job, &unit, &result) - c.jobListener.Lock() - out, ok := c.jobListener.jobs[job] - if ok { - out <- result - delete(c.jobListener.jobs, job) - } - c.jobListener.Unlock() -} - -func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { - if ch != nil { - c.jobListener.Lock() - defer c.jobListener.Unlock() - } - - var p dbus.ObjectPath - err := c.sysobj.Call(job, 0, args...).Store(&p) - if err != nil { - return 0, err - } - - if ch != nil { - c.jobListener.jobs[p] = ch - } - - // ignore error since 0 is fine if conversion fails - jobID, _ := strconv.Atoi(path.Base(string(p))) - - return jobID, nil -} - -// StartUnit enqueues a start job and depending jobs, if any (unless otherwise -// specified by the mode string). -// -// Takes the unit to activate, plus a mode string. The mode needs to be one of -// replace, fail, isolate, ignore-dependencies, ignore-requirements. If -// "replace" the call will start the unit and its dependencies, possibly -// replacing already queued jobs that conflict with this. If "fail" the call -// will start the unit and its dependencies, but will fail if this would change -// an already queued job. If "isolate" the call will start the unit in question -// and terminate all units that aren't dependencies of it. If -// "ignore-dependencies" it will start a unit but ignore all its dependencies. -// If "ignore-requirements" it will start a unit but only ignore the -// requirement dependencies. It is not recommended to make use of the latter -// two options. -// -// If the provided channel is non-nil, a result string will be sent to it upon -// job completion: one of done, canceled, timeout, failed, dependency, skipped. -// done indicates successful execution of a job. canceled indicates that a job -// has been canceled before it finished execution. timeout indicates that the -// job timeout was reached. failed indicates that the job failed. dependency -// indicates that a job this job has been depending on failed and the job hence -// has been removed too. skipped indicates that a job was skipped because it -// didn't apply to the units current state. -// -// If no error occurs, the ID of the underlying systemd job will be returned. There -// does exist the possibility for no error to be returned, but for the returned job -// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint -// should not be considered authoritative. -// -// If an error does occur, it will be returned to the user alongside a job ID of 0. -func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) -} - -// StopUnit is similar to StartUnit but stops the specified unit rather -// than starting it. -func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) -} - -// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. -func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) -} - -// RestartUnit restarts a service. 
If a service is restarted that isn't -// running it will be started. -func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) -} - -// TryRestartUnit is like RestartUnit, except that a service that isn't running -// is not affected by the restart. -func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) -} - -// ReloadOrRestartUnit attempts a reload if the unit supports it and use a restart -// otherwise. -func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) -} - -// ReloadOrTryRestartUnit attempts a reload if the unit supports it and use a "Try" -// flavored restart otherwise. -func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) -} - -// StartTransientUnit() may be used to create and start a transient unit, which -// will be released as soon as it is not running or referenced anymore or the -// system is rebooted. name is the unit name including suffix, and must be -// unique. mode is the same as in StartUnit(), properties contains properties -// of the unit. -func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) -} - -// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's -// processes are killed. -func (c *Conn) KillUnit(name string, signal int32) { - c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() -} - -// ResetFailedUnit resets the "failed" state of a specific unit. -func (c *Conn) ResetFailedUnit(name string) error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() -} - -// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`. -func (c *Conn) SystemState() (*Property, error) { - var err error - var prop dbus.Variant - - obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") - err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: "SystemState", Value: prop}, nil -} - -// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface -func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { - var err error - var props map[string]dbus.Variant - - if !path.IsValid() { - return nil, fmt.Errorf("invalid unit name: %v", path) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) - if err != nil { - return nil, err - } - - out := make(map[string]interface{}, len(props)) - for k, v := range props { - out[k] = v.Value() - } - - return out, nil -} - -// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties. 
-func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(path, "org.freedesktop.systemd1.Unit") -} - -// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties. -func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { - return c.getProperties(path, "org.freedesktop.systemd1.Unit") -} - -// GetAllProperties takes the (unescaped) unit name and returns all of its dbus object properties. -func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(path, "") -} - -func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { - var err error - var prop dbus.Variant - - path := unitPath(unit) - if !path.IsValid() { - return nil, errors.New("invalid unit name: " + unit) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: propertyName, Value: prop}, nil -} - -func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) -} - -// GetServiceProperty returns property for given service name and property name -func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { - return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName) -} - -// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. -// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope -// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit -func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(path, "org.freedesktop.systemd1."+unitType) -} - -// SetUnitProperties() may be used to modify certain unit properties at runtime. -// Not all properties may be changed at runtime, but many resource management -// settings (primarily those in systemd.cgroup(5)) may. The changes are applied -// instantly, and stored on disk for future boots, unless runtime is true, in which -// case the settings only apply until the next reboot. name is the name of the unit -// to modify. properties are the settings to set, encoded as an array of property -// name and value pairs. -func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() -} - -func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) -} - -type UnitStatus struct { - Name string // The primary unit name as string - Description string // The human readable description string - LoadState string // The load state (i.e. whether the unit file has been loaded successfully) - ActiveState string // The active state (i.e. 
whether the unit is currently started or not) - SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) - Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. - Path dbus.ObjectPath // The unit object path - JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise - JobType string // The job type as string - JobPath dbus.ObjectPath // The job object path -} - -type storeFunc func(retvalues ...interface{}) error - -func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - status := make([]UnitStatus, len(result)) - statusInterface := make([]interface{}, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } - - err = dbus.Store(resultInterface, statusInterface...) - if err != nil { - return nil, err - } - - return status, nil -} - -// ListUnits returns an array with all currently loaded units. Note that -// units may be known by multiple names at the same time, and hence there might -// be more unit names loaded than actual units behind them. -// Also note that a unit is only loaded if it is active and/or enabled. -// Units that are both disabled and inactive will thus not be returned. -func (c *Conn) ListUnits() ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store) -} - -// ListUnitsFiltered returns an array with units filtered by state. -// It takes a list of units' statuses to filter. -func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) -} - -// ListUnitsByPatterns returns an array with units. -// It takes a list of units' statuses and names to filter. -// Note that units may be known by multiple names at the same time, -// and hence there might be more unit names loaded than actual units behind them. -func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) -} - -// ListUnitsByNames returns an array with units. It takes a list of units' -// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns -// method, this method returns statuses even for inactive or non-existing -// units. Input array should contain exact unit names, but not patterns. 
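As a usage note for the listing helpers above (ListUnits, ListUnitsFiltered, ListUnitsByPatterns and the UnitStatus struct), the sketch below enumerates failed service units. The state and pattern filters are example values chosen for illustration only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Restrict by unit state ("failed") and by name pattern ("*.service").
	units, err := conn.ListUnitsByPatterns([]string{"failed"}, []string{"*.service"})
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range units {
		fmt.Printf("%s: %s/%s (%s)\n", u.Name, u.ActiveState, u.SubState, u.Description)
	}
}
```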
-// Note: Requires systemd v230 or higher -func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) -} - -type UnitFile struct { - Path string - Type string -} - -func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - files := make([]UnitFile, len(result)) - fileInterface := make([]interface{}, len(files)) - for i := range files { - fileInterface[i] = &files[i] - } - - err = dbus.Store(resultInterface, fileInterface...) - if err != nil { - return nil, err - } - - return files, nil -} - -// ListUnitFiles returns an array of all available units on disk. -func (c *Conn) ListUnitFiles() ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) -} - -// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns. -func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) -} - -type LinkUnitFileChange EnableUnitFileChange - -// LinkUnitFiles() links unit files (that are located outside of the -// usual unit search paths) into the unit search path. -// -// It takes a list of absolute paths to unit files to link and two -// booleans. The first boolean controls whether the unit shall be -// enabled for runtime only (true, /run), or persistently (false, -// /etc). -// The second controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns a list of the changes made. The list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]LinkUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -// EnableUnitFiles() may be used to enable one or more units in the system (by -// creating symlinks to them in /etc or /run). -// -// It takes a list of unit files to enable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and two booleans: the first controls whether the unit shall -// be enabled for runtime only (true, /run), or persistently (false, /etc). -// The second one controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns one boolean and an array with the changes made. 
The -// boolean signals whether the unit files contained any enablement -// information (i.e. an [Install]) section. The changes list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { - var carries_install_info bool - - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) - if err != nil { - return false, nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]EnableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return false, nil, err - } - - return carries_install_info, changes, nil -} - -type EnableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// DisableUnitFiles() may be used to disable one or more units in the system (by -// removing symlinks to them from /etc or /run). -// -// It takes a list of unit files to disable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and one boolean: whether the unit was enabled for runtime -// only (true, /run), or persistently (false, /etc). -// -// This call returns an array with the changes made. The changes list -// consists of structures with three strings: the type of the change (one of -// symlink or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]DisableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type DisableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// MaskUnitFiles masks one or more units in the system -// -// It takes three arguments: -// * list of units to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) 
-// * force flag -func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]MaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type MaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// UnmaskUnitFiles unmasks one or more units in the system -// -// It takes two arguments: -// * list of unit files to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) -func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]UnmaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type UnmaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Reload instructs systemd to scan for and reload unit files. This is -// equivalent to a 'systemctl daemon-reload'. -func (c *Conn) Reload() error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() -} - -func unitPath(name string) dbus.ObjectPath { - return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) -} - -// unitName returns the unescaped base element of the supplied escaped path -func unitName(dpath dbus.ObjectPath) string { - return pathBusUnescape(path.Base(string(dpath))) -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go deleted file mode 100644 index 6c81895876..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/properties.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "github.com/godbus/dbus" -) - -// From the systemd docs: -// -// The properties array of StartTransientUnit() may take many of the settings -// that may also be configured in unit files. Not all parameters are currently -// accepted though, but we plan to cover more properties with future release. -// Currently you may set the Description, Slice and all dependency types of -// units, as well as RemainAfterExit, ExecStart for service units, -// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, -// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, -// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, -// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map -// directly to their counterparts in unit files and as normal D-Bus object -// properties. The exception here is the PIDs field of scope units which is -// used for construction of the scope only and specifies the initial PIDs to -// add to the scope object. - -type Property struct { - Name string - Value dbus.Variant -} - -type PropertyCollection struct { - Name string - Properties []Property -} - -type execStart struct { - Path string // the binary path to execute - Args []string // an array with all arguments to pass to the executed command, starting with argument 0 - UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly -} - -// PropExecStart sets the ExecStart service property. The first argument is a -// slice with the binary path to execute followed by the arguments to pass to -// the executed command. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= -func PropExecStart(command []string, uncleanIsFailure bool) Property { - execStarts := []execStart{ - execStart{ - Path: command[0], - Args: command, - UncleanIsFailure: uncleanIsFailure, - }, - } - - return Property{ - Name: "ExecStart", - Value: dbus.MakeVariant(execStarts), - } -} - -// PropRemainAfterExit sets the RemainAfterExit service property. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= -func PropRemainAfterExit(b bool) Property { - return Property{ - Name: "RemainAfterExit", - Value: dbus.MakeVariant(b), - } -} - -// PropType sets the Type service property. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= -func PropType(t string) Property { - return Property{ - Name: "Type", - Value: dbus.MakeVariant(t), - } -} - -// PropDescription sets the Description unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= -func PropDescription(desc string) Property { - return Property{ - Name: "Description", - Value: dbus.MakeVariant(desc), - } -} - -func propDependency(name string, units []string) Property { - return Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} - -// PropRequires sets the Requires unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= -func PropRequires(units ...string) Property { - return propDependency("Requires", units) -} - -// PropRequiresOverridable sets the RequiresOverridable unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= -func PropRequiresOverridable(units ...string) Property { - return propDependency("RequiresOverridable", units) -} - -// PropRequisite sets the Requisite unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= -func PropRequisite(units ...string) Property { - return propDependency("Requisite", units) -} - -// PropRequisiteOverridable sets the RequisiteOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= -func PropRequisiteOverridable(units ...string) Property { - return propDependency("RequisiteOverridable", units) -} - -// PropWants sets the Wants unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= -func PropWants(units ...string) Property { - return propDependency("Wants", units) -} - -// PropBindsTo sets the BindsTo unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= -func PropBindsTo(units ...string) Property { - return propDependency("BindsTo", units) -} - -// PropRequiredBy sets the RequiredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= -func PropRequiredBy(units ...string) Property { - return propDependency("RequiredBy", units) -} - -// PropRequiredByOverridable sets the RequiredByOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= -func PropRequiredByOverridable(units ...string) Property { - return propDependency("RequiredByOverridable", units) -} - -// PropWantedBy sets the WantedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= -func PropWantedBy(units ...string) Property { - return propDependency("WantedBy", units) -} - -// PropBoundBy sets the BoundBy unit property. See -// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= -func PropBoundBy(units ...string) Property { - return propDependency("BoundBy", units) -} - -// PropConflicts sets the Conflicts unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= -func PropConflicts(units ...string) Property { - return propDependency("Conflicts", units) -} - -// PropConflictedBy sets the ConflictedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= -func PropConflictedBy(units ...string) Property { - return propDependency("ConflictedBy", units) -} - -// PropBefore sets the Before unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= -func PropBefore(units ...string) Property { - return propDependency("Before", units) -} - -// PropAfter sets the After unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= -func PropAfter(units ...string) Property { - return propDependency("After", units) -} - -// PropOnFailure sets the OnFailure unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= -func PropOnFailure(units ...string) Property { - return propDependency("OnFailure", units) -} - -// PropTriggers sets the Triggers unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= -func PropTriggers(units ...string) Property { - return propDependency("Triggers", units) -} - -// PropTriggeredBy sets the TriggeredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= -func PropTriggeredBy(units ...string) Property { - return propDependency("TriggeredBy", units) -} - -// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= -func PropPropagatesReloadTo(units ...string) Property { - return propDependency("PropagatesReloadTo", units) -} - -// PropRequiresMountsFor sets the RequiresMountsFor unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= -func PropRequiresMountsFor(units ...string) Property { - return propDependency("RequiresMountsFor", units) -} - -// PropSlice sets the Slice unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= -func PropSlice(slice string) Property { - return Property{ - Name: "Slice", - Value: dbus.MakeVariant(slice), - } -} - -// PropPids sets the PIDs field of scope units used in the initial construction -// of the scope only and specifies the initial PIDs to add to the scope object. -// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties -func PropPids(pids ...uint32) Property { - return Property{ - Name: "PIDs", - Value: dbus.MakeVariant(pids), - } -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go deleted file mode 100644 index 17c5d48565..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/set.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -type set struct { - data map[string]bool -} - -func (s *set) Add(value string) { - s.data[value] = true -} - -func (s *set) Remove(value string) { - delete(s.data, value) -} - -func (s *set) Contains(value string) (exists bool) { - _, exists = s.data[value] - return -} - -func (s *set) Length() int { - return len(s.data) -} - -func (s *set) Values() (values []string) { - for val := range s.data { - values = append(values, val) - } - return -} - -func newSet() *set { - return &set{make(map[string]bool)} -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go deleted file mode 100644 index f6d7a08a10..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "errors" - "log" - "time" - - "github.com/godbus/dbus" -) - -const ( - cleanIgnoreInterval = int64(10 * time.Second) - ignoreInterval = int64(30 * time.Millisecond) -) - -// Subscribe sets up this connection to subscribe to all systemd dbus events. -// This is required before calling SubscribeUnits. When the connection closes -// systemd will automatically stop sending signals so there is no need to -// explicitly call Unsubscribe(). -func (c *Conn) Subscribe() error { - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") - - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() -} - -// Unsubscribe this connection from systemd dbus events. -func (c *Conn) Unsubscribe() error { - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() -} - -func (c *Conn) dispatch() { - ch := make(chan *dbus.Signal, signalBuffer) - - c.sigconn.Signal(ch) - - go func() { - for { - signal, ok := <-ch - if !ok { - return - } - - if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { - c.jobComplete(signal) - } - - if c.subStateSubscriber.updateCh == nil && - c.propertiesSubscriber.updateCh == nil { - continue - } - - var unitPath dbus.ObjectPath - switch signal.Name { - case "org.freedesktop.systemd1.Manager.JobRemoved": - unitName := signal.Body[2].(string) - c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) - case "org.freedesktop.systemd1.Manager.UnitNew": - unitPath = signal.Body[1].(dbus.ObjectPath) - case "org.freedesktop.DBus.Properties.PropertiesChanged": - if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { - unitPath = signal.Path - - if len(signal.Body) >= 2 { - if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { - c.sendPropertiesUpdate(unitPath, changed) - } - } - } - } - - if unitPath == dbus.ObjectPath("") { - continue - } - - c.sendSubStateUpdate(unitPath) - } - }() -} - -// SubscribeUnits returns two unbuffered channels which will receive all changed units every -// interval. Deleted units are sent as nil. -func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { - return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) -} - -// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer -// size of the channels, the comparison function for detecting changes and a filter -// function for cutting down on the noise that your channel receives. 
-func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { - old := make(map[string]*UnitStatus) - statusChan := make(chan map[string]*UnitStatus, buffer) - errChan := make(chan error, buffer) - - go func() { - for { - timerChan := time.After(interval) - - units, err := c.ListUnits() - if err == nil { - cur := make(map[string]*UnitStatus) - for i := range units { - if filterUnit != nil && filterUnit(units[i].Name) { - continue - } - cur[units[i].Name] = &units[i] - } - - // add all new or changed units - changed := make(map[string]*UnitStatus) - for n, u := range cur { - if oldU, ok := old[n]; !ok || isChanged(oldU, u) { - changed[n] = u - } - delete(old, n) - } - - // add all deleted units - for oldN := range old { - changed[oldN] = nil - } - - old = cur - - if len(changed) != 0 { - statusChan <- changed - } - } else { - errChan <- err - } - - <-timerChan - } - }() - - return statusChan, errChan -} - -type SubStateUpdate struct { - UnitName string - SubState string -} - -// SetSubStateSubscriber writes to updateCh when any unit's substate changes. -// Although this writes to updateCh on every state change, the reported state -// may be more recent than the change that generated it (due to an unavoidable -// race in the systemd dbus interface). That is, this method provides a good -// way to keep a current view of all units' states, but is not guaranteed to -// show every state transition they go through. Furthermore, state changes -// will only be written to the channel with non-blocking writes. If updateCh -// is full, it attempts to write an error to errCh; if errCh is full, the error -// passes silently. -func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { - if c == nil { - msg := "nil receiver" - select { - case errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } - - c.subStateSubscriber.Lock() - defer c.subStateSubscriber.Unlock() - c.subStateSubscriber.updateCh = updateCh - c.subStateSubscriber.errCh = errCh -} - -func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) { - c.subStateSubscriber.Lock() - defer c.subStateSubscriber.Unlock() - - if c.subStateSubscriber.updateCh == nil { - return - } - - isIgnored := c.shouldIgnore(unitPath) - defer c.cleanIgnore() - if isIgnored { - return - } - - info, err := c.GetUnitPathProperties(unitPath) - if err != nil { - select { - case c.subStateSubscriber.errCh <- err: - default: - log.Printf("full error channel while reporting: %s\n", err) - } - return - } - defer c.updateIgnore(unitPath, info) - - name, ok := info["Id"].(string) - if !ok { - msg := "failed to cast info.Id" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", err) - } - return - } - substate, ok := info["SubState"].(string) - if !ok { - msg := "failed to cast info.SubState" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } - - update := &SubStateUpdate{name, substate} - select { - case c.subStateSubscriber.updateCh <- update: - default: - msg := "update channel is full" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } -} - -// The ignore functions 
work around a wart in the systemd dbus interface. -// Requesting the properties of an unloaded unit will cause systemd to send a -// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's -// properties on UnitNew (as that's the only indication of a new unit coming up -// for the first time), we would enter an infinite loop if we did not attempt -// to detect and ignore these spurious signals. The signal themselves are -// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an -// unloaded unit's signals for a short time after requesting its properties. -// This means that we will miss e.g. a transient unit being restarted -// *immediately* upon failure and also a transient unit being started -// immediately after requesting its status (with systemctl status, for example, -// because this causes a UnitNew signal to be sent which then causes us to fetch -// the properties). - -func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { - t, ok := c.subStateSubscriber.ignore[path] - return ok && t >= time.Now().UnixNano() -} - -func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { - loadState, ok := info["LoadState"].(string) - if !ok { - return - } - - // unit is unloaded - it will trigger bad systemd dbus behavior - if loadState == "not-found" { - c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval - } -} - -// without this, ignore would grow unboundedly over time -func (c *Conn) cleanIgnore() { - now := time.Now().UnixNano() - if c.subStateSubscriber.cleanIgnore < now { - c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval - - for p, t := range c.subStateSubscriber.ignore { - if t < now { - delete(c.subStateSubscriber.ignore, p) - } - } - } -} - -// PropertiesUpdate holds a map of a unit's changed properties -type PropertiesUpdate struct { - UnitName string - Changed map[string]dbus.Variant -} - -// SetPropertiesSubscriber writes to updateCh when any unit's properties -// change. Every property change reported by systemd will be sent; that is, no -// transitions will be "missed" (as they might be with SetSubStateSubscriber). -// However, state changes will only be written to the channel with non-blocking -// writes. If updateCh is full, it attempts to write an error to errCh; if -// errCh is full, the error passes silently. 
-func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - c.propertiesSubscriber.updateCh = updateCh - c.propertiesSubscriber.errCh = errCh -} - -// we don't need to worry about shouldIgnore() here because -// sendPropertiesUpdate doesn't call GetProperties() -func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - - if c.propertiesSubscriber.updateCh == nil { - return - } - - update := &PropertiesUpdate{unitName(unitPath), changedProps} - - select { - case c.propertiesSubscriber.updateCh <- update: - default: - msg := "update channel is full" - select { - case c.propertiesSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go deleted file mode 100644 index 5b408d5847..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "time" -) - -// SubscriptionSet returns a subscription set which is like conn.Subscribe but -// can filter to only return events for a set of units. -type SubscriptionSet struct { - *set - conn *Conn -} - -func (s *SubscriptionSet) filter(unit string) bool { - return !s.Contains(unit) -} - -// Subscribe starts listening for dbus events for all of the units in the set. -// Returns channels identical to conn.SubscribeUnits. -func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { - // TODO: Make fully evented by using systemd 209 with properties changed values - return s.conn.SubscribeUnitsCustom(time.Second, 0, - mismatchUnitStatus, - func(unit string) bool { return s.filter(unit) }, - ) -} - -// NewSubscriptionSet returns a new subscription set. -func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { - return &SubscriptionSet{newSet(), conn} -} - -// mismatchUnitStatus returns true if the provided UnitStatus objects -// are not equivalent. false is returned if the objects are equivalent. -// Only the Name, Description and state-related fields are used in -// the comparison. 
-func mismatchUnitStatus(u1, u2 *UnitStatus) bool { - return u1.Name != u2.Name || - u1.Description != u2.Description || - u1.LoadState != u2.LoadState || - u1.ActiveState != u2.ActiveState || - u1.SubState != u2.SubState -} diff --git a/vendor/github.com/ema/qdisc/.travis.yml b/vendor/github.com/ema/qdisc/.travis.yml deleted file mode 100644 index 50a132a902..0000000000 --- a/vendor/github.com/ema/qdisc/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -go: - - 1.x -env: - - GO111MODULE=on -os: - - linux -sudo: required -before_install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 - - go get -d ./... -script: - - go build -tags=gofuzz ./... - - go vet ./... - - golangci-lint run ./... - - go test -v -race -tags=integration ./... diff --git a/vendor/github.com/ema/qdisc/LICENSE.md b/vendor/github.com/ema/qdisc/LICENSE.md deleted file mode 100644 index 0a38dae3e1..0000000000 --- a/vendor/github.com/ema/qdisc/LICENSE.md +++ /dev/null @@ -1,10 +0,0 @@ -MIT License -=========== - -Copyright (C) 2017 Emanuele Rocca - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/ema/qdisc/Makefile b/vendor/github.com/ema/qdisc/Makefile deleted file mode 100644 index b313b082f9..0000000000 --- a/vendor/github.com/ema/qdisc/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -build: - go fmt - go build - go vet - staticcheck - #golint -set_exit_status - go test -v -race -tags=integration - -cover: - go test -coverprofile=coverage.out - go tool cover -html=coverage.out diff --git a/vendor/github.com/ema/qdisc/README.md b/vendor/github.com/ema/qdisc/README.md deleted file mode 100644 index 9fe5dde54c..0000000000 --- a/vendor/github.com/ema/qdisc/README.md +++ /dev/null @@ -1,26 +0,0 @@ -qdisc [![Build Status](https://travis-ci.org/ema/qdisc.svg?branch=master)](https://travis-ci.org/ema/qdisc) -===== - -Package `qdisc` allows to get queuing discipline information via netlink, -similarly to what `tc -s qdisc show` does. 
- -Example usage -------------- - - package main - - import ( - "fmt" - - "github.com/ema/qdisc" - ) - - func main() { - info, err := qdisc.Get() - - if err == nil { - for _, msg := range info { - fmt.Printf("%+v\n", msg) - } - } - } diff --git a/vendor/github.com/ema/qdisc/get.go b/vendor/github.com/ema/qdisc/get.go deleted file mode 100644 index 9191594b43..0000000000 --- a/vendor/github.com/ema/qdisc/get.go +++ /dev/null @@ -1,293 +0,0 @@ -package qdisc - -import ( - "fmt" - "math" - "net" - - "github.com/mdlayher/netlink" - "github.com/mdlayher/netlink/nlenc" -) - -const ( - TCA_UNSPEC = iota - TCA_KIND - TCA_OPTIONS - TCA_STATS - TCA_XSTATS - TCA_RATE - TCA_FCNT - TCA_STATS2 - TCA_STAB - // __TCA_MAX -) - -const ( - TCA_STATS_UNSPEC = iota - TCA_STATS_BASIC - TCA_STATS_RATE_EST - TCA_STATS_QUEUE - TCA_STATS_APP - TCA_STATS_RATE_EST64 - // __TCA_STATS_MAX -) - -// See struct tc_stats in /usr/include/linux/pkt_sched.h -type TC_Stats struct { - Bytes uint64 - Packets uint32 - Drops uint32 - Overlimits uint32 - Bps uint32 - Pps uint32 - Qlen uint32 - Backlog uint32 -} - -// See /usr/include/linux/gen_stats.h -type TC_Stats2 struct { - // struct gnet_stats_basic - Bytes uint64 - Packets uint32 - // struct gnet_stats_queue - Qlen uint32 - Backlog uint32 - Drops uint32 - Requeues uint32 - Overlimits uint32 -} - -// See struct tc_fq_qd_stats /usr/include/linux/pkt_sched.h -type TC_Fq_Qd_Stats struct { - GcFlows uint64 - HighprioPackets uint64 - TcpRetrans uint64 - Throttled uint64 - FlowsPlimit uint64 - PktsTooLong uint64 - AllocationErrors uint64 - TimeNextDelayedFlow int64 - Flows uint32 - InactiveFlows uint32 - ThrottledFlows uint32 - UnthrottleLatencyNs uint32 -} - -type QdiscInfo struct { - IfaceName string - Parent uint32 - Handle uint32 - Kind string - Bytes uint64 - Packets uint32 - Drops uint32 - Requeues uint32 - Overlimits uint32 - GcFlows uint64 - Throttled uint64 - FlowsPlimit uint64 -} - -func parseTCAStats(attr netlink.Attribute) TC_Stats { - var stats TC_Stats - stats.Bytes = nlenc.Uint64(attr.Data[0:8]) - stats.Packets = nlenc.Uint32(attr.Data[8:12]) - stats.Drops = nlenc.Uint32(attr.Data[12:16]) - stats.Overlimits = nlenc.Uint32(attr.Data[16:20]) - stats.Bps = nlenc.Uint32(attr.Data[20:24]) - stats.Pps = nlenc.Uint32(attr.Data[24:28]) - stats.Qlen = nlenc.Uint32(attr.Data[28:32]) - stats.Backlog = nlenc.Uint32(attr.Data[32:36]) - return stats -} - -func parseTCAStats2(attr netlink.Attribute) TC_Stats2 { - var stats TC_Stats2 - - nested, _ := netlink.UnmarshalAttributes(attr.Data) - - for _, a := range nested { - switch a.Type { - case TCA_STATS_BASIC: - stats.Bytes = nlenc.Uint64(a.Data[0:8]) - stats.Packets = nlenc.Uint32(a.Data[8:12]) - case TCA_STATS_QUEUE: - stats.Qlen = nlenc.Uint32(a.Data[0:4]) - stats.Backlog = nlenc.Uint32(a.Data[4:8]) - stats.Drops = nlenc.Uint32(a.Data[8:12]) - stats.Requeues = nlenc.Uint32(a.Data[12:16]) - stats.Overlimits = nlenc.Uint32(a.Data[16:20]) - default: - } - } - - return stats -} - -func parseTC_Fq_Qd_Stats(attr netlink.Attribute) (TC_Fq_Qd_Stats, error) { - var stats TC_Fq_Qd_Stats - - nested, err := netlink.UnmarshalAttributes(attr.Data) - if err != nil { - return stats, err - } - - pts := []*uint64{ - &stats.GcFlows, - &stats.HighprioPackets, - &stats.TcpRetrans, - &stats.Throttled, - &stats.FlowsPlimit, - &stats.PktsTooLong, - &stats.AllocationErrors, - } - for _, a := range nested { - switch a.Type { - case TCA_STATS_APP: - for i := 0; i < len(pts) && (i+1)*8 <= len(a.Data); i++ { - *pts[i] = nlenc.Uint64(a.Data[i*8 : (i+1)*8]) - } 
- default: - } - } - - return stats, nil -} - -func getQdiscMsgs(c *netlink.Conn) ([]netlink.Message, error) { - req := netlink.Message{ - Header: netlink.Header{ - Flags: netlink.Request | netlink.Dump, - Type: 38, // RTM_GETQDISC - }, - Data: make([]byte, 20), - } - - // Perform a request, receive replies, and validate the replies - msgs, err := c.Execute(req) - if err != nil { - return nil, fmt.Errorf("failed to execute request: %v", err) - } - - return msgs, nil -} - -// See https://tools.ietf.org/html/rfc3549#section-3.1.3 -func parseMessage(msg netlink.Message) (QdiscInfo, error) { - var m QdiscInfo - var s TC_Stats - var s2 TC_Stats2 - var s_fq TC_Fq_Qd_Stats - - /* - struct tcmsg { - unsigned char tcm_family; - unsigned char tcm__pad1; - unsigned short tcm__pad2; - int tcm_ifindex; - __u32 tcm_handle; - __u32 tcm_parent; - __u32 tcm_info; - }; - */ - - if len(msg.Data) < 20 { - return m, fmt.Errorf("short message, len=%d", len(msg.Data)) - } - - ifaceIdx := nlenc.Uint32(msg.Data[4:8]) - - m.Handle = nlenc.Uint32(msg.Data[8:12]) - m.Parent = nlenc.Uint32(msg.Data[12:16]) - - if m.Parent == math.MaxUint32 { - m.Parent = 0 - } - - // The first 20 bytes are taken by tcmsg - attrs, err := netlink.UnmarshalAttributes(msg.Data[20:]) - - if err != nil { - return m, fmt.Errorf("failed to unmarshal attributes: %v", err) - } - - for _, attr := range attrs { - switch attr.Type { - case TCA_KIND: - m.Kind = nlenc.String(attr.Data) - case TCA_STATS2: - s_fq, err = parseTC_Fq_Qd_Stats(attr) - if err != nil { - return m, err - } - if s_fq.GcFlows > 0 { - m.GcFlows = s_fq.GcFlows - } - if s_fq.Throttled > 0 { - m.Throttled = s_fq.Throttled - } - if s_fq.FlowsPlimit > 0 { - m.FlowsPlimit = s_fq.FlowsPlimit - } - - s2 = parseTCAStats2(attr) - m.Bytes = s2.Bytes - m.Packets = s2.Packets - m.Drops = s2.Drops - // requeues only available in TCA_STATS2, not in TCA_STATS - m.Requeues = s2.Requeues - m.Overlimits = s2.Overlimits - case TCA_STATS: - // Legacy - s = parseTCAStats(attr) - m.Bytes = s.Bytes - m.Packets = s.Packets - m.Drops = s.Drops - m.Overlimits = s.Overlimits - default: - // TODO: TCA_OPTIONS and TCA_XSTATS - } - } - - iface, err := net.InterfaceByIndex(int(ifaceIdx)) - - if err == nil { - m.IfaceName = iface.Name - } - - return m, err -} - -func getAndParse(c *netlink.Conn) ([]QdiscInfo, error) { - var res []QdiscInfo - - msgs, err := getQdiscMsgs(c) - - if err != nil { - return nil, err - } - - for _, msg := range msgs { - m, err := parseMessage(msg) - - if err != nil { - return nil, err - } - - res = append(res, m) - } - - return res, nil -} - -func Get() ([]QdiscInfo, error) { - const familyRoute = 0 - - c, err := netlink.Dial(familyRoute, nil) - if err != nil { - return nil, fmt.Errorf("failed to dial netlink: %v", err) - } - defer c.Close() - - return getAndParse(c) -} diff --git a/vendor/github.com/ema/qdisc/go.mod b/vendor/github.com/ema/qdisc/go.mod deleted file mode 100644 index 38279a3446..0000000000 --- a/vendor/github.com/ema/qdisc/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/ema/qdisc - -go 1.12 - -require ( - github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552 // indirect - github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09 - golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 // indirect -) diff --git a/vendor/github.com/ema/qdisc/go.sum b/vendor/github.com/ema/qdisc/go.sum deleted file mode 100644 index 116818ba82..0000000000 --- a/vendor/github.com/ema/qdisc/go.sum +++ /dev/null @@ -1,20 +0,0 @@ -github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552 h1:Ve/e6edHdAHn+8/24Xco7IhQCv3u5Dab2qZNvR9e5/U= -github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= -github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09 h1:U2vuol6i4UF6MSpZJclH4HHiLRMoq1NAzxpIpCUJK/Y= -github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4= -golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE deleted file mode 100644 index 9d83342acd..0000000000 --- a/vendor/github.com/go-kit/kit/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md deleted file mode 100644 index a201a3d922..0000000000 --- a/vendor/github.com/go-kit/kit/log/README.md +++ /dev/null @@ -1,151 +0,0 @@ -# package log - -`package log` provides a minimal interface for structured logging in services. -It may be wrapped to encode conventions, enforce type-safety, provide leveled -logging, and so on. It can be used for both typical application log events, -and log-structured data streams. - -## Structured logging - -Structured logging is, basically, conceding to the reality that logs are -_data_, and warrant some level of schematic rigor. Using a stricter, -key/value-oriented message format for our logs, containing contextual and -semantic information, makes it much easier to get insight into the -operational activity of the systems we build. Consequently, `package log` is -of the strong belief that "[the benefits of structured logging outweigh the -minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". - -Migrating from unstructured to structured logging is probably a lot easier -than you'd expect. - -```go -// Unstructured -log.Printf("HTTP server listening on %s", addr) - -// Structured -logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") -``` - -## Usage - -### Typical application logging - -```go -w := log.NewSyncWriter(os.Stderr) -logger := log.NewLogfmtLogger(w) -logger.Log("question", "what is the meaning of life?", "answer", 42) - -// Output: -// question="what is the meaning of life?" answer=42 -``` - -### Contextual Loggers - -```go -func main() { - var logger log.Logger - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - logger = log.With(logger, "instance_id", 123) - - logger.Log("msg", "starting") - NewWorker(log.With(logger, "component", "worker")).Run() - NewSlacker(log.With(logger, "component", "slacker")).Run() -} - -// Output: -// instance_id=123 msg=starting -// instance_id=123 component=worker msg=running -// instance_id=123 component=slacker msg=running -``` - -### Interact with stdlib logger - -Redirect stdlib logger to Go kit logger. - -```go -import ( - "os" - stdlog "log" - kitlog "github.com/go-kit/kit/log" -) - -func main() { - logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) - stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) - stdlog.Print("I sure like pie") -} - -// Output: -// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} -``` - -Or, if, for legacy reasons, you need to pipe all of your logging through the -stdlib log package, you can redirect Go kit logger to the stdlib logger. - -```go -logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) -logger.Log("legacy", true, "msg", "at least it's something") - -// Output: -// 2016/01/01 12:34:56 legacy=true msg="at least it's something" -``` - -### Timestamps and callers - -```go -var logger log.Logger -logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) -logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - -logger.Log("msg", "hello") - -// Output: -// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello -``` - -## Levels - -Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level). 
- -## Supported output formats - -- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) -- JSON - -## Enhancements - -`package log` is centered on the one-method Logger interface. - -```go -type Logger interface { - Log(keyvals ...interface{}) error -} -``` - -This interface, and its supporting code like is the product of much iteration -and evaluation. For more details on the evolution of the Logger interface, -see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), -a talk by [Chris Hines](https://github.com/ChrisHines). -Also, please see -[#63](https://github.com/go-kit/kit/issues/63), -[#76](https://github.com/go-kit/kit/pull/76), -[#131](https://github.com/go-kit/kit/issues/131), -[#157](https://github.com/go-kit/kit/pull/157), -[#164](https://github.com/go-kit/kit/issues/164), and -[#252](https://github.com/go-kit/kit/pull/252) -to review historical conversations about package log and the Logger interface. - -Value-add packages and suggestions, -like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level), -are of course welcome. Good proposals should - -- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With), -- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and -- Be friendly to packages that accept only an unadorned log.Logger. - -## Benchmarks & comparisons - -There are a few Go logging benchmarks and comparisons that include Go kit's package log. - -- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log -- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go deleted file mode 100644 index 918c0af46f..0000000000 --- a/vendor/github.com/go-kit/kit/log/doc.go +++ /dev/null @@ -1,116 +0,0 @@ -// Package log provides a structured logger. -// -// Structured logging produces logs easily consumed later by humans or -// machines. Humans might be interested in debugging errors, or tracing -// specific requests. Machines might be interested in counting interesting -// events, or aggregating information for off-line processing. In both cases, -// it is important that the log messages are structured and actionable. -// Package log is designed to encourage both of these best practices. -// -// Basic Usage -// -// The fundamental interface is Logger. Loggers create log events from -// key/value data. The Logger interface has a single method, Log, which -// accepts a sequence of alternating key/value pairs, which this package names -// keyvals. -// -// type Logger interface { -// Log(keyvals ...interface{}) error -// } -// -// Here is an example of a function using a Logger to create log events. -// -// func RunTask(task Task, logger log.Logger) string { -// logger.Log("taskID", task.ID, "event", "starting task") -// ... -// logger.Log("taskID", task.ID, "event", "task complete") -// } -// -// The keys in the above example are "taskID" and "event". The values are -// task.ID, "starting task", and "task complete". Every key is followed -// immediately by its value. -// -// Keys are usually plain strings. Values may be any type that has a sensible -// encoding in the chosen log format. 
With structured logging it is a good -// idea to log simple values without formatting them. This practice allows -// the chosen logger to encode values in the most appropriate way. -// -// Contextual Loggers -// -// A contextual logger stores keyvals that it includes in all log events. -// Building appropriate contextual loggers reduces repetition and aids -// consistency in the resulting log output. With and WithPrefix add context to -// a logger. We can use With to improve the RunTask example. -// -// func RunTask(task Task, logger log.Logger) string { -// logger = log.With(logger, "taskID", task.ID) -// logger.Log("event", "starting task") -// ... -// taskHelper(task.Cmd, logger) -// ... -// logger.Log("event", "task complete") -// } -// -// The improved version emits the same log events as the original for the -// first and last calls to Log. Passing the contextual logger to taskHelper -// enables each log event created by taskHelper to include the task.ID even -// though taskHelper does not have access to that value. Using contextual -// loggers this way simplifies producing log output that enables tracing the -// life cycle of individual tasks. (See the Contextual example for the full -// code of the above snippet.) -// -// Dynamic Contextual Values -// -// A Valuer function stored in a contextual logger generates a new value each -// time an event is logged. The Valuer example demonstrates how this feature -// works. -// -// Valuers provide the basis for consistently logging timestamps and source -// code location. The log package defines several valuers for that purpose. -// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and -// DefaultCaller. A common logger initialization sequence that ensures all log -// entries contain a timestamp and source location looks like this: -// -// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) -// -// Concurrent Safety -// -// Applications with multiple goroutines want each log event written to the -// same logger to remain separate from other log events. Package log provides -// two simple solutions for concurrent safe logging. -// -// NewSyncWriter wraps an io.Writer and serializes each call to its Write -// method. Using a SyncWriter has the benefit that the smallest practical -// portion of the logging logic is performed within a mutex, but it requires -// the formatting Logger to make only one call to Write per log event. -// -// NewSyncLogger wraps any Logger and serializes each call to its Log method. -// Using a SyncLogger has the benefit that it guarantees each log event is -// handled atomically within the wrapped logger, but it typically serializes -// both the formatting and output logic. Use a SyncLogger if the formatting -// logger may perform multiple writes per log event. -// -// Error Handling -// -// This package relies on the practice of wrapping or decorating loggers with -// other loggers to provide composable pieces of functionality. It also means -// that Logger.Log must return an error because some -// implementations—especially those that output log data to an io.Writer—may -// encounter errors that cannot be handled locally. This in turn means that -// Loggers that wrap other loggers should return errors from the wrapped -// logger up the stack. -// -// Fortunately, the decorator pattern also provides a way to avoid the -// necessity to check for errors every time an application calls Logger.Log. 
-// An application required to panic whenever its Logger encounters -// an error could initialize its logger as follows. -// -// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger := log.LoggerFunc(func(keyvals ...interface{}) error { -// if err := fmtlogger.Log(keyvals...); err != nil { -// panic(err) -// } -// return nil -// }) -package log diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go deleted file mode 100644 index 0cedbf8247..0000000000 --- a/vendor/github.com/go-kit/kit/log/json_logger.go +++ /dev/null @@ -1,91 +0,0 @@ -package log - -import ( - "encoding" - "encoding/json" - "fmt" - "io" - "reflect" -) - -type jsonLogger struct { - io.Writer -} - -// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a -// single JSON object. Each log event produces no more than one call to -// w.Write. The passed Writer must be safe for concurrent use by multiple -// goroutines if the returned Logger will be used concurrently. -func NewJSONLogger(w io.Writer) Logger { - return &jsonLogger{w} -} - -func (l *jsonLogger) Log(keyvals ...interface{}) error { - n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd - m := make(map[string]interface{}, n) - for i := 0; i < len(keyvals); i += 2 { - k := keyvals[i] - var v interface{} = ErrMissingValue - if i+1 < len(keyvals) { - v = keyvals[i+1] - } - merge(m, k, v) - } - enc := json.NewEncoder(l.Writer) - enc.SetEscapeHTML(false) - return enc.Encode(m) -} - -func merge(dst map[string]interface{}, k, v interface{}) { - var key string - switch x := k.(type) { - case string: - key = x - case fmt.Stringer: - key = safeString(x) - default: - key = fmt.Sprint(x) - } - - // We want json.Marshaler and encoding.TextMarshaller to take priority over - // err.Error() and v.String(). But json.Marshall (called later) does that by - // default so we force a no-op if it's one of those 2 case. - switch x := v.(type) { - case json.Marshaler: - case encoding.TextMarshaler: - case error: - v = safeError(x) - case fmt.Stringer: - v = safeString(x) - } - - dst[key] = v -} - -func safeString(str fmt.Stringer) (s string) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s = "NULL" - } else { - panic(panicVal) - } - } - }() - s = str.String() - return -} - -func safeError(err error) (s interface{}) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - s = nil - } else { - panic(panicVal) - } - } - }() - s = err.Error() - return -} diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go deleted file mode 100644 index 505d307b11..0000000000 --- a/vendor/github.com/go-kit/kit/log/level/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package level implements leveled logging on top of Go kit's log package. To -// use the level package, create a logger as per normal in your func main, and -// wrap it with level.NewFilter. -// -// var logger log.Logger -// logger = log.NewLogfmtLogger(os.Stderr) -// logger = level.NewFilter(logger, level.AllowInfo()) // <-- -// logger = log.With(logger, "ts", log.DefaultTimestampUTC) -// -// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error -// helper methods to emit leveled log events. 
-// -// logger.Log("foo", "bar") // as normal, no level -// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) -// if value > 100 { -// level.Error(logger).Log("value", value) -// } -// -// NewFilter allows precise control over what happens when a log event is -// emitted without a level key, or if a squelched level is used. Check the -// Option functions for details. -package level diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go deleted file mode 100644 index fceafc454a..0000000000 --- a/vendor/github.com/go-kit/kit/log/level/level.go +++ /dev/null @@ -1,205 +0,0 @@ -package level - -import "github.com/go-kit/kit/log" - -// Error returns a logger that includes a Key/ErrorValue pair. -func Error(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), ErrorValue()) -} - -// Warn returns a logger that includes a Key/WarnValue pair. -func Warn(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), WarnValue()) -} - -// Info returns a logger that includes a Key/InfoValue pair. -func Info(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), InfoValue()) -} - -// Debug returns a logger that includes a Key/DebugValue pair. -func Debug(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), DebugValue()) -} - -// NewFilter wraps next and implements level filtering. See the commentary on -// the Option functions for a detailed description of how to configure levels. -// If no options are provided, all leveled log events created with Debug, -// Info, Warn or Error helper methods are squelched and non-leveled log -// events are passed to next unmodified. -func NewFilter(next log.Logger, options ...Option) log.Logger { - l := &logger{ - next: next, - } - for _, option := range options { - option(l) - } - return l -} - -type logger struct { - next log.Logger - allowed level - squelchNoLevel bool - errNotAllowed error - errNoLevel error -} - -func (l *logger) Log(keyvals ...interface{}) error { - var hasLevel, levelAllowed bool - for i := 1; i < len(keyvals); i += 2 { - if v, ok := keyvals[i].(*levelValue); ok { - hasLevel = true - levelAllowed = l.allowed&v.level != 0 - break - } - } - if !hasLevel && l.squelchNoLevel { - return l.errNoLevel - } - if hasLevel && !levelAllowed { - return l.errNotAllowed - } - return l.next.Log(keyvals...) -} - -// Option sets a parameter for the leveled logger. -type Option func(*logger) - -// AllowAll is an alias for AllowDebug. -func AllowAll() Option { - return AllowDebug() -} - -// AllowDebug allows error, warn, info and debug level log events to pass. -func AllowDebug() Option { - return allowed(levelError | levelWarn | levelInfo | levelDebug) -} - -// AllowInfo allows error, warn and info level log events to pass. -func AllowInfo() Option { - return allowed(levelError | levelWarn | levelInfo) -} - -// AllowWarn allows error and warn level log events to pass. -func AllowWarn() Option { - return allowed(levelError | levelWarn) -} - -// AllowError allows only error level log events to pass. -func AllowError() Option { - return allowed(levelError) -} - -// AllowNone allows no leveled log events to pass. -func AllowNone() Option { - return allowed(0) -} - -func allowed(allowed level) Option { - return func(l *logger) { l.allowed = allowed } -} - -// ErrNotAllowed sets the error to return from Log when it squelches a log -// event disallowed by the configured Allow[Level] option. 
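A small sketch of wiring the filter and the level helpers together, assuming the log and log/level packages from this vendor tree; the choice of AllowWarn and of a JSON logger on stderr is arbitrary:

    package main

    import (
        "errors"
        "os"

        "github.com/go-kit/kit/log"
        "github.com/go-kit/kit/log/level"
    )

    func main() {
        var logger log.Logger
        logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr))
        logger = level.NewFilter(logger, level.AllowWarn()) // drop info and debug events

        level.Debug(logger).Log("msg", "dropped by the filter")
        level.Error(logger).Log("msg", "passed through", "err", errors.New("boom"))
    }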
By default, -// ErrNotAllowed is nil; in this case the log event is squelched with no -// error. -func ErrNotAllowed(err error) Option { - return func(l *logger) { l.errNotAllowed = err } -} - -// SquelchNoLevel instructs Log to squelch log events with no level, so that -// they don't proceed through to the wrapped logger. If SquelchNoLevel is set -// to true and a log event is squelched in this way, the error value -// configured with ErrNoLevel is returned to the caller. -func SquelchNoLevel(squelch bool) Option { - return func(l *logger) { l.squelchNoLevel = squelch } -} - -// ErrNoLevel sets the error to return from Log when it squelches a log event -// with no level. By default, ErrNoLevel is nil; in this case the log event is -// squelched with no error. -func ErrNoLevel(err error) Option { - return func(l *logger) { l.errNoLevel = err } -} - -// NewInjector wraps next and returns a logger that adds a Key/level pair to -// the beginning of log events that don't already contain a level. In effect, -// this gives a default level to logs without a level. -func NewInjector(next log.Logger, level Value) log.Logger { - return &injector{ - next: next, - level: level, - } -} - -type injector struct { - next log.Logger - level interface{} -} - -func (l *injector) Log(keyvals ...interface{}) error { - for i := 1; i < len(keyvals); i += 2 { - if _, ok := keyvals[i].(*levelValue); ok { - return l.next.Log(keyvals...) - } - } - kvs := make([]interface{}, len(keyvals)+2) - kvs[0], kvs[1] = key, l.level - copy(kvs[2:], keyvals) - return l.next.Log(kvs...) -} - -// Value is the interface that each of the canonical level values implement. -// It contains unexported methods that prevent types from other packages from -// implementing it and guaranteeing that NewFilter can distinguish the levels -// defined in this package from all other values. -type Value interface { - String() string - levelVal() -} - -// Key returns the unique key added to log events by the loggers in this -// package. -func Key() interface{} { return key } - -// ErrorValue returns the unique value added to log events by Error. -func ErrorValue() Value { return errorValue } - -// WarnValue returns the unique value added to log events by Warn. -func WarnValue() Value { return warnValue } - -// InfoValue returns the unique value added to log events by Info. -func InfoValue() Value { return infoValue } - -// DebugValue returns the unique value added to log events by Warn. -func DebugValue() Value { return debugValue } - -var ( - // key is of type interface{} so that it allocates once during package - // initialization and avoids allocating every time the value is added to a - // []interface{} later. - key interface{} = "level" - - errorValue = &levelValue{level: levelError, name: "error"} - warnValue = &levelValue{level: levelWarn, name: "warn"} - infoValue = &levelValue{level: levelInfo, name: "info"} - debugValue = &levelValue{level: levelDebug, name: "debug"} -) - -type level byte - -const ( - levelDebug level = 1 << iota - levelInfo - levelWarn - levelError -) - -type levelValue struct { - name string - level -} - -func (v *levelValue) String() string { return v.name } -func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go deleted file mode 100644 index 66a9e2fde7..0000000000 --- a/vendor/github.com/go-kit/kit/log/log.go +++ /dev/null @@ -1,135 +0,0 @@ -package log - -import "errors" - -// Logger is the fundamental interface for all log operations. 
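A sketch of NewInjector supplying a default level to unleveled events before they reach the filter; picking info as the default is an arbitrary choice for the example:

    package main

    import (
        "os"

        "github.com/go-kit/kit/log"
        "github.com/go-kit/kit/log/level"
    )

    func main() {
        var logger log.Logger
        logger = log.NewLogfmtLogger(os.Stdout)
        logger = level.NewFilter(logger, level.AllowInfo(), level.SquelchNoLevel(true))
        logger = level.NewInjector(logger, level.InfoValue())

        // Without the injector this event would be squelched for lacking a level;
        // the injector stamps it level=info, so the filter lets it through.
        logger.Log("msg", "hello")
    }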
Log creates a -// log event from keyvals, a variadic sequence of alternating keys and values. -// Implementations must be safe for concurrent use by multiple goroutines. In -// particular, any implementation of Logger that appends to keyvals or -// modifies or retains any of its elements must make a copy first. -type Logger interface { - Log(keyvals ...interface{}) error -} - -// ErrMissingValue is appended to keyvals slices with odd length to substitute -// the missing value. -var ErrMissingValue = errors.New("(MISSING)") - -// With returns a new contextual logger with keyvals prepended to those passed -// to calls to Log. If logger is also a contextual logger created by With or -// WithPrefix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func With(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - return &context{ - logger: l.logger, - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - keyvals: kvs[:len(kvs):len(kvs)], - hasValuer: l.hasValuer || containsValuer(keyvals), - } -} - -// WithPrefix returns a new contextual logger with keyvals prepended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With or WithPrefix, keyvals is prepended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithPrefix(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - n := len(l.keyvals) + len(keyvals) - if len(keyvals)%2 != 0 { - n++ - } - kvs := make([]interface{}, 0, n) - kvs = append(kvs, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - kvs = append(kvs, l.keyvals...) - return &context{ - logger: l.logger, - keyvals: kvs, - hasValuer: l.hasValuer || containsValuer(keyvals), - } -} - -// context is the Logger implementation returned by With and WithPrefix. It -// wraps a Logger and holds keyvals that it includes in all log events. Its -// Log method calls bindValues to generate values for each Valuer in the -// context keyvals. -// -// A context must always have the same number of stack frames between calls to -// its Log method and the eventual binding of Valuers to their value. This -// requirement comes from the functional requirement to allow a context to -// resolve application call site information for a Caller stored in the -// context. To do this we must be able to predict the number of logging -// functions on the stack when bindValues is called. -// -// Two implementation details provide the needed stack depth consistency. -// -// 1. newContext avoids introducing an additional layer when asked to -// wrap another context. -// 2. 
With and WithPrefix avoid introducing an additional layer by -// returning a newly constructed context with a merged keyvals rather -// than simply wrapping the existing context. -type context struct { - logger Logger - keyvals []interface{} - hasValuer bool -} - -func newContext(logger Logger) *context { - if c, ok := logger.(*context); ok { - return c - } - return &context{logger: logger} -} - -// Log replaces all value elements (odd indexes) containing a Valuer in the -// stored context with their generated value, appends keyvals, and passes the -// result to the wrapped Logger. -func (l *context) Log(keyvals ...interface{}) error { - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - if l.hasValuer { - // If no keyvals were appended above then we must copy l.keyvals so - // that future log events will reevaluate the stored Valuers. - if len(keyvals) == 0 { - kvs = append([]interface{}{}, l.keyvals...) - } - bindValues(kvs[:len(l.keyvals)]) - } - return l.logger.Log(kvs...) -} - -// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If -// f is a function with the appropriate signature, LoggerFunc(f) is a Logger -// object that calls f. -type LoggerFunc func(...interface{}) error - -// Log implements Logger by calling f(keyvals...). -func (f LoggerFunc) Log(keyvals ...interface{}) error { - return f(keyvals...) -} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go deleted file mode 100644 index a00305298b..0000000000 --- a/vendor/github.com/go-kit/kit/log/logfmt_logger.go +++ /dev/null @@ -1,62 +0,0 @@ -package log - -import ( - "bytes" - "io" - "sync" - - "github.com/go-logfmt/logfmt" -) - -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -func (l *logfmtEncoder) Reset() { - l.Encoder.Reset() - l.buf.Reset() -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -type logfmtLogger struct { - w io.Writer -} - -// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in -// logfmt format. Each log event produces no more than one call to w.Write. -// The passed Writer must be safe for concurrent use by multiple goroutines if -// the returned Logger will be used concurrently. -func NewLogfmtLogger(w io.Writer) Logger { - return &logfmtLogger{w} -} - -func (l logfmtLogger) Log(keyvals ...interface{}) error { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return err - } - - // The Logger interface requires implementations to be safe for concurrent - // use by multiple goroutines. For this implementation that means making - // only one call to l.w.Write() for each call to Log. - if _, err := l.w.Write(enc.buf.Bytes()); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go deleted file mode 100644 index 1047d626c4..0000000000 --- a/vendor/github.com/go-kit/kit/log/nop_logger.go +++ /dev/null @@ -1,8 +0,0 @@ -package log - -type nopLogger struct{} - -// NewNopLogger returns a logger that doesn't do anything. 
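A sketch contrasting the key ordering produced by With and WithPrefix on a logger that already carries context; the logfmt output one would expect is noted in the comments:

    package main

    import (
        "os"

        "github.com/go-kit/kit/log"
    )

    func main() {
        base := log.NewLogfmtLogger(os.Stdout)
        logger := log.With(base, "a", 1)

        // With appends to the existing context.
        log.With(logger, "b", 2).Log("msg", "hi") // a=1 b=2 msg=hi

        // WithPrefix prepends to the existing context.
        log.WithPrefix(logger, "b", 2).Log("msg", "hi") // b=2 a=1 msg=hi
    }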
-func NewNopLogger() Logger { return nopLogger{} } - -func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go deleted file mode 100644 index ff96b5dee5..0000000000 --- a/vendor/github.com/go-kit/kit/log/stdlib.go +++ /dev/null @@ -1,116 +0,0 @@ -package log - -import ( - "io" - "log" - "regexp" - "strings" -) - -// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's -// designed to be passed to a Go kit logger as the writer, for cases where -// it's necessary to redirect all Go kit log output to the stdlib logger. -// -// If you have any choice in the matter, you shouldn't use this. Prefer to -// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. -type StdlibWriter struct{} - -// Write implements io.Writer. -func (w StdlibWriter) Write(p []byte) (int, error) { - log.Print(strings.TrimSpace(string(p))) - return len(p), nil -} - -// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib -// logger's SetOutput. It will extract date/timestamps, filenames, and -// messages, and place them under relevant keys. -type StdlibAdapter struct { - Logger - timestampKey string - fileKey string - messageKey string -} - -// StdlibAdapterOption sets a parameter for the StdlibAdapter. -type StdlibAdapterOption func(*StdlibAdapter) - -// TimestampKey sets the key for the timestamp field. By default, it's "ts". -func TimestampKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.timestampKey = key } -} - -// FileKey sets the key for the file and line field. By default, it's "caller". -func FileKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.fileKey = key } -} - -// MessageKey sets the key for the actual log message. By default, it's "msg". -func MessageKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.messageKey = key } -} - -// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed -// logger. It's designed to be passed to log.SetOutput. -func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { - a := StdlibAdapter{ - Logger: logger, - timestampKey: "ts", - fileKey: "caller", - messageKey: "msg", - } - for _, option := range options { - option(&a) - } - return a -} - -func (a StdlibAdapter) Write(p []byte) (int, error) { - result := subexps(p) - keyvals := []interface{}{} - var timestamp string - if date, ok := result["date"]; ok && date != "" { - timestamp = date - } - if time, ok := result["time"]; ok && time != "" { - if timestamp != "" { - timestamp += " " - } - timestamp += time - } - if timestamp != "" { - keyvals = append(keyvals, a.timestampKey, timestamp) - } - if file, ok := result["file"]; ok && file != "" { - keyvals = append(keyvals, a.fileKey, file) - } - if msg, ok := result["msg"]; ok { - keyvals = append(keyvals, a.messageKey, msg) - } - if err := a.Logger.Log(keyvals...); err != nil { - return 0, err - } - return len(p), nil -} - -const ( - logRegexpDate = `(?P[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` - logRegexpTime = `(?P