diff --git a/.chloggen/codeboten_make-otel-default.yaml b/.chloggen/codeboten_make-otel-default.yaml new file mode 100755 index 00000000000..a0bd1505c3e --- /dev/null +++ b/.chloggen/codeboten_make-otel-default.yaml @@ -0,0 +1,29 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: service + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Enable `telemetry.useOtelForInternalMetrics` by updating the flag to beta" + +# One or more tracking issues or pull requests related to the change +issues: [7454] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + The metrics generated should be consistent with the metrics generated + previously with OpenCensus. Users can disable the behaviour + by setting `--feature-gates -telemetry.useOtelForInternalMetrics` at + collector start. + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] \ No newline at end of file diff --git a/.chloggen/remove_noisy_log.yaml b/.chloggen/codeboten_semconv1.22.yaml similarity index 88% rename from .chloggen/remove_noisy_log.yaml rename to .chloggen/codeboten_semconv1.22.yaml index f9e3e1beb02..91ff410cff3 100755 --- a/.chloggen/remove_noisy_log.yaml +++ b/.chloggen/codeboten_semconv1.22.yaml @@ -1,16 +1,16 @@ # Use this changelog template to create an entry for release notes. # One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: bug_fix +change_type: enhancement # The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) -component: exporterhelper +component: semconv # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Remove noisy log +note: Generated Semantic conventions 1.22.0. # One or more tracking issues or pull requests related to the change -issues: [9017] +issues: [8686] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. @@ -22,4 +22,4 @@ subtext: # Include 'user' if the change is relevant to end users. # Include 'api' if there is a change to a library API. # Default: '[user]' -change_logs: [] \ No newline at end of file +change_logs: [] diff --git a/.chloggen/codeboten_enable-otel-by-default.yaml b/.chloggen/fix-running-as-service-detection.yaml old mode 100755 new mode 100644 similarity index 77% rename from .chloggen/codeboten_enable-otel-by-default.yaml rename to .chloggen/fix-running-as-service-detection.yaml index 08cb36bfe8e..d5f3909e6c2 --- a/.chloggen/codeboten_enable-otel-by-default.yaml +++ b/.chloggen/fix-running-as-service-detection.yaml @@ -1,25 +1,25 @@ # Use this changelog template to create an entry for release notes. 
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: enhancement +change_type: bug_fix # The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) -component: service +component: cmd/otelcorecol # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: "add resource attributes as labels to otel metrics to ensures backwards compatibility with OpenCensus metrics." +note: Fix the code detecting if the collector is running as a service on Windows. # One or more tracking issues or pull requests related to the change -issues: [9029] +issues: [7350] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. # Use pipe (|) for multiline entries. -subtext: +subtext: Removed the `NO_WINDOWS_SERVICE` environment variable given it is not needed anymore. # Optional: The change log or logs in which this entry should be included. # e.g. '[user]' or '[user, api]' # Include 'user' if the change is relevant to end users. # Include 'api' if there is a change to a library API. # Default: '[user]' -change_logs: [] \ No newline at end of file +change_logs: [user] \ No newline at end of file diff --git a/.chloggen/automated-status-on-start.yaml b/.chloggen/memory-limiter-update-config-validation-errors.yaml similarity index 77% rename from .chloggen/automated-status-on-start.yaml rename to .chloggen/memory-limiter-update-config-validation-errors.yaml index 8eb1df926b6..eb7730e8da6 100755 --- a/.chloggen/automated-status-on-start.yaml +++ b/.chloggen/memory-limiter-update-config-validation-errors.yaml @@ -4,22 +4,24 @@ change_type: enhancement # The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) -component: statusreporting +component: processor/memory_limiter # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Automates status reporting upon the completion of component.Start(). +note: Update config validation errors # One or more tracking issues or pull requests related to the change -issues: [7682] +issues: [9059] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. # Use pipe (|) for multiline entries. -subtext: +subtext: | + - Fix names of the config fields that are validated in the error messages + - Move the validation from start to the initialization phrase # Optional: The change log or logs in which this entry should be included. # e.g. '[user]' or '[user, api]' # Include 'user' if the change is relevant to end users. # Include 'api' if there is a change to a library API. 
# Default: '[user]' -change_logs: [] \ No newline at end of file +change_logs: [user] \ No newline at end of file diff --git a/.github/workflows/api-compatibility.yml b/.github/workflows/api-compatibility.yml index 1872911b271..2a38f4ccd99 100644 --- a/.github/workflows/api-compatibility.yml +++ b/.github/workflows/api-compatibility.yml @@ -32,9 +32,9 @@ jobs: path: ${{ github.head_ref }} - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 # Generate apidiff states of Main - name: Generate-States diff --git a/.github/workflows/build-and-test-windows.yaml b/.github/workflows/build-and-test-windows.yaml index b78e5e25018..732a026ebba 100644 --- a/.github/workflows/build-and-test-windows.yaml +++ b/.github/workflows/build-and-test-windows.yaml @@ -17,9 +17,9 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 cache: false - name: Cache Go uses: actions/cache@v3 diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index a9ee407ea3f..2400aa12b3c 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -17,9 +17,9 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 cache: false - name: Cache Go id: go-cache @@ -40,9 +40,9 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 cache: false - name: Cache Go id: go-cache @@ -64,9 +64,9 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 cache: false - name: Cache Go id: go-cache @@ -89,9 +89,9 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 cache: false - name: Cache Go id: go-cache @@ -133,14 +133,14 @@ jobs: unittest-matrix: strategy: matrix: - go-version: ["~1.21.4", "~1.20.11"] # 1.20 needs quotes otherwise it's interpreted as 1.2 + go-version: ["~1.21.5", "~1.20.12"] # 1.20 needs quotes otherwise it's interpreted as 1.2 runs-on: ubuntu-latest needs: [setup-environment] steps: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ${{ matrix.go-version }} cache: false @@ -183,7 +183,7 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ~1.20.6 cache: false @@ -251,9 +251,9 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 cache: false - name: Cache Go id: go-cache diff --git a/.github/workflows/builder-integration-test.yaml b/.github/workflows/builder-integration-test.yaml index 981897de5c6..0dd446c4aa6 100644 --- a/.github/workflows/builder-integration-test.yaml +++ b/.github/workflows/builder-integration-test.yaml @@ -28,8 +28,8 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: 
actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 - name: Test run: make builder-integration-test diff --git a/.github/workflows/builder-release.yaml b/.github/workflows/builder-release.yaml index 42a8df6df47..5fb341a6e9c 100644 --- a/.github/workflows/builder-release.yaml +++ b/.github/workflows/builder-release.yaml @@ -14,9 +14,9 @@ jobs: with: fetch-depth: 0 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 - name: Run GoReleaser uses: goreleaser/goreleaser-action@v5 with: diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 9e9873b2364..71015ff5687 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -27,9 +27,9 @@ jobs: with: fetch-depth: 0 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 - name: Cache Go id: go-cache uses: actions/cache@v3 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 75b68eb7559..fe864f6a118 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -17,9 +17,9 @@ jobs: uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/contrib-tests.yml b/.github/workflows/contrib-tests.yml index 7003881e264..358a339af6f 100644 --- a/.github/workflows/contrib-tests.yml +++ b/.github/workflows/contrib-tests.yml @@ -21,9 +21,9 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 cache: false - name: Run Contrib Tests run: | diff --git a/.github/workflows/generate-semantic-conventions-pr.yaml b/.github/workflows/generate-semantic-conventions-pr.yaml new file mode 100644 index 00000000000..7543a044d4d --- /dev/null +++ b/.github/workflows/generate-semantic-conventions-pr.yaml @@ -0,0 +1,107 @@ +name: Generate Semantic Conventions PR + +on: + schedule: + # Daily at 01:30 (UTC) + - cron: '30 1 * * *' + workflow_dispatch: + +jobs: + check-versions: + runs-on: ubuntu-latest + outputs: + latest-version: ${{ steps.check-versions.outputs.latest-version }} + already-added: ${{ steps.check-versions.outputs.already-added }} + already-opened: ${{ steps.check-versions.outputs.already-opened }} + steps: + - uses: actions/checkout@v4 + + - id: check-versions + name: Check versions + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # version in this repo are prefixed with v + latest_version=v$(gh release view \ + --repo open-telemetry/semantic-conventions \ + --json tagName \ + --jq .tagName \ + | sed 's/^v//') + + rc=$(find semconv -name $latest_version | grep .) + if $rc == 0; then + already_added=true + fi + + matches=$(gh pr list \ + --author opentelemetrybot \ + --state open \ + --search "in:title \"Add semantic conventions version $latest_version\"") + if [ ! 
-z "$matches" ] + then + already_opened=true + fi + + echo "latest-version=$latest_version" >> $GITHUB_OUTPUT + echo "already-added=$already_added" >> $GITHUB_OUTPUT + echo "already-opened=$already_opened" >> $GITHUB_OUTPUT + + update-semantic-conventions: + runs-on: ubuntu-latest + if: | + needs.check-versions.outputs.already-added != 'true' && + needs.check-versions.outputs.already-opened != 'true' + needs: + - check-versions + steps: + - uses: actions/checkout@v4 + - name: Checkout semantic-convention + uses: actions/checkout@v4 + with: + repository: open-telemetry/semantic-convention + path: tmp-semantic-conventions + + - name: Update version + env: + VERSION: ${{ needs.check-versions.outputs.latest-version }} + run: | + make gensemconv SPECPATH=./tmp-semantic-conventions SPECTAG=$VERSION + git diff + + - name: Use CLA approved github bot + run: | + git config user.name opentelemetrybot + git config user.email 107717825+opentelemetrybot@users.noreply.github.com + + - name: Create pull request against main + env: + VERSION: ${{ needs.check-versions.outputs.latest-version }} + # not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows + GH_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }} + run: | + message="Add semantic conventions version $VERSION" + body="Add semantic conventions version \`$VERSION\`." + branch="opentelemetrybot/add-semantic-conventions-${VERSION}" + + git checkout -b $branch + git add semconv/ + git commit -m "$message" + git push --set-upstream origin $branch + url=$(gh pr create --title "$message" \ + --body "$body" \ + --base main) + + pull_request_number=${url//*\//} + + # see the template for change log entry file at blob/main/.chloggen/TEMPLATE.yaml + cat > .chloggen/semconv-$VERSION.yaml << EOF + change_type: enhancement + component: semconv + note: Add semantic conventions version $VERSION + issues: [ $pull_request_number ] + EOF + + git add .chloggen/semconv-$VERSION.yaml + + git commit -m "Add change log entry" + git push diff --git a/.github/workflows/perf.yml b/.github/workflows/perf.yml index c56e2e6f7ef..10055b629d4 100644 --- a/.github/workflows/perf.yml +++ b/.github/workflows/perf.yml @@ -11,9 +11,9 @@ jobs: - uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 - name: Run benchmark run: make gobenchmark diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml index b0e7f5bdb06..b51b1384d77 100644 --- a/.github/workflows/prepare-release.yml +++ b/.github/workflows/prepare-release.yml @@ -51,9 +51,9 @@ jobs: REPO: open-telemetry/opentelemetry-collector-contrib run: ./.github/workflows/scripts/release-check-build-status.sh - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 # Prepare Core for release. # - Update CHANGELOG.md file, this is done via chloggen # - Run make prepare-release PREVIOUS_VERSION=1.0.0 RELEASE_CANDIDATE=1.1.0 MODSET=stable diff --git a/.github/workflows/stale-pr.yaml b/.github/workflows/stale-pr.yaml index 355edc64da4..c361451b17d 100644 --- a/.github/workflows/stale-pr.yaml +++ b/.github/workflows/stale-pr.yaml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: 'This PR was marked stale due to lack of activity. It will be closed in 14 days.' 
diff --git a/.github/workflows/tidy-dependencies.yml b/.github/workflows/tidy-dependencies.yml index df374853a40..525880293d5 100644 --- a/.github/workflows/tidy-dependencies.yml +++ b/.github/workflows/tidy-dependencies.yml @@ -14,9 +14,9 @@ jobs: - uses: actions/checkout@v4 with: ref: ${{ github.head_ref }} - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: ~1.20.11 + go-version: ~1.20.12 cache: false - name: Cache Go id: go-cache diff --git a/CHANGELOG-API.md b/CHANGELOG-API.md index 4ef6591a436..64b6152d4dc 100644 --- a/CHANGELOG-API.md +++ b/CHANGELOG-API.md @@ -7,6 +7,8 @@ If you are looking for user-facing changes, check out [CHANGELOG.md](./CHANGELOG +## v0.91.0 + ## v1.0.0/v0.90.0 ### 🛑 Breaking changes 🛑 diff --git a/CHANGELOG.md b/CHANGELOG.md index ad44fca0b99..e95af0b586a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,25 @@ If you are looking for developer-facing changes, check out [CHANGELOG-API.md](./ +## v0.91.0 + +### 💡 Enhancements 💡 + +- `statusreporting`: Automates status reporting upon the completion of component.Start(). (#7682) +- `service`: add resource attributes as labels to otel metrics to ensures backwards compatibility with OpenCensus metrics. (#9029) +- `semconv`: Generated Semantic conventions 1.21. (#9056) +- `config/confighttp`: Exposes http/2 transport settings to enable health check and workaround golang http/2 issue https://github.com/golang/go/issues/59690 (#9022) +- `cmd/builder`: running builder version on binaries installed with `go install` will output the version specified at the suffix. (#8770) + +### 🧰 Bug fixes 🧰 + +- `exporterhelper`: fix missed metric aggregations (#9048) + This ensures that context cancellation in the exporter doesn't interfere with metric aggregation. The OTel + SDK currently returns if there's an error in the context used in `Add`. This means that if there's a + cancelled context in an export, the metrics are now recorded. + +- `service`: Fix bug where MutatesData would not correctly propagate through connectors. (#9053) + ## v0.90.1 ### 🧰 Bug fixes 🧰 diff --git a/Makefile b/Makefile index 426af0d5224..014988194d7 100644 --- a/Makefile +++ b/Makefile @@ -227,13 +227,14 @@ genpdata: $(MAKE) fmt # Generate semantic convention constants. Requires a clone of the opentelemetry-specification repo -gensemconv: +gensemconv: $(SEMCONVGEN) $(SEMCONVKIT) @[ "${SPECPATH}" ] || ( echo ">> env var SPECPATH is not set"; exit 1 ) @[ "${SPECTAG}" ] || ( echo ">> env var SPECTAG is not set"; exit 1 ) @echo "Generating semantic convention constants from specification version ${SPECTAG} at ${SPECPATH}" - semconvgen -o semconv/${SPECTAG} -t semconv/template.j2 -s ${SPECTAG} -i ${SPECPATH}/semantic_conventions/. --only=resource -p conventionType=resource -f generated_resource.go - semconvgen -o semconv/${SPECTAG} -t semconv/template.j2 -s ${SPECTAG} -i ${SPECPATH}/semantic_conventions/. --only=event -p conventionType=event -f generated_event.go - semconvgen -o semconv/${SPECTAG} -t semconv/template.j2 -s ${SPECTAG} -i ${SPECPATH}/semantic_conventions/. --only=span -p conventionType=trace -f generated_trace.go + $(SEMCONVGEN) -o semconv/${SPECTAG} -t semconv/template.j2 -s ${SPECTAG} -i ${SPECPATH}/model/. --only=resource -p conventionType=resource -f generated_resource.go + $(SEMCONVGEN) -o semconv/${SPECTAG} -t semconv/template.j2 -s ${SPECTAG} -i ${SPECPATH}/model/. 
--only=event -p conventionType=event -f generated_event.go + $(SEMCONVGEN) -o semconv/${SPECTAG} -t semconv/template.j2 -s ${SPECTAG} -i ${SPECPATH}/model/. --only=span -p conventionType=trace -f generated_trace.go + $(SEMCONVKIT) -output "semconv/$(SPECTAG)" -tag "$(SPECTAG)" # Checks that the HEAD of the contrib repo checked out in CONTRIB_PATH compiles # against the current version of this repo. @@ -382,12 +383,8 @@ push-tags: $(MULTIMOD) done; .PHONY: check-changes -check-changes: $(YQ) - # NOTE: ! inverses the return code of multimod diff. This is - # because prepare-release expects a 0 if there are diffs and - # non-0 if there are no diffs, which is the inverse of most - # diff tools - ! $(MULTIMOD) diff -p $(PREVIOUS_VERSION) -m $(MODSET) +check-changes: $(MULTIMOD) + $(MULTIMOD) diff -p $(PREVIOUS_VERSION) -m $(MODSET) .PHONY: prepare-release prepare-release: @@ -412,8 +409,6 @@ else endif # ensure a clean branch git diff -s --exit-code || (echo "local repository not clean"; exit 1) - # check if any modules have changed since the previous release - $(MAKE) check-changes # update files with new version sed -i.bak 's/$(PREVIOUS_VERSION)/$(RELEASE_CANDIDATE)/g' versions.yaml sed -i.bak 's/$(PREVIOUS_VERSION)/$(RELEASE_CANDIDATE)/g' ./cmd/builder/internal/builder/config.go diff --git a/Makefile.Common b/Makefile.Common index 36e0b527bf1..23c41826d2a 100644 --- a/Makefile.Common +++ b/Makefile.Common @@ -24,17 +24,17 @@ CHLOGGEN := $(TOOLS_BIN_DIR)/chloggen CROSSLINK := $(TOOLS_BIN_DIR)/crosslink ENVSUBST := $(TOOLS_BIN_DIR)/envsubst GOIMPORTS := $(TOOLS_BIN_DIR)/goimports -GOJSONSCHEMA := $(TOOLS_BIN_DIR)/gojsonschema GOVULNCHECK := $(TOOLS_BIN_DIR)/govulncheck LINT := $(TOOLS_BIN_DIR)/golangci-lint IMPI := $(TOOLS_BIN_DIR)/impi MISSPELL := $(TOOLS_BIN_DIR)/misspell MULTIMOD := $(TOOLS_BIN_DIR)/multimod PORTO := $(TOOLS_BIN_DIR)/porto -YQ := $(TOOLS_BIN_DIR)/yq +SEMCONVGEN := $(TOOLS_BIN_DIR)/semconvgen +SEMCONVKIT := $(TOOLS_BIN_DIR)/semconvkit .PHONY: install-tools -install-tools: $(TOOLS_BIN_NAMES) $(YQ) +install-tools: $(TOOLS_BIN_NAMES) $(TOOLS_BIN_DIR): mkdir -p $@ @@ -42,9 +42,6 @@ $(TOOLS_BIN_DIR): $(TOOLS_BIN_NAMES): $(TOOLS_BIN_DIR) $(TOOLS_MOD_DIR)/go.mod cd $(TOOLS_MOD_DIR) && $(GOCMD) build -o $@ -trimpath $(filter %/$(notdir $@),$(TOOLS_PKG_NAMES)) -$(YQ): $(TOOLS_BIN_DIR) $(TOOLS_MOD_DIR)/go.mod - cd $(TOOLS_MOD_DIR) && $(GOCMD) build -o $@ -trimpath github.com/mikefarah/yq/v4 - .PHONY: test test: $(GOTEST) $(GOTEST_OPT) ./... diff --git a/VERSIONING.md b/VERSIONING.md index 08e1b9ca547..e0dcc6781b5 100644 --- a/VERSIONING.md +++ b/VERSIONING.md @@ -5,7 +5,55 @@ is designed so that the following goal can be achieved: **Users are provided a codebase of value that is stable and secure.** -## Policy +## Public API expectations + +The following public API expectations apply to all modules in opentelemetry-collector and opentelemetry-collector-contrib. +As a general rule, stability guarantees of modules versioned as `v1` or higher are aligned with [Go 1 compatibility promise](https://go.dev/doc/go1compat). + +### General Go API considerations + +OpenTelemetry authors reserve the right to introduce API changes breaking compatibility between minor versions in the following scenarios: +* **Struct literals.** It may be necessary to add new fields to exported structs in the API. Code that uses unkeyed + struct literals (such as pkg.T{3, "x"}) to create values of these types would fail to compile after such a change. 
+ However, code that uses keyed literals (pkg.T{A: 3, B: "x"}) will continue to compile. We therefore recommend + using OpenTelemetry collector structs with the keyed literals only. +* **Methods.** As with struct fields, it may be necessary to add methods to types. Under some circumstances, + such as when the type is embedded in a struct along with another type, the addition of the new method may + break the struct by creating a conflict with an existing method of the other embedded type. We cannot protect + against this rare case and do not guarantee compatibility in such scenarios. +* **Dot imports.** If a program imports a package using `import .`, additional names defined in the imported package + in future releases may conflict with other names defined in the program. We do not recommend the use of + `import .` with OpenTelemetry Collector modules. + +Unless otherwise specified in the documentation, the following may change in any way between minor versions: +* **String representation**. The `String` method of any struct is intended to be human-readable and may change its output + in any way. +* **Go version compatibility**. Removing support for an unsupported Go version is not considered a breaking change. +* **OS version compatibility**. Removing support for an unsupported OS version is not considered a breaking change. Upgrading or downgrading OS version support per the [platform support](docs/platform-support.md) document is not considered a breaking change. +* **Dependency updates**. Updating dependencies is not considered a breaking change except when their types are part of the +public API or the update may change the behavior of applications in an incompatible way. + +### Configuration structures + +Configuration structures are part of the public API and backwards +compatibility should be maintained through any changes made to configuration structures. + +Unless otherwise specified in the documentation, the following may change in any way between minor versions: +* **Adding new fields to configuration structures**. Because configuration structures are typically instantiated through +unmarshalling a serialized representation of the structure, and not through structure literals, additive changes to +the set of exported fields in a configuration structure are not considered to break backward compatibility. +* **Relaxing validation rules**. An invalid configuration struct as defined by its `Validate` method return value +may become valid after a change to the validation rules. + +The following are explicitly considered to be breaking changes: +* **Modifying struct tags related to serialization**. Struct tags used to configure serialization mechanisms (`yaml:`, +`mapstructure:`, etc) are part of the structure definition and must maintain compatibility to the same extent as the +structure. +* **Making validation rules more strict**. A valid configuration struct as defined by its `Validate` method return value +must continue to be valid after a change to the validation rules, except when the configuration struct would cause an error +on its intended usage (e.g. when calling a method or when passed to any method or function in any module under opentelemetry-collector). + +## Versioning and module schema * Versioning of this project will be idiomatic of a Go project using [Go modules](https://golang.org/ref/mod#versions). 
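
The struct-literal and struct-tag rules in the VERSIONING.md additions above are easiest to see in code. `ExampleConfig` below is a hypothetical type invented purely for illustration, not a real collector struct; the point is that keyed literals keep compiling when a field is appended in a minor release, and that the `mapstructure` tags are as much a part of the compatibility surface as the field names.

```go
package main

import "fmt"

// ExampleConfig is a hypothetical configuration struct. Its mapstructure
// tags are part of its compatibility surface: renaming a tag is a breaking
// change, while appending a new tagged field is not.
type ExampleConfig struct {
	Endpoint string `mapstructure:"endpoint"`
	Timeout  int    `mapstructure:"timeout"`
	// A later minor release may append new fields here.
}

func main() {
	// Keyed literal: still compiles if new fields are appended later.
	cfg := ExampleConfig{Endpoint: "localhost:4317", Timeout: 30}

	// Unkeyed literal: would stop compiling as soon as a field is added,
	// which is exactly the case the policy above declines to protect.
	// bad := ExampleConfig{"localhost:4317", 30}

	fmt.Printf("%+v\n", cfg)
}
```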
@@ -32,7 +80,7 @@ is designed so that the following goal can be achieved: * A single module should exist, rooted at the top level of this repository, that contains all packages provided for use outside this repository. * Additional modules may be created in this repository to provide for - isolation of build-time tools or other commands. Such modules should be + isolation of build-time tools, other commands or independent libraries. Such modules should be versioned in sync with the `go.opentelemetry.io/collector` module. * Experimental modules still under active development will be versioned with a major version of `v0` to imply the stability guarantee defined by @@ -40,16 +88,6 @@ is designed so that the following goal can be achieved: > Major version zero (0.y.z) is for initial development. Anything MAY > change at any time. The public API SHOULD NOT be considered stable. - - * Configuration structures should be considered part of the public API and backward - compatibility maintained through any changes made to configuration structures. - * Because configuration structures are typically instantiated through unmarshalling - a serialized representation of the structure, and not through structure literals, - additive changes to the set of exported fields in a configuration structure are - not considered to break backward compatibility. - * Struct tags used to configure serialization mechanisms (`yaml:`, `mapstructure:`, etc) - are to be considered part of the structure definition and must maintain compatibility - to the same extent as the structure. * Versioning of the associated [contrib repository](https://github.com/open-telemetry/opentelemetry-collector-contrib) of this project will be idiomatic of a Go project using [Go @@ -72,14 +110,8 @@ is designed so that the following goal can be achieved: whenever you are using the module name). * If a module is version `v0` or `v1`, do not include the major version in either the module path or the import path. - * Configuration structures should be considered part of the public API and backward - compatibility maintained through any changes made to configuration structures. - * Because configuration structures are typically instantiated through unmarshalling - a serialized representation of the structure, and not through structure literals, - additive changes to the set of exported fields in a configuration structure are - not considered to break backward compatibility. - * Modules will be used to encapsulate receivers, processor, exporters, - extensions, and any other independent sets of related components. + * Modules will be used to encapsulate receivers, processors, exporters, + extensions, connectors and any other independent sets of related components. * Experimental modules still under active development will be versioned with a major version of `v0` to imply the stability guarantee defined by [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). @@ -102,17 +134,3 @@ is designed so that the following goal can be achieved: * Contrib modules will be kept up to date with this project's releases. * GitHub releases will be made for all releases. * Go modules will be made available at Go package mirrors. -* Stability guaranties of modules versioned as `v1` or higher are aligned with [Go 1 compatibility - promise](https://go.dev/doc/go1compat). 
OpenTelemetry authors reserve the right to introduce API changes breaking - compatibility between minor versions in the following scenarios: - * **Struct literals.** It may be necessary to add new fields to exported structs in the API. Code that uses unkeyed - struct literals (such as pkg.T{3, "x"}) to create values of these types would fail to compile after such a change. - However, code that uses keyed literals (pkg.T{A: 3, B: "x"}) will continue to compile. We therefore recommend - using OpenTelemetry collector structs with the keyed literals only. - * **Methods.** As with struct fields, it may be necessary to add methods to types. Under some circumstances, - such as when the type is embedded in a struct along with another type, the addition of the new method may - break the struct by creating a conflict with an existing method of the other embedded type. We cannot protect - against this rare case and do not guarantee compatibility in such scenarios. - * **Dot imports.** If a program imports a package using `import .`, additional names defined in the imported package - in future releases may conflict with other names defined in the program. We do not recommend the use of - `import .` with OpenTelemetry Collector modules. diff --git a/cmd/builder/internal/builder/config.go b/cmd/builder/internal/builder/config.go index 9020170dcbb..252c6b46207 100644 --- a/cmd/builder/internal/builder/config.go +++ b/cmd/builder/internal/builder/config.go @@ -16,7 +16,7 @@ import ( "go.uber.org/zap" ) -const defaultOtelColVersion = "0.90.1" +const defaultOtelColVersion = "0.91.0" // ErrInvalidGoMod indicates an invalid gomod var ErrInvalidGoMod = errors.New("invalid gomod specification for module") diff --git a/cmd/builder/internal/builder/templates/main_windows.go.tmpl b/cmd/builder/internal/builder/templates/main_windows.go.tmpl index ba327180578..3e001b250bf 100644 --- a/cmd/builder/internal/builder/templates/main_windows.go.tmpl +++ b/cmd/builder/internal/builder/templates/main_windows.go.tmpl @@ -6,41 +6,23 @@ package main import ( + "errors" "fmt" - "os" + "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" "go.opentelemetry.io/collector/otelcol" ) func run(params otelcol.CollectorSettings) error { - if useInteractiveMode, err := checkUseInteractiveMode(); err != nil { - return err - } else if useInteractiveMode { - return runInteractive(params) - } else { - return runService(params) - } -} - -func checkUseInteractiveMode() (bool, error) { - // If environment variable NO_WINDOWS_SERVICE is set with any value other - // than 0, use interactive mode instead of running as a service. This should - // be set in case running as a service is not possible or desired even - // though the current session is not detected to be interactive - if value, present := os.LookupEnv("NO_WINDOWS_SERVICE"); present && value != "0" { - return true, nil - } - - isInteractiveSession, err := svc.IsAnInteractiveSession() - if err != nil { - return false, fmt.Errorf("failed to determine if we are running in an interactive session: %w", err) - } - return isInteractiveSession, nil -} - -func runService(params otelcol.CollectorSettings) error { - // do not need to supply service name when startup is invoked through Service Control Manager directly + // No need to supply service name when startup is invoked through + // the Service Control Manager directly. 
if err := svc.Run("", otelcol.NewSvcHandler(params)); err != nil { + if errors.Is(err, windows.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT) { + // Per https://learn.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-startservicectrldispatchera#return-value + // this means that the process is not running as a service, so run interactively. + return runInteractive(params) + } + return fmt.Errorf("failed to start collector server: %w", err) } diff --git a/cmd/builder/internal/config/default.yaml b/cmd/builder/internal/config/default.yaml index cf8f9114ef0..85682ae9061 100644 --- a/cmd/builder/internal/config/default.yaml +++ b/cmd/builder/internal/config/default.yaml @@ -2,22 +2,22 @@ dist: module: go.opentelemetry.io/collector/cmd/otelcorecol name: otelcorecol description: Local OpenTelemetry Collector binary, testing only. - version: 0.90.0-dev - otelcol_version: 0.90.0 + version: 0.91.0-dev + otelcol_version: 0.91.0 receivers: - - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.90.0 + - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.91.0 exporters: - - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.90.0 - - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.90.0 - - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.90.0 - - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.90.0 + - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.91.0 + - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.91.0 + - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.91.0 + - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.91.0 extensions: - - gomod: go.opentelemetry.io/collector/extension/ballastextension v0.90.0 - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.90.0 + - gomod: go.opentelemetry.io/collector/extension/ballastextension v0.91.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.91.0 processors: - - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.90.0 - - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.90.0 + - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.91.0 + - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.91.0 connectors: - - gomod: go.opentelemetry.io/collector/connector/forwardconnector v0.90.0 + - gomod: go.opentelemetry.io/collector/connector/forwardconnector v0.91.0 diff --git a/cmd/builder/internal/version.go b/cmd/builder/internal/version.go index be1fe805b2b..7ffabed75fc 100644 --- a/cmd/builder/internal/version.go +++ b/cmd/builder/internal/version.go @@ -5,22 +5,42 @@ package internal // import "go.opentelemetry.io/collector/cmd/builder/internal" import ( "fmt" + "runtime/debug" "github.com/spf13/cobra" ) var ( - version = "dev" + version = "" date = "unknown" ) +// binVersion returns the version of the binary. +// If the version is not set, it attempts to read the build information. +// Returns an error if the build information cannot be read. 
+func binVersion() (string, error) { + if version != "" { + return version, nil + } + info, ok := debug.ReadBuildInfo() + if !ok { + return "", fmt.Errorf("failed to read build info") + } + return info.Main.Version, nil +} + func versionCommand() *cobra.Command { return &cobra.Command{ Use: "version", Short: "Version of ocb", Long: "Prints the version of the ocb binary", - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { + version, err := binVersion() + if err != nil { + return err + } cmd.Println(fmt.Sprintf("%s version %s", cmd.Parent().Name(), version)) + return nil }, } } diff --git a/cmd/builder/test/core.builder.yaml b/cmd/builder/test/core.builder.yaml index 356f10a73b1..4f792b2ee82 100644 --- a/cmd/builder/test/core.builder.yaml +++ b/cmd/builder/test/core.builder.yaml @@ -1,20 +1,20 @@ dist: module: go.opentelemetry.io/collector/builder/test/core - otelcol_version: 0.90.1 + otelcol_version: 0.91.0 extensions: - import: go.opentelemetry.io/collector/extension/zpagesextension - gomod: go.opentelemetry.io/collector v0.90.1 + gomod: go.opentelemetry.io/collector v0.91.0 path: ${WORKSPACE_DIR} receivers: - import: go.opentelemetry.io/collector/receiver/otlpreceiver - gomod: go.opentelemetry.io/collector v0.90.1 + gomod: go.opentelemetry.io/collector v0.91.0 path: ${WORKSPACE_DIR} exporters: - import: go.opentelemetry.io/collector/exporter/debugexporter - gomod: go.opentelemetry.io/collector v0.90.1 + gomod: go.opentelemetry.io/collector v0.91.0 path: ${WORKSPACE_DIR} replaces: diff --git a/cmd/otelcorecol/builder-config.yaml b/cmd/otelcorecol/builder-config.yaml index a0aff6c623a..60414f8d0d7 100644 --- a/cmd/otelcorecol/builder-config.yaml +++ b/cmd/otelcorecol/builder-config.yaml @@ -2,24 +2,24 @@ dist: module: go.opentelemetry.io/collector/cmd/otelcorecol name: otelcorecol description: Local OpenTelemetry Collector binary, testing only. 
- version: 0.90.1-dev - otelcol_version: 0.90.1 + version: 0.91.0-dev + otelcol_version: 0.91.0 receivers: - - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.90.1 + - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.91.0 exporters: - - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.90.1 - - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.90.1 - - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.90.1 - - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.90.1 + - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.91.0 + - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.91.0 + - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.91.0 + - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.91.0 extensions: - - gomod: go.opentelemetry.io/collector/extension/ballastextension v0.90.1 - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.90.1 + - gomod: go.opentelemetry.io/collector/extension/ballastextension v0.91.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.91.0 processors: - - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.90.1 - - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.90.1 + - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.91.0 + - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.91.0 connectors: - - gomod: go.opentelemetry.io/collector/connector/forwardconnector v0.90.1 + - gomod: go.opentelemetry.io/collector/connector/forwardconnector v0.91.0 replaces: - go.opentelemetry.io/collector => ../../ diff --git a/cmd/otelcorecol/go.mod b/cmd/otelcorecol/go.mod index 3e4f848e9f9..56d5749dc8d 100644 --- a/cmd/otelcorecol/go.mod +++ b/cmd/otelcorecol/go.mod @@ -6,23 +6,23 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/connector v0.90.0 - go.opentelemetry.io/collector/connector/forwardconnector v0.90.1 - go.opentelemetry.io/collector/exporter v0.90.0 - go.opentelemetry.io/collector/exporter/debugexporter v0.90.1 - go.opentelemetry.io/collector/exporter/loggingexporter v0.90.1 - go.opentelemetry.io/collector/exporter/otlpexporter v0.90.1 - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.90.1 - go.opentelemetry.io/collector/extension v0.90.0 - go.opentelemetry.io/collector/extension/ballastextension v0.90.1 - go.opentelemetry.io/collector/extension/zpagesextension v0.90.1 - go.opentelemetry.io/collector/otelcol v0.90.1 - go.opentelemetry.io/collector/processor v0.90.0 - go.opentelemetry.io/collector/processor/batchprocessor v0.90.1 - go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.90.1 - go.opentelemetry.io/collector/receiver v0.90.0 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.90.1 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/connector v0.91.0 + go.opentelemetry.io/collector/connector/forwardconnector v0.91.0 + go.opentelemetry.io/collector/exporter v0.91.0 + go.opentelemetry.io/collector/exporter/debugexporter v0.91.0 + go.opentelemetry.io/collector/exporter/loggingexporter v0.91.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.91.0 + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.91.0 + go.opentelemetry.io/collector/extension v0.91.0 + go.opentelemetry.io/collector/extension/ballastextension v0.91.0 + 
go.opentelemetry.io/collector/extension/zpagesextension v0.91.0 + go.opentelemetry.io/collector/otelcol v0.91.0 + go.opentelemetry.io/collector/processor v0.91.0 + go.opentelemetry.io/collector/processor/batchprocessor v0.91.0 + go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.91.0 + go.opentelemetry.io/collector/receiver v0.91.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.91.0 golang.org/x/sys v0.15.0 ) @@ -49,7 +49,7 @@ require ( github.com/hashicorp/go-version v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.3 // indirect + github.com/klauspost/compress v1.17.4 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.0.1 // indirect @@ -69,7 +69,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/rs/cors v1.10.1 // indirect - github.com/shirou/gopsutil/v3 v3.23.10 // indirect + github.com/shirou/gopsutil/v3 v3.23.11 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -77,23 +77,23 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.90.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.90.0 // indirect - go.opentelemetry.io/collector/config/configcompression v0.90.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.90.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.90.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.90.0 // indirect - go.opentelemetry.io/collector/config/configopaque v0.90.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect - go.opentelemetry.io/collector/config/configtls v0.90.0 // indirect - go.opentelemetry.io/collector/config/internal v0.90.0 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect - go.opentelemetry.io/collector/consumer v0.90.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.90.0 // indirect + go.opentelemetry.io/collector v0.91.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.91.0 // indirect + go.opentelemetry.io/collector/config/configcompression v0.91.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.91.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.91.0 // indirect + go.opentelemetry.io/collector/config/confignet v0.91.0 // indirect + go.opentelemetry.io/collector/config/configopaque v0.91.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect + go.opentelemetry.io/collector/config/configtls v0.91.0 // indirect + go.opentelemetry.io/collector/config/internal v0.91.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect + go.opentelemetry.io/collector/consumer v0.91.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect - go.opentelemetry.io/collector/semconv v0.90.0 // indirect - go.opentelemetry.io/collector/service v0.90.0 // indirect + go.opentelemetry.io/collector/semconv v0.91.0 // indirect + go.opentelemetry.io/collector/service v0.91.0 
// indirect go.opentelemetry.io/contrib/config v0.1.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect diff --git a/cmd/otelcorecol/go.sum b/cmd/otelcorecol/go.sum index 81799022399..5e76dc6950d 100644 --- a/cmd/otelcorecol/go.sum +++ b/cmd/otelcorecol/go.sum @@ -188,8 +188,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= @@ -271,8 +271,8 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v3 v3.23.10 h1:/N42opWlYzegYaVkWejXWJpbzKv2JDy3mrgGzKsh9hM= -github.com/shirou/gopsutil/v3 v3.23.10/go.mod h1:JIE26kpucQi+innVlAUnIEOSBhBUkirr5b44yr55+WE= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -498,7 +498,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/cmd/otelcorecol/main.go b/cmd/otelcorecol/main.go index 80e552effc5..76e2dbf1b9d 100644 --- a/cmd/otelcorecol/main.go +++ b/cmd/otelcorecol/main.go @@ -14,7 +14,7 @@ func main() { info := component.BuildInfo{ Command: "otelcorecol", Description: "Local OpenTelemetry Collector binary, testing only.", - Version: "0.90.1-dev", + Version: "0.91.0-dev", } if err := run(otelcol.CollectorSettings{BuildInfo: info, Factories: components}); err != nil { 
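
The `binVersion` hunk in cmd/builder/internal/version.go above is an instance of a general pattern: prefer a version injected at build time, and fall back to the module version that `go install module@version` records in the binary's build info. The sketch below restates that pattern as a self-contained program under hypothetical names (`package main`, variable `version`); it is independent of the builder's actual ldflags wiring.

```go
package main

import (
	"errors"
	"fmt"
	"runtime/debug"
)

// version can be stamped at build time, e.g.
//   go build -ldflags "-X main.version=v0.91.0"
// When it is left empty — the typical `go install example.com/tool@v0.91.0`
// case — the module version recorded in the build info is reported instead.
var version = ""

func binVersion() (string, error) {
	if version != "" {
		return version, nil
	}
	info, ok := debug.ReadBuildInfo()
	if !ok {
		return "", errors.New("failed to read build info")
	}
	return info.Main.Version, nil
}

func main() {
	v, err := binVersion()
	if err != nil {
		fmt.Println("version unknown:", err)
		return
	}
	fmt.Println("version:", v)
}
```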
diff --git a/cmd/otelcorecol/main_windows.go b/cmd/otelcorecol/main_windows.go index ad96fd1e801..285aab81289 100644 --- a/cmd/otelcorecol/main_windows.go +++ b/cmd/otelcorecol/main_windows.go @@ -6,43 +6,25 @@ package main import ( + "errors" "fmt" - "os" + "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" "go.opentelemetry.io/collector/otelcol" ) func run(params otelcol.CollectorSettings) error { - if useInteractiveMode, err := checkUseInteractiveMode(); err != nil { - return err - } else if useInteractiveMode { - return runInteractive(params) - } else { - return runService(params) - } -} - -func checkUseInteractiveMode() (bool, error) { - // If environment variable NO_WINDOWS_SERVICE is set with any value other - // than 0, use interactive mode instead of running as a service. This should - // be set in case running as a service is not possible or desired even - // though the current session is not detected to be interactive - if value, present := os.LookupEnv("NO_WINDOWS_SERVICE"); present && value != "0" { - return true, nil - } - - isInteractiveSession, err := svc.IsAnInteractiveSession() - if err != nil { - return false, fmt.Errorf("failed to determine if we are running in an interactive session: %w", err) - } - return isInteractiveSession, nil -} - -func runService(params otelcol.CollectorSettings) error { - // do not need to supply service name when startup is invoked through Service Control Manager directly + // No need to supply service name when startup is invoked through + // the Service Control Manager directly. if err := svc.Run("", otelcol.NewSvcHandler(params)); err != nil { + if errors.Is(err, windows.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT) { + // Per https://learn.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-startservicectrldispatchera#return-value + // this means that the process is not running as a service, so run interactively. 
+ return runInteractive(params) + } + return fmt.Errorf("failed to start collector server: %w", err) } diff --git a/component/go.mod b/component/go.mod index c58fe1f0e69..fdb6b4850c6 100644 --- a/component/go.mod +++ b/component/go.mod @@ -4,8 +4,8 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 diff --git a/config/configauth/go.mod b/config/configauth/go.mod index 30366c91dcc..f5e2fc296bb 100644 --- a/config/configauth/go.mod +++ b/config/configauth/go.mod @@ -4,9 +4,9 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/extension v0.90.0 - go.opentelemetry.io/collector/extension/auth v0.90.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/extension v0.91.0 + go.opentelemetry.io/collector/extension/auth v0.91.0 ) require ( @@ -21,8 +21,8 @@ require ( github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect go.opentelemetry.io/otel v1.21.0 // indirect diff --git a/config/configgrpc/go.mod b/config/configgrpc/go.mod index b268a2dc3d5..2655e2df495 100644 --- a/config/configgrpc/go.mod +++ b/config/configgrpc/go.mod @@ -5,15 +5,15 @@ go 1.20 require ( github.com/mostynb/go-grpc-compression v1.2.2 github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configauth v0.90.0 - go.opentelemetry.io/collector/config/configcompression v0.90.0 - go.opentelemetry.io/collector/config/confignet v0.90.0 - go.opentelemetry.io/collector/config/configopaque v0.90.0 - go.opentelemetry.io/collector/config/configtls v0.90.0 - go.opentelemetry.io/collector/config/internal v0.90.0 - go.opentelemetry.io/collector/extension/auth v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configauth v0.91.0 + go.opentelemetry.io/collector/config/configcompression v0.91.0 + go.opentelemetry.io/collector/config/confignet v0.91.0 + go.opentelemetry.io/collector/config/configopaque v0.91.0 + go.opentelemetry.io/collector/config/configtls v0.91.0 + go.opentelemetry.io/collector/config/internal v0.91.0 + go.opentelemetry.io/collector/extension/auth v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 go.opentelemetry.io/otel v1.21.0 @@ -55,9 +55,9 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect - 
go.opentelemetry.io/collector/confmap v0.90.0 // indirect - go.opentelemetry.io/collector/extension v0.90.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect + go.opentelemetry.io/collector/extension v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect diff --git a/config/confighttp/README.md b/config/confighttp/README.md index a3794ceeed1..0cc0503f539 100644 --- a/config/confighttp/README.md +++ b/config/confighttp/README.md @@ -29,6 +29,8 @@ README](../configtls/README.md). - [`idle_conn_timeout`](https://golang.org/pkg/net/http/#Transport) - [`auth`](../configauth/README.md) - [`disable_keep_alives`](https://golang.org/pkg/net/http/#Transport) +- [`http2_read_idle_timeout`](https://pkg.go.dev/golang.org/x/net/http2#Transport) +- [`http2_ping_timeout`](https://pkg.go.dev/golang.org/x/net/http2#Transport) Example: diff --git a/config/confighttp/confighttp.go b/config/confighttp/confighttp.go index c83db25d4b6..e41a64cc124 100644 --- a/config/confighttp/confighttp.go +++ b/config/confighttp/confighttp.go @@ -6,6 +6,7 @@ package confighttp // import "go.opentelemetry.io/collector/config/confighttp" import ( "crypto/tls" "errors" + "fmt" "io" "net" "net/http" @@ -86,6 +87,16 @@ type HTTPClientSettings struct { // connection for every request. Before enabling this option please consider whether changes // to idle connection settings can achieve your goal. DisableKeepAlives bool `mapstructure:"disable_keep_alives"` + + // This is needed in case you run into + // https://github.com/golang/go/issues/59690 + // https://github.com/golang/go/issues/36026 + // HTTP2ReadIdleTimeout if the connection has been idle for the configured value send a ping frame for health check + // 0s means no health check will be performed. + HTTP2ReadIdleTimeout time.Duration `mapstructure:"http2_read_idle_timeout"` + // HTTP2PingTimeout if there's no response to the ping within the configured value, the connection will be closed. + // If not set or set to 0, it defaults to 15s. + HTTP2PingTimeout time.Duration `mapstructure:"http2_ping_timeout"` } // NewDefaultHTTPClientSettings returns HTTPClientSettings type object with @@ -147,6 +158,15 @@ func (hcs *HTTPClientSettings) ToClient(host component.Host, settings component. 
transport.DisableKeepAlives = hcs.DisableKeepAlives + if hcs.HTTP2ReadIdleTimeout > 0 { + transport2, transportErr := http2.ConfigureTransports(transport) + if transportErr != nil { + return nil, fmt.Errorf("failed to configure http2 transport: %w", transportErr) + } + transport2.ReadIdleTimeout = hcs.HTTP2ReadIdleTimeout + transport2.PingTimeout = hcs.HTTP2PingTimeout + } + clientTransport := (http.RoundTripper)(transport) // The Auth RoundTripper should always be the innermost to ensure that diff --git a/config/confighttp/confighttp_test.go b/config/confighttp/confighttp_test.go index 43a9b865010..3d3bc6d8bc7 100644 --- a/config/confighttp/confighttp_test.go +++ b/config/confighttp/confighttp_test.go @@ -53,6 +53,7 @@ func TestAllHTTPClientSettings(t *testing.T) { maxIdleConnsPerHost := 40 maxConnsPerHost := 45 idleConnTimeout := 30 * time.Second + http2PingTimeout := 5 * time.Second tests := []struct { name string settings HTTPClientSettings @@ -65,15 +66,17 @@ func TestAllHTTPClientSettings(t *testing.T) { TLSSetting: configtls.TLSClientSetting{ Insecure: false, }, - ReadBufferSize: 1024, - WriteBufferSize: 512, - MaxIdleConns: &maxIdleConns, - MaxIdleConnsPerHost: &maxIdleConnsPerHost, - MaxConnsPerHost: &maxConnsPerHost, - IdleConnTimeout: &idleConnTimeout, - CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, error) { return next, nil }, - Compression: "", - DisableKeepAlives: true, + ReadBufferSize: 1024, + WriteBufferSize: 512, + MaxIdleConns: &maxIdleConns, + MaxIdleConnsPerHost: &maxIdleConnsPerHost, + MaxConnsPerHost: &maxConnsPerHost, + IdleConnTimeout: &idleConnTimeout, + CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, error) { return next, nil }, + Compression: "", + DisableKeepAlives: true, + HTTP2ReadIdleTimeout: idleConnTimeout, + HTTP2PingTimeout: http2PingTimeout, }, shouldError: false, }, @@ -84,15 +87,17 @@ func TestAllHTTPClientSettings(t *testing.T) { TLSSetting: configtls.TLSClientSetting{ Insecure: false, }, - ReadBufferSize: 1024, - WriteBufferSize: 512, - MaxIdleConns: &maxIdleConns, - MaxIdleConnsPerHost: &maxIdleConnsPerHost, - MaxConnsPerHost: &maxConnsPerHost, - IdleConnTimeout: &idleConnTimeout, - CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, error) { return next, nil }, - Compression: "none", - DisableKeepAlives: true, + ReadBufferSize: 1024, + WriteBufferSize: 512, + MaxIdleConns: &maxIdleConns, + MaxIdleConnsPerHost: &maxIdleConnsPerHost, + MaxConnsPerHost: &maxConnsPerHost, + IdleConnTimeout: &idleConnTimeout, + CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, error) { return next, nil }, + Compression: "none", + DisableKeepAlives: true, + HTTP2ReadIdleTimeout: idleConnTimeout, + HTTP2PingTimeout: http2PingTimeout, }, shouldError: false, }, @@ -103,15 +108,38 @@ func TestAllHTTPClientSettings(t *testing.T) { TLSSetting: configtls.TLSClientSetting{ Insecure: false, }, - ReadBufferSize: 1024, - WriteBufferSize: 512, - MaxIdleConns: &maxIdleConns, - MaxIdleConnsPerHost: &maxIdleConnsPerHost, - MaxConnsPerHost: &maxConnsPerHost, - IdleConnTimeout: &idleConnTimeout, - CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, error) { return next, nil }, - Compression: "gzip", - DisableKeepAlives: true, + ReadBufferSize: 1024, + WriteBufferSize: 512, + MaxIdleConns: &maxIdleConns, + MaxIdleConnsPerHost: &maxIdleConnsPerHost, + MaxConnsPerHost: &maxConnsPerHost, + IdleConnTimeout: &idleConnTimeout, + CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, 
error) { return next, nil }, + Compression: "gzip", + DisableKeepAlives: true, + HTTP2ReadIdleTimeout: idleConnTimeout, + HTTP2PingTimeout: http2PingTimeout, + }, + shouldError: false, + }, + { + name: "all_valid_settings_http2_health_check", + settings: HTTPClientSettings{ + Endpoint: "localhost:1234", + TLSSetting: configtls.TLSClientSetting{ + Insecure: false, + }, + ReadBufferSize: 1024, + WriteBufferSize: 512, + MaxIdleConns: &maxIdleConns, + MaxIdleConnsPerHost: &maxIdleConnsPerHost, + MaxConnsPerHost: &maxConnsPerHost, + IdleConnTimeout: &idleConnTimeout, + CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, error) { return next, nil }, + Compression: "gzip", + DisableKeepAlives: true, + HTTP2ReadIdleTimeout: idleConnTimeout, + HTTP2PingTimeout: http2PingTimeout, }, shouldError: false, }, diff --git a/config/confighttp/go.mod b/config/confighttp/go.mod index 341910e76ae..a9887e39494 100644 --- a/config/confighttp/go.mod +++ b/config/confighttp/go.mod @@ -4,18 +4,18 @@ go 1.20 require ( github.com/golang/snappy v0.0.4 - github.com/klauspost/compress v1.17.3 + github.com/klauspost/compress v1.17.4 github.com/rs/cors v1.10.1 github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configauth v0.90.0 - go.opentelemetry.io/collector/config/configcompression v0.90.0 - go.opentelemetry.io/collector/config/configopaque v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/config/configtls v0.90.0 - go.opentelemetry.io/collector/config/internal v0.90.0 - go.opentelemetry.io/collector/extension/auth v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configauth v0.91.0 + go.opentelemetry.io/collector/config/configcompression v0.91.0 + go.opentelemetry.io/collector/config/configopaque v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/config/configtls v0.91.0 + go.opentelemetry.io/collector/config/internal v0.91.0 + go.opentelemetry.io/collector/extension/auth v0.91.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 go.opentelemetry.io/otel v1.21.0 go.uber.org/zap v1.26.0 @@ -38,8 +38,8 @@ require ( github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect - go.opentelemetry.io/collector/extension v0.90.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect + go.opentelemetry.io/collector/extension v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect diff --git a/config/confighttp/go.sum b/config/confighttp/go.sum index 721181594d7..cd1707d6675 100644 --- a/config/confighttp/go.sum +++ b/config/confighttp/go.sum @@ -23,8 +23,8 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.3 
h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= diff --git a/config/configtls/go.mod b/config/configtls/go.mod index 6113e3f2f04..8bc328c8961 100644 --- a/config/configtls/go.mod +++ b/config/configtls/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/fsnotify/fsnotify v1.7.0 github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/config/configopaque v0.90.0 + go.opentelemetry.io/collector/config/configopaque v0.91.0 ) require ( diff --git a/connector/connector.go b/connector/connector.go index 3a85add44a6..dccf21fa49d 100644 --- a/connector/connector.go +++ b/connector/connector.go @@ -61,7 +61,7 @@ type MetricsRouter interface { // A Logs connector acts as an exporter from a logs pipeline and a receiver // to one or more traces, metrics, or logs pipelines. -// Logs feeds a consumer.Logs, consumer.Metrics, or consumer.Logs with data. +// Logs feeds a consumer.Traces, consumer.Metrics, or consumer.Logs with data. // // Examples: // - Structured logs containing span information could be consumed and emitted as traces. diff --git a/connector/forwardconnector/go.mod b/connector/forwardconnector/go.mod index 47f25f45125..39e070a4773 100644 --- a/connector/forwardconnector/go.mod +++ b/connector/forwardconnector/go.mod @@ -4,9 +4,9 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/connector v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/connector v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 ) @@ -25,9 +25,9 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/collector v0.90.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect + go.opentelemetry.io/collector v0.91.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect diff --git a/connector/go.mod b/connector/go.mod index 27046adaad5..f11308b2495 100644 --- a/connector/go.mod +++ b/connector/go.mod @@ -4,9 +4,9 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 go.uber.org/zap v1.26.0 ) @@ -26,8 +26,8 @@ require ( github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect diff --git a/consumer/go.mod b/consumer/go.mod index 4f4a164a102..7d42f9e326a 100644 --- a/consumer/go.mod +++ b/consumer/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 + go.opentelemetry.io/collector v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 ) diff --git a/docs/release.md b/docs/release.md index c93cbcf939e..574ce122e77 100644 --- a/docs/release.md +++ b/docs/release.md @@ -27,8 +27,9 @@ It is possible that a core approver isn't a contrib approver. In that case, the 1. Update Contrib to use the latest in development version of Core. Run `make update-otel` in Contrib root directory and if it results in any changes submit a draft PR to Contrib. Ensure the CI passes before proceeding. This is to ensure that the latest core does not break contrib in any way. We’ll update it once more to the final release number later. -2. Determine the version number that will be assigned to the release. During the beta phase, we increment the minor version number and set the patch number to 0. In this document, we are using `v0.85.0` as the version to be released, following `v0.84.0`. - Check if stable modules have any changes since the last release by running `make check-changes PREVIOUS_VERSION=v1.0.0-rcv0014 MODSET=stable`. If there are no changes, there is no need to release new version for stable modules. +2. Determine the version number that will be assigned to the release. Usually, we increment the minor version number and set the patch number to 0. In this document, we are using `v0.85.0` as the version to be released, following `v0.84.0`. + Check if stable modules have any changes since the last release by running `make check-changes PREVIOUS_VERSION=v1.0.0 MODSET=stable`. If there are no changes, there is no need to release new version for stable modules. + If there are changes found but .chloggen directory doesn't have any corresponding entries, add missing changelog entries. If the changes are insignificant, consider not releasing a new version for stable modules. 3. Manually run the action [Automation - Prepare Release](https://github.com/open-telemetry/opentelemetry-collector/actions/workflows/prepare-release.yml). This action will create an issue to track the progress of the release and a pull request to update the changelog and version numbers in the repo. **While this PR is open all merging in Core should be haulted**. - When prompted, enter the version numbers determined in Step 2, but do not include a leading `v`. @@ -49,7 +50,7 @@ It is possible that a core approver isn't a contrib approver. In that case, the 7. A new `v0.85.0` release should be automatically created on Github by now. Edit it and use the contents from the CHANGELOG.md and CHANGELOG-API.md as the release's description. 8. Update the draft PR to Contrib created in step 1 to use the newly released Core version and set it to Ready for Review. 
- - Run `make update-otel OTEL_VERSION=v0.85.0 OTEL_RC_VERSION=v1.0.0-rcv0014` + - Run `make update-otel OTEL_VERSION=v0.85.0 OTEL_STABLE_VERSION=v1.1.0` - Manually update `cmd/otelcontribcol/builder-config.yaml` - Manually update `cmd/oteltestbedcol/builder-config.yaml` - Run `make genotelcontribcol` @@ -156,10 +157,10 @@ Once a module is ready to be released under the `1.x` version scheme, file a PR | Date | Version | Release manager | |------------|---------|-----------------| -| 2023-12-11 | v0.91.0 | @dmitryax | | 2024-01-08 | v0.92.0 | @codeboten | | 2024-01-22 | v0.93.0 | @bogdandrutu | | 2024-02-05 | v0.94.0 | @Aneurysm9 | | 2024-02-19 | v0.95.0 | @mx-psi | | 2024-03-04 | v0.96.0 | @jpkrohling | | 2024-03-18 | v0.97.0 | @djaglowski | +| 2024-04-01 | v0.98.0 | @dmitryax | \ No newline at end of file diff --git a/docs/scraping-receivers.md b/docs/scraping-receivers.md index e9fa31f44a2..91e0bd9f292 100644 --- a/docs/scraping-receivers.md +++ b/docs/scraping-receivers.md @@ -24,7 +24,7 @@ defines stability guarantees and provides guidelines for metric updates. Each built-in scraping metrics receiver has a `metadata.yaml` file that MUST define all the metrics emitted by the receiver. The file is being used to generate an API for metrics recording, user settings to customize the emitted metrics and user documentation. The file schema is defined in -https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/cmd/mdatagen/metric-metadata.yaml. +https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/cmd/mdatagen/metadata-schema.yaml Defining a metric in `metadata.yaml` DOES NOT guarantee that the metric will always be produced by the receiver. In some cases it may be impossible to fetch particular metrics from a system in a particular state. diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 24e20bdfe8b..21caec042fc 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -283,7 +283,7 @@ configuration issue. This could be due to a firewall, DNS, or proxy issue. Note that the Collector does have [proxy support](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support). -### Startup failing in Windows Docker containers +### Startup failing in Windows Docker containers (v0.90.1 and earlier) The process may fail to start in a Windows Docker container with the following error: `The service process could not connect to the service controller`. 
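The troubleshooting entry above is now scoped to v0.90.1 and earlier, since the problem only shows up when the collector misdetects whether it is running under the Windows service control manager. As a hedged illustration only (this is not the collector's actual implementation), the sketch below shows how a Go program can make that decision with `golang.org/x/sys/windows/svc`; `runService` and `runInteractive` are hypothetical placeholders.

```go
//go:build windows

// Illustrative sketch: branch between Windows-service mode and interactive
// mode using the real svc.IsWindowsService helper. runService and
// runInteractive are hypothetical stand-ins, not collector APIs.
package main

import (
	"log"

	"golang.org/x/sys/windows/svc"
)

func run() error {
	isService, err := svc.IsWindowsService()
	if err != nil {
		return err
	}
	if isService {
		// Started by the service control manager.
		return runService()
	}
	// Started from a terminal (or a container entrypoint).
	return runInteractive()
}

func runService() error     { return nil } // placeholder
func runInteractive() error { return nil } // placeholder

func main() {
	if err := run(); err != nil {
		log.Fatal(err)
	}
}
```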
In diff --git a/examples/k8s/otel-config.yaml b/examples/k8s/otel-config.yaml index 0bc658fd302..f9e51a8d477 100644 --- a/examples/k8s/otel-config.yaml +++ b/examples/k8s/otel-config.yaml @@ -68,7 +68,7 @@ spec: - command: - "/otelcol" - "--config=/conf/otel-agent-config.yaml" - image: otel/opentelemetry-collector:0.90.1 + image: otel/opentelemetry-collector:0.91.0 name: otel-agent resources: limits: @@ -187,7 +187,7 @@ spec: - command: - "/otelcol" - "--config=/conf/otel-collector-config.yaml" - image: otel/opentelemetry-collector:0.90.1 + image: otel/opentelemetry-collector:0.91.0 name: otel-collector resources: limits: diff --git a/exporter/debugexporter/go.mod b/exporter/debugexporter/go.mod index 8074fbc0690..1ae440033da 100644 --- a/exporter/debugexporter/go.mod +++ b/exporter/debugexporter/go.mod @@ -4,10 +4,10 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/exporter v0.90.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/exporter v0.91.0 ) require ( @@ -27,12 +27,12 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.90.0 // indirect - go.opentelemetry.io/collector/consumer v0.90.0 // indirect - go.opentelemetry.io/collector/extension v0.90.0 // indirect + go.opentelemetry.io/collector v0.91.0 // indirect + go.opentelemetry.io/collector/consumer v0.91.0 // indirect + go.opentelemetry.io/collector/extension v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect - go.opentelemetry.io/collector/receiver v0.90.0 // indirect + go.opentelemetry.io/collector/receiver v0.91.0 // indirect go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect diff --git a/exporter/exporterhelper/internal/bounded_memory_queue.go b/exporter/exporterhelper/internal/bounded_memory_queue.go index b963a73d127..a643f8e6c61 100644 --- a/exporter/exporterhelper/internal/bounded_memory_queue.go +++ b/exporter/exporterhelper/internal/bounded_memory_queue.go @@ -7,7 +7,6 @@ package internal // import "go.opentelemetry.io/collector/exporter/exporterhelpe import ( "context" - "sync/atomic" "go.opentelemetry.io/collector/component" ) @@ -17,25 +16,19 @@ import ( // the producer are dropped. type boundedMemoryQueue[T any] struct { component.StartFunc - stopped *atomic.Bool - items chan queueRequest[T] + items chan queueRequest[T] } // NewBoundedMemoryQueue constructs the new queue of specified capacity, and with an optional // callback for dropped items (e.g. useful to emit metrics). func NewBoundedMemoryQueue[T any](capacity int) Queue[T] { return &boundedMemoryQueue[T]{ - items: make(chan queueRequest[T], capacity), - stopped: &atomic.Bool{}, + items: make(chan queueRequest[T], capacity), } } -// Offer is used by the producer to submit new item to the queue. +// Offer is used by the producer to submit new item to the queue. Calling this method on a stopped queue will panic. 
func (q *boundedMemoryQueue[T]) Offer(ctx context.Context, req T) error { - if q.stopped.Load() { - return ErrQueueIsStopped - } - select { case q.items <- queueRequest[T]{ctx: ctx, req: req}: return nil @@ -46,8 +39,8 @@ func (q *boundedMemoryQueue[T]) Offer(ctx context.Context, req T) error { // Consume applies the provided function on the head of queue. // The call blocks until there is an item available or the queue is stopped. -// The function returns true when an item is consumed or false if the queue is stopped. -func (q *boundedMemoryQueue[T]) Consume(consumeFunc func(context.Context, T)) bool { +// The function returns true when an item is consumed or false if the queue is stopped and emptied. +func (q *boundedMemoryQueue[T]) Consume(consumeFunc func(context.Context, T) bool) bool { item, ok := <-q.items if !ok { return false @@ -56,9 +49,8 @@ func (q *boundedMemoryQueue[T]) Consume(consumeFunc func(context.Context, T)) bo return true } -// Shutdown stops accepting items, and stops all consumers. It blocks until all consumers have stopped. +// Shutdown closes the queue channel to initiate draining of the queue. func (q *boundedMemoryQueue[T]) Shutdown(context.Context) error { - q.stopped.Store(true) // disable producer close(q.items) return nil } diff --git a/exporter/exporterhelper/internal/bounded_memory_queue_test.go b/exporter/exporterhelper/internal/bounded_memory_queue_test.go index 6526ddf7e88..4c8d08714b8 100644 --- a/exporter/exporterhelper/internal/bounded_memory_queue_test.go +++ b/exporter/exporterhelper/internal/bounded_memory_queue_test.go @@ -30,9 +30,10 @@ func TestBoundedQueue(t *testing.T) { consumerState := newConsumerState(t) - consumers := NewQueueConsumers(q, 1, func(_ context.Context, item string) { + consumers := NewQueueConsumers(q, 1, func(_ context.Context, item string) bool { consumerState.record(item) <-waitCh + return true }) assert.NoError(t, consumers.Start(context.Background(), componenttest.NewNopHost())) @@ -74,7 +75,6 @@ func TestBoundedQueue(t *testing.T) { } assert.NoError(t, consumers.Shutdown(context.Background())) - assert.ErrorIs(t, q.Offer(context.Background(), "x"), ErrQueueIsStopped) } // In this test we run a queue with many items and a slow consumer. @@ -89,9 +89,10 @@ func TestShutdownWhileNotEmpty(t *testing.T) { consumerState := newConsumerState(t) waitChan := make(chan struct{}) - consumers := NewQueueConsumers(q, 5, func(_ context.Context, item string) { + consumers := NewQueueConsumers(q, 5, func(_ context.Context, item string) bool { <-waitChan consumerState.record(item) + return true }) assert.NoError(t, consumers.Start(context.Background(), componenttest.NewNopHost())) @@ -106,17 +107,13 @@ func TestShutdownWhileNotEmpty(t *testing.T) { assert.NoError(t, q.Offer(context.Background(), "i")) assert.NoError(t, q.Offer(context.Background(), "j")) - // we block the workers and wait for the queue to start rejecting new items to release the lock. - // This ensures that we test that the queue has been called to shutdown while items are still in the queue. go func() { - require.EventuallyWithT(t, func(c *assert.CollectT) { - // ensure the request is rejected due to closed queue - assert.ErrorIs(t, q.Offer(context.Background(), "x"), ErrQueueIsStopped) - }, 1*time.Second, 10*time.Millisecond) - close(waitChan) + assert.NoError(t, consumers.Shutdown(context.Background())) }() - assert.NoError(t, consumers.Shutdown(context.Background())) + // wait a bit to ensure shutdown is called and unblock the consumers. 
+ time.Sleep(100 * time.Millisecond) + close(waitChan) consumerState.assertConsumed(map[string]bool{ "a": true, @@ -179,8 +176,9 @@ func queueUsage(b *testing.B, capacity int, numConsumers int, numberOfItems int) b.ReportAllocs() for i := 0; i < b.N; i++ { q := NewBoundedMemoryQueue[string](capacity) - consumers := NewQueueConsumers(q, numConsumers, func(context.Context, string) { + consumers := NewQueueConsumers(q, numConsumers, func(context.Context, string) bool { time.Sleep(1 * time.Millisecond) + return true }) require.NoError(b, consumers.Start(context.Background(), componenttest.NewNopHost())) for j := 0; j < numberOfItems; j++ { @@ -238,7 +236,7 @@ func (s *consumerState) assertConsumed(expected map[string]bool) { func TestZeroSizeWithConsumers(t *testing.T) { q := NewBoundedMemoryQueue[string](0) - consumers := NewQueueConsumers(q, 1, func(context.Context, string) {}) + consumers := NewQueueConsumers(q, 1, func(context.Context, string) bool { return true }) assert.NoError(t, consumers.Start(context.Background(), componenttest.NewNopHost())) assert.NoError(t, q.Offer(context.Background(), "a")) // in process diff --git a/exporter/exporterhelper/internal/consumers.go b/exporter/exporterhelper/internal/consumers.go index 004b1c4e0e4..91f6a244115 100644 --- a/exporter/exporterhelper/internal/consumers.go +++ b/exporter/exporterhelper/internal/consumers.go @@ -13,11 +13,11 @@ import ( type QueueConsumers[T any] struct { queue Queue[T] numConsumers int - consumeFunc func(context.Context, T) + consumeFunc func(context.Context, T) bool stopWG sync.WaitGroup } -func NewQueueConsumers[T any](q Queue[T], numConsumers int, consumeFunc func(context.Context, T)) *QueueConsumers[T] { +func NewQueueConsumers[T any](q Queue[T], numConsumers int, consumeFunc func(context.Context, T) bool) *QueueConsumers[T] { return &QueueConsumers[T]{ queue: q, numConsumers: numConsumers, diff --git a/exporter/exporterhelper/internal/persistent_queue.go b/exporter/exporterhelper/internal/persistent_queue.go index 17bc2fe642e..f58016b5853 100644 --- a/exporter/exporterhelper/internal/persistent_queue.go +++ b/exporter/exporterhelper/internal/persistent_queue.go @@ -141,7 +141,7 @@ func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Contex // Consume applies the provided function on the head of queue. // The call blocks until there is an item available or the queue is stopped. // The function returns true when an item is consumed or false if the queue is stopped. -func (pq *persistentQueue[T]) Consume(consumeFunc func(context.Context, T)) bool { +func (pq *persistentQueue[T]) Consume(consumeFunc func(context.Context, T) bool) bool { var ( req T onProcessingFinished func() @@ -157,8 +157,9 @@ func (pq *persistentQueue[T]) Consume(consumeFunc func(context.Context, T)) bool } if consumed { - consumeFunc(context.Background(), req) - onProcessingFinished() + if ok := consumeFunc(context.Background(), req); ok { + onProcessingFinished() + } return true } } diff --git a/exporter/exporterhelper/internal/persistent_queue_test.go b/exporter/exporterhelper/internal/persistent_queue_test.go index 5fb87e6bc10..75d2a8f7ad7 100644 --- a/exporter/exporterhelper/internal/persistent_queue_test.go +++ b/exporter/exporterhelper/internal/persistent_queue_test.go @@ -39,7 +39,7 @@ func (nh *mockHost) GetExtensions() map[component.ID]component.Component { } // createAndStartTestPersistentQueue creates and starts a fake queue with the given capacity and number of consumers. 
-func createAndStartTestPersistentQueue(t *testing.T, capacity, numConsumers int, consumeFunc func(_ context.Context, item ptrace.Traces)) Queue[ptrace.Traces] { +func createAndStartTestPersistentQueue(t *testing.T, capacity, numConsumers int, consumeFunc func(_ context.Context, item ptrace.Traces) bool) Queue[ptrace.Traces] { pq := NewPersistentQueue[ptrace.Traces](capacity, component.DataTypeTraces, component.ID{}, marshaler.MarshalTraces, unmarshaler.UnmarshalTraces, exportertest.NewNopCreateSettings()) host := &mockHost{ext: map[component.ID]component.Component{ @@ -73,9 +73,10 @@ func createTestPersistentQueue(client storage.Client) *persistentQueue[ptrace.Tr func TestPersistentQueue_FullCapacity(t *testing.T) { start := make(chan struct{}) done := make(chan struct{}) - pq := createAndStartTestPersistentQueue(t, 5, 1, func(context.Context, ptrace.Traces) { + pq := createAndStartTestPersistentQueue(t, 5, 1, func(context.Context, ptrace.Traces) bool { <-start <-done + return true }) assert.Equal(t, 0, pq.Size()) @@ -99,7 +100,7 @@ func TestPersistentQueue_FullCapacity(t *testing.T) { } func TestPersistentQueue_Shutdown(t *testing.T) { - pq := createAndStartTestPersistentQueue(t, 1001, 100, func(context.Context, ptrace.Traces) {}) + pq := createAndStartTestPersistentQueue(t, 1001, 100, func(context.Context, ptrace.Traces) bool { return true }) req := newTraces(1, 10) for i := 0; i < 1000; i++ { @@ -139,8 +140,9 @@ func TestPersistentQueue_ConsumersProducers(t *testing.T) { req := newTraces(1, 10) numMessagesConsumed := &atomic.Int32{} - pq := createAndStartTestPersistentQueue(t, 1000, c.numConsumers, func(context.Context, ptrace.Traces) { + pq := createAndStartTestPersistentQueue(t, 1000, c.numConsumers, func(context.Context, ptrace.Traces) bool { numMessagesConsumed.Add(int32(1)) + return true }) for i := 0; i < c.numMessagesProduced; i++ { @@ -522,7 +524,7 @@ func BenchmarkPersistentQueue_TraceSpans(b *testing.B) { } for i := 0; i < bb.N; i++ { - require.True(bb, ps.Consume(func(context.Context, ptrace.Traces) {})) + require.True(bb, ps.Consume(func(context.Context, ptrace.Traces) bool { return true })) } require.NoError(b, ext.Shutdown(context.Background())) }) @@ -641,7 +643,7 @@ func TestPersistentQueue_StorageFull(t *testing.T) { // Subsequent items succeed, as deleting the first item frees enough space for the state update reqCount-- for i := reqCount; i > 0; i-- { - require.True(t, ps.Consume(func(context.Context, ptrace.Traces) {})) + require.True(t, ps.Consume(func(context.Context, ptrace.Traces) bool { return true })) } // We should be able to put a new item in diff --git a/exporter/exporterhelper/internal/queue.go b/exporter/exporterhelper/internal/queue.go index 906caa8c830..535db30eff2 100644 --- a/exporter/exporterhelper/internal/queue.go +++ b/exporter/exporterhelper/internal/queue.go @@ -30,7 +30,8 @@ type Queue[T any] interface { // Consume applies the provided function on the head of queue. // The call blocks until there is an item available or the queue is stopped. // The function returns true when an item is consumed or false if the queue is stopped. - Consume(func(ctx context.Context, item T)) bool + // The provided callback function returns true if the item was consumed or false if the consumer is stopped. + Consume(func(ctx context.Context, item T) bool) bool // Size returns the current Size of the queue Size() int // Capacity returns the capacity of the queue. 
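The `Queue.Consume` hunks above change the callback signature so that the consumer function itself reports success: it returns `true` when the item was processed and `false` when the consumer is already stopping, which lets the persistent queue keep the item marked as in-flight instead of acknowledging it. As a minimal sketch of that contract (assuming a plain channel-backed queue, not the actual `exporterhelper/internal` types):

```go
// Minimal sketch of the new Consume contract: the callback returns a bool,
// and only a true result means the item was actually handled.
package main

import (
	"context"
	"fmt"
)

type queue[T any] struct {
	items chan T
}

// Consume blocks for the next item and applies consumeFunc to it.
// It returns false once the queue channel has been closed and drained.
func (q *queue[T]) Consume(consumeFunc func(context.Context, T) bool) bool {
	item, ok := <-q.items
	if !ok {
		return false // queue stopped and emptied
	}
	if done := consumeFunc(context.Background(), item); !done {
		// A persistent queue would leave the item marked as in-flight here
		// instead of acknowledging it.
		fmt.Println("consumer stopped before processing:", item)
	}
	return true
}

func main() {
	q := &queue[string]{items: make(chan string, 2)}
	q.items <- "a"
	q.items <- "b"
	close(q.items) // Shutdown: close the channel and let consumers drain it.

	// Drain the queue: Consume returns false when nothing is left.
	for q.Consume(func(_ context.Context, s string) bool {
		fmt.Println("consumed", s)
		return true
	}) {
	}
}
```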
diff --git a/exporter/exporterhelper/obsexporter.go b/exporter/exporterhelper/obsexporter.go index d04a41f7510..094fc50b165 100644 --- a/exporter/exporterhelper/obsexporter.go +++ b/exporter/exporterhelper/obsexporter.go @@ -154,7 +154,7 @@ func (or *ObsReport) StartTracesOp(ctx context.Context) context.Context { // EndTracesOp completes the export operation that was started with StartTracesOp. func (or *ObsReport) EndTracesOp(ctx context.Context, numSpans int, err error) { numSent, numFailedToSend := toNumItems(numSpans, err) - or.recordMetrics(ctx, component.DataTypeTraces, numSent, numFailedToSend) + or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeTraces, numSent, numFailedToSend) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentSpansKey, obsmetrics.FailedToSendSpansKey) } @@ -169,7 +169,7 @@ func (or *ObsReport) StartMetricsOp(ctx context.Context) context.Context { // StartMetricsOp. func (or *ObsReport) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { numSent, numFailedToSend := toNumItems(numMetricPoints, err) - or.recordMetrics(ctx, component.DataTypeMetrics, numSent, numFailedToSend) + or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeMetrics, numSent, numFailedToSend) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentMetricPointsKey, obsmetrics.FailedToSendMetricPointsKey) } @@ -183,7 +183,7 @@ func (or *ObsReport) StartLogsOp(ctx context.Context) context.Context { // EndLogsOp completes the export operation that was started with StartLogsOp. func (or *ObsReport) EndLogsOp(ctx context.Context, numLogRecords int, err error) { numSent, numFailedToSend := toNumItems(numLogRecords, err) - or.recordMetrics(ctx, component.DataTypeLogs, numSent, numFailedToSend) + or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeLogs, numSent, numFailedToSend) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentLogRecordsKey, obsmetrics.FailedToSendLogRecordsKey) } diff --git a/exporter/exporterhelper/obsexporter_test.go b/exporter/exporterhelper/obsexporter_test.go index 0bab9828360..769406741d8 100644 --- a/exporter/exporterhelper/obsexporter_test.go +++ b/exporter/exporterhelper/obsexporter_test.go @@ -180,6 +180,11 @@ type testParams struct { func testTelemetry(t *testing.T, id component.ID, testFunc func(t *testing.T, tt obsreporttest.TestTelemetry, useOtel bool)) { t.Run("WithOC", func(t *testing.T) { + originalValue := obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled() + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), false)) + defer func() { + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), originalValue)) + }() tt, err := obsreporttest.SetupTelemetry(id) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) @@ -188,11 +193,6 @@ func testTelemetry(t *testing.T, id component.ID, testFunc func(t *testing.T, tt }) t.Run("WithOTel", func(t *testing.T) { - originalValue := obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled() - require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), true)) - defer func() { - require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), originalValue)) - }() tt, err := obsreporttest.SetupTelemetry(id) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, 
tt.Shutdown(context.Background())) }) diff --git a/exporter/exporterhelper/obsreport_test.go b/exporter/exporterhelper/obsreport_test.go index 8424c7f784e..b4c2e54d73e 100644 --- a/exporter/exporterhelper/obsreport_test.go +++ b/exporter/exporterhelper/obsreport_test.go @@ -11,6 +11,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/internal/obsreportconfig" "go.opentelemetry.io/collector/obsreport/obsreporttest" ) @@ -26,6 +28,12 @@ func TestExportEnqueueFailure(t *testing.T) { }) require.NoError(t, err) + originalValue := obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled() + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), false)) + defer func() { + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), originalValue)) + }() + logRecords := int64(7) obsrep.recordEnqueueFailureWithOC(context.Background(), component.DataTypeLogs, logRecords) require.NoError(t, tt.CheckExporterEnqueueFailedLogs(logRecords)) diff --git a/exporter/exporterhelper/queue_sender.go b/exporter/exporterhelper/queue_sender.go index c8e554fce56..865ff730253 100644 --- a/exporter/exporterhelper/queue_sender.go +++ b/exporter/exporterhelper/queue_sender.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "sync/atomic" "time" "go.opencensus.io/metric/metricdata" @@ -81,6 +82,7 @@ type queueSender struct { meter otelmetric.Meter consumers *internal.QueueConsumers[Request] requeuingEnabled bool + stopped *atomic.Bool metricCapacity otelmetric.Int64ObservableGauge metricSize otelmetric.Int64ObservableGauge @@ -105,18 +107,24 @@ func newQueueSender(config QueueSettings, set exporter.CreateSettings, signal co meter: set.TelemetrySettings.MeterProvider.Meter(scopeName), // TODO: this can be further exposed as a config param rather than relying on a type of queue requeuingEnabled: isPersistent, + stopped: &atomic.Bool{}, } qs.consumers = internal.NewQueueConsumers(queue, config.NumConsumers, qs.consume) return qs } // consume is the function that is executed by the queue consumers to send the data to the next consumerSender. -func (qs *queueSender) consume(ctx context.Context, req Request) { +func (qs *queueSender) consume(ctx context.Context, req Request) bool { err := qs.nextSender.send(ctx, req) // Nothing to do if the error is nil or permanent. Permanent errors are already logged by retrySender. if err == nil || consumererror.IsPermanent(err) { - return + return true + } + + // Do not requeue if the queue sender is stopped. + if qs.stopped.Load() { + return false } if !qs.requeuingEnabled { @@ -125,7 +133,7 @@ func (qs *queueSender) consume(ctx context.Context, req Request) { zap.Error(err), zap.Int("dropped_items", req.ItemsCount()), ) - return + return true } if qs.queue.Offer(ctx, extractPartialRequest(req, err)) == nil { @@ -140,6 +148,7 @@ func (qs *queueSender) consume(ctx context.Context, req Request) { zap.Int("dropped_items", req.ItemsCount()), ) } + return true } // Start is invoked during service startup. @@ -203,6 +212,8 @@ func (qs *queueSender) recordWithOC() error { // Shutdown is invoked during service shutdown. 
func (qs *queueSender) Shutdown(ctx context.Context) error { + qs.stopped.Store(true) + // Cleanup queue metrics reporting _ = globalInstruments.queueSize.UpsertEntry(func() int64 { return int64(0) diff --git a/exporter/exporterhelper/queue_sender_test.go b/exporter/exporterhelper/queue_sender_test.go index 75a30226e25..3fb8e47af33 100644 --- a/exporter/exporterhelper/queue_sender_test.go +++ b/exporter/exporterhelper/queue_sender_test.go @@ -144,6 +144,8 @@ func setFeatureGateForTest(t testing.TB, gate *featuregate.Gate, enabled bool) f } func TestQueuedRetry_QueueMetricsReported(t *testing.T) { + resetFlag := setFeatureGateForTest(t, obsreportconfig.UseOtelForInternalMetricsfeatureGate, false) + defer resetFlag() tt, err := obsreporttest.SetupTelemetry(defaultID) require.NoError(t, err) @@ -166,9 +168,6 @@ func TestQueuedRetry_QueueMetricsReported(t *testing.T) { } func TestQueuedRetry_QueueMetricsReportedUsingOTel(t *testing.T) { - resetFlag := setFeatureGateForTest(t, obsreportconfig.UseOtelForInternalMetricsfeatureGate, true) - defer resetFlag() - tt, err := obsreporttest.SetupTelemetry(defaultID) require.NoError(t, err) @@ -424,11 +423,18 @@ func TestQueuedRetryPersistentEnabled_shutdown_dataIsRequeued(t *testing.T) { return be.queueSender.(*queueSender).queue.Size() == 0 }, time.Second, 1*time.Millisecond) - // shuts down and ensure the item is produced in the queue again + // shuts down the exporter, unsent data should be preserved as in-flight data in the persistent queue. require.NoError(t, be.Shutdown(context.Background())) - assert.Eventually(t, func() bool { - return be.queueSender.(*queueSender).queue.Size() == 1 - }, time.Second, 1*time.Millisecond) + + // start the exporter again replacing the preserved mockRequest in the unmarshaler with a new one that doesn't fail. 
+ replacedReq := newMockRequest(1, nil) + be, err = newBaseExporter(defaultSettings, "", false, mockRequestMarshaler, mockRequestUnmarshaler(replacedReq), + newNoopObsrepSender, WithRetry(rCfg), WithQueue(qCfg)) + require.NoError(t, err) + require.NoError(t, be.Start(context.Background(), host)) + + // wait for the item to be consumed from the queue + replacedReq.checkNumRequests(t, 1) } func TestQueueSenderNoStartShutdown(t *testing.T) { diff --git a/exporter/exporterhelper/retry_sender_test.go b/exporter/exporterhelper/retry_sender_test.go index 89a7ab577ce..9ecbb71906b 100644 --- a/exporter/exporterhelper/retry_sender_test.go +++ b/exporter/exporterhelper/retry_sender_test.go @@ -32,7 +32,7 @@ func mockRequestUnmarshaler(mr Request) RequestUnmarshaler { } func mockRequestMarshaler(_ Request) ([]byte, error) { - return nil, nil + return []byte("mockRequest"), nil } func TestQueuedRetry_DropOnPermanentError(t *testing.T) { diff --git a/exporter/go.mod b/exporter/go.mod index fb830253e2b..3ca2071e731 100644 --- a/exporter/go.mod +++ b/exporter/go.mod @@ -6,14 +6,14 @@ require ( github.com/cenkalti/backoff/v4 v4.2.1 github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 - go.opentelemetry.io/collector/extension v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 + go.opentelemetry.io/collector/extension v0.91.0 go.opentelemetry.io/collector/featuregate v1.0.0 go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/receiver v0.90.0 + go.opentelemetry.io/collector/receiver v0.91.0 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 @@ -53,7 +53,7 @@ require ( github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2 // indirect go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect golang.org/x/net v0.18.0 // indirect diff --git a/exporter/loggingexporter/go.mod b/exporter/loggingexporter/go.mod index 0326312a5e3..8149aa882c5 100644 --- a/exporter/loggingexporter/go.mod +++ b/exporter/loggingexporter/go.mod @@ -5,10 +5,10 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/exporter v0.90.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/exporter v0.91.0 go.uber.org/zap v1.26.0 ) @@ -29,12 +29,12 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.90.0 // indirect - go.opentelemetry.io/collector/consumer v0.90.0 // indirect - go.opentelemetry.io/collector/extension v0.90.0 // indirect + 
go.opentelemetry.io/collector v0.91.0 // indirect + go.opentelemetry.io/collector/consumer v0.91.0 // indirect + go.opentelemetry.io/collector/extension v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect - go.opentelemetry.io/collector/receiver v0.90.0 // indirect + go.opentelemetry.io/collector/receiver v0.91.0 // indirect go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect diff --git a/exporter/otlpexporter/go.mod b/exporter/otlpexporter/go.mod index 68a566bd3fe..3b5d4f1408b 100644 --- a/exporter/otlpexporter/go.mod +++ b/exporter/otlpexporter/go.mod @@ -4,18 +4,18 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configauth v0.90.0 - go.opentelemetry.io/collector/config/configcompression v0.90.0 - go.opentelemetry.io/collector/config/configgrpc v0.90.0 - go.opentelemetry.io/collector/config/configopaque v0.90.0 - go.opentelemetry.io/collector/config/configtls v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 - go.opentelemetry.io/collector/exporter v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configauth v0.91.0 + go.opentelemetry.io/collector/config/configcompression v0.91.0 + go.opentelemetry.io/collector/config/configgrpc v0.91.0 + go.opentelemetry.io/collector/config/configopaque v0.91.0 + go.opentelemetry.io/collector/config/configtls v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 + go.opentelemetry.io/collector/exporter v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.90.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.91.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 @@ -34,7 +34,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.3 // indirect + github.com/klauspost/compress v1.17.4 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.0.1 // indirect @@ -47,14 +47,14 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.10.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.90.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.90.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect - go.opentelemetry.io/collector/config/internal v0.90.0 // indirect - go.opentelemetry.io/collector/extension v0.90.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.90.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.91.0 // indirect + go.opentelemetry.io/collector/config/confignet v0.91.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect + go.opentelemetry.io/collector/config/internal v0.91.0 // indirect + go.opentelemetry.io/collector/extension v0.91.0 // indirect + 
go.opentelemetry.io/collector/extension/auth v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect - go.opentelemetry.io/collector/receiver v0.90.0 // indirect + go.opentelemetry.io/collector/receiver v0.91.0 // indirect go.opentelemetry.io/contrib/config v0.1.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect diff --git a/exporter/otlpexporter/go.sum b/exporter/otlpexporter/go.sum index 004f05c9295..967095032b5 100644 --- a/exporter/otlpexporter/go.sum +++ b/exporter/otlpexporter/go.sum @@ -68,8 +68,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= diff --git a/exporter/otlphttpexporter/go.mod b/exporter/otlphttpexporter/go.mod index 8908029f0d3..62fca98fa4c 100644 --- a/exporter/otlphttpexporter/go.mod +++ b/exporter/otlphttpexporter/go.mod @@ -4,18 +4,18 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configcompression v0.90.0 - go.opentelemetry.io/collector/config/confighttp v0.90.0 - go.opentelemetry.io/collector/config/configopaque v0.90.0 - go.opentelemetry.io/collector/config/configtls v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 - go.opentelemetry.io/collector/exporter v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configcompression v0.91.0 + go.opentelemetry.io/collector/config/confighttp v0.91.0 + go.opentelemetry.io/collector/config/configopaque v0.91.0 + go.opentelemetry.io/collector/config/configtls v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 + go.opentelemetry.io/collector/exporter v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/receiver v0.90.0 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.90.0 + go.opentelemetry.io/collector/receiver v0.91.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.91.0 go.uber.org/zap v1.26.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 google.golang.org/grpc v1.59.0 @@ -35,7 +35,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.3 // indirect + github.com/klauspost/compress v1.17.4 // indirect 
github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.0.1 // indirect @@ -48,13 +48,13 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.10.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.90.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.90.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.90.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect - go.opentelemetry.io/collector/config/internal v0.90.0 // indirect - go.opentelemetry.io/collector/extension v0.90.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.90.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.91.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.91.0 // indirect + go.opentelemetry.io/collector/config/confignet v0.91.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect + go.opentelemetry.io/collector/config/internal v0.91.0 // indirect + go.opentelemetry.io/collector/extension v0.91.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/contrib/config v0.1.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect diff --git a/exporter/otlphttpexporter/go.sum b/exporter/otlphttpexporter/go.sum index 004f05c9295..967095032b5 100644 --- a/exporter/otlphttpexporter/go.sum +++ b/exporter/otlphttpexporter/go.sum @@ -68,8 +68,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= diff --git a/extension/auth/go.mod b/extension/auth/go.mod index af743158a04..47ef6bbd0b9 100644 --- a/extension/auth/go.mod +++ b/extension/auth/go.mod @@ -4,8 +4,8 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/extension v0.90.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/extension v0.91.0 google.golang.org/grpc v1.59.0 ) @@ -21,8 +21,8 @@ require ( github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect + 
go.opentelemetry.io/collector/confmap v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect go.opentelemetry.io/otel v1.21.0 // indirect diff --git a/extension/ballastextension/go.mod b/extension/ballastextension/go.mod index 12b31d4778d..69283446091 100644 --- a/extension/ballastextension/go.mod +++ b/extension/ballastextension/go.mod @@ -4,10 +4,10 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/extension v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/extension v0.91.0 go.uber.org/zap v1.26.0 ) @@ -26,11 +26,11 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/shirou/gopsutil/v3 v3.23.10 // indirect + github.com/shirou/gopsutil/v3 v3.23.11 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect go.opentelemetry.io/otel v1.21.0 // indirect diff --git a/extension/ballastextension/go.sum b/extension/ballastextension/go.sum index ca39b8f14b6..c846c8bcb83 100644 --- a/extension/ballastextension/go.sum +++ b/extension/ballastextension/go.sum @@ -40,8 +40,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/shirou/gopsutil/v3 v3.23.10 h1:/N42opWlYzegYaVkWejXWJpbzKv2JDy3mrgGzKsh9hM= -github.com/shirou/gopsutil/v3 v3.23.10/go.mod h1:JIE26kpucQi+innVlAUnIEOSBhBUkirr5b44yr55+WE= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -91,7 +91,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/extension/go.mod b/extension/go.mod index 1cc7e26225d..c8efd6df6ad 100644 --- a/extension/go.mod +++ b/extension/go.mod @@ -4,8 +4,8 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 ) require ( @@ -20,7 +20,7 @@ require ( github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect go.opentelemetry.io/otel v1.21.0 // indirect diff --git a/extension/zpagesextension/go.mod b/extension/zpagesextension/go.mod index b8068c922f4..a1aa571d93d 100644 --- a/extension/zpagesextension/go.mod +++ b/extension/zpagesextension/go.mod @@ -4,11 +4,11 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/confignet v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/extension v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/confignet v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/extension v0.91.0 go.opentelemetry.io/contrib/zpages v0.46.1 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 @@ -29,7 +29,7 @@ require ( github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect go.opentelemetry.io/contrib/config v0.1.1 // indirect diff --git a/go.mod b/go.mod index 526b2f74ec5..4d5ed16051a 100644 --- a/go.mod +++ b/go.mod @@ -7,18 +7,18 @@ require ( github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 - github.com/shirou/gopsutil/v3 v3.23.10 + github.com/shirou/gopsutil/v3 v3.23.11 github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/connector v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 - go.opentelemetry.io/collector/exporter v0.90.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/connector v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 + go.opentelemetry.io/collector/exporter v0.91.0 go.opentelemetry.io/collector/featuregate v1.0.0 go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/processor v0.90.0 - go.opentelemetry.io/collector/receiver v0.90.0 + go.opentelemetry.io/collector/processor v0.91.0 + 
go.opentelemetry.io/collector/receiver v0.91.0 go.opentelemetry.io/contrib/config v0.1.1 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2 @@ -59,8 +59,8 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect - go.opentelemetry.io/collector/extension v0.90.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect + go.opentelemetry.io/collector/extension v0.91.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect go.uber.org/goleak v1.2.1 // indirect diff --git a/go.sum b/go.sum index 629c196c0e3..949bf77509d 100644 --- a/go.sum +++ b/go.sum @@ -244,8 +244,8 @@ github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/shirou/gopsutil/v3 v3.23.10 h1:/N42opWlYzegYaVkWejXWJpbzKv2JDy3mrgGzKsh9hM= -github.com/shirou/gopsutil/v3 v3.23.10/go.mod h1:JIE26kpucQi+innVlAUnIEOSBhBUkirr5b44yr55+WE= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -437,7 +437,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/internal/fanoutconsumer/logs.go b/internal/fanoutconsumer/logs.go index 2d1e0336dc0..5047fbbf70a 100644 --- a/internal/fanoutconsumer/logs.go +++ b/internal/fanoutconsumer/logs.go @@ -44,7 +44,8 @@ type logsConsumer struct { } func (lsc *logsConsumer) Capabilities() consumer.Capabilities { - return consumer.Capabilities{MutatesData: false} + // If all consumers are mutating, then the original data will be passed to one of them. + return consumer.Capabilities{MutatesData: len(lsc.mutable) > 0 && len(lsc.readonly) == 0} } // ConsumeLogs exports the plog.Logs to all consumers wrapped by the current one. 
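
The Capabilities change above (mirrored for metrics and traces in the hunks below) reduces to one rule: the fanout reports MutatesData only when every wrapped consumer mutates, because only then is the original payload handed to one of them instead of a copy. A minimal standalone sketch of that rule, using plain counts rather than the collector's consumer types (illustration only, not the collector API):

package main

import "fmt"

// capabilities mirrors the rule introduced in logs.go above: the fanout
// mutates data only when it wraps mutating consumers and no read-only ones.
func capabilities(mutable, readonly int) bool {
	return mutable > 0 && readonly == 0
}

func main() {
	fmt.Println(capabilities(1, 0)) // true: a single mutating consumer receives the original data
	fmt.Println(capabilities(2, 1)) // false: the read-only consumer gets the original, mutating ones get copies
	fmt.Println(capabilities(0, 3)) // false: nothing mutates
}
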
diff --git a/internal/fanoutconsumer/logs_test.go b/internal/fanoutconsumer/logs_test.go index c2cd5c42bd9..472239b3456 100644 --- a/internal/fanoutconsumer/logs_test.go +++ b/internal/fanoutconsumer/logs_test.go @@ -26,6 +26,12 @@ func TestLogsNotMultiplexing(t *testing.T) { assert.Same(t, nop, lfc) } +func TestLogsNotMultiplexingMutating(t *testing.T) { + p := &mutatingLogsSink{LogsSink: new(consumertest.LogsSink)} + lfc := NewLogs([]consumer.Logs{p}) + assert.True(t, lfc.Capabilities().MutatesData) +} + func TestLogsMultiplexingNonMutating(t *testing.T) { p1 := new(consumertest.LogsSink) p2 := new(consumertest.LogsSink) @@ -68,7 +74,7 @@ func TestLogsMultiplexingMutating(t *testing.T) { p3 := &mutatingLogsSink{LogsSink: new(consumertest.LogsSink)} lfc := NewLogs([]consumer.Logs{p1, p2, p3}) - assert.False(t, lfc.Capabilities().MutatesData) + assert.True(t, lfc.Capabilities().MutatesData) ld := testdata.GenerateLogs(1) for i := 0; i < 2; i++ { @@ -105,7 +111,7 @@ func TestReadOnlyLogsMultiplexingMutating(t *testing.T) { p3 := &mutatingLogsSink{LogsSink: new(consumertest.LogsSink)} lfc := NewLogs([]consumer.Logs{p1, p2, p3}) - assert.False(t, lfc.Capabilities().MutatesData) + assert.True(t, lfc.Capabilities().MutatesData) ldOrig := testdata.GenerateLogs(1) ld := testdata.GenerateLogs(1) ld.MarkReadOnly() diff --git a/internal/fanoutconsumer/metrics.go b/internal/fanoutconsumer/metrics.go index ddc3761240b..023db2a70e3 100644 --- a/internal/fanoutconsumer/metrics.go +++ b/internal/fanoutconsumer/metrics.go @@ -42,7 +42,8 @@ type metricsConsumer struct { } func (msc *metricsConsumer) Capabilities() consumer.Capabilities { - return consumer.Capabilities{MutatesData: false} + // If all consumers are mutating, then the original data will be passed to one of them. + return consumer.Capabilities{MutatesData: len(msc.mutable) > 0 && len(msc.readonly) == 0} } // ConsumeMetrics exports the pmetric.Metrics to all consumers wrapped by the current one. 
diff --git a/internal/fanoutconsumer/metrics_test.go b/internal/fanoutconsumer/metrics_test.go index a58a7480c92..cf6455d0d07 100644 --- a/internal/fanoutconsumer/metrics_test.go +++ b/internal/fanoutconsumer/metrics_test.go @@ -26,6 +26,12 @@ func TestMetricsNotMultiplexing(t *testing.T) { assert.Same(t, nop, mfc) } +func TestMetricsNotMultiplexingMutating(t *testing.T) { + p := &mutatingMetricsSink{MetricsSink: new(consumertest.MetricsSink)} + mfc := NewMetrics([]consumer.Metrics{p}) + assert.True(t, mfc.Capabilities().MutatesData) +} + func TestMetricsMultiplexingNonMutating(t *testing.T) { p1 := new(consumertest.MetricsSink) p2 := new(consumertest.MetricsSink) @@ -68,7 +74,7 @@ func TestMetricsMultiplexingMutating(t *testing.T) { p3 := &mutatingMetricsSink{MetricsSink: new(consumertest.MetricsSink)} mfc := NewMetrics([]consumer.Metrics{p1, p2, p3}) - assert.False(t, mfc.Capabilities().MutatesData) + assert.True(t, mfc.Capabilities().MutatesData) md := testdata.GenerateMetrics(1) for i := 0; i < 2; i++ { @@ -105,7 +111,7 @@ func TestReadOnlyMetricsMultiplexingMixFirstMutating(t *testing.T) { p3 := &mutatingMetricsSink{MetricsSink: new(consumertest.MetricsSink)} mfc := NewMetrics([]consumer.Metrics{p1, p2, p3}) - assert.False(t, mfc.Capabilities().MutatesData) + assert.True(t, mfc.Capabilities().MutatesData) mdOrig := testdata.GenerateMetrics(1) md := testdata.GenerateMetrics(1) md.MarkReadOnly() diff --git a/internal/fanoutconsumer/traces.go b/internal/fanoutconsumer/traces.go index c8d0871d0a2..e13068a656f 100644 --- a/internal/fanoutconsumer/traces.go +++ b/internal/fanoutconsumer/traces.go @@ -42,7 +42,8 @@ type tracesConsumer struct { } func (tsc *tracesConsumer) Capabilities() consumer.Capabilities { - return consumer.Capabilities{MutatesData: false} + // If all consumers are mutating, then the original data will be passed to one of them. + return consumer.Capabilities{MutatesData: len(tsc.mutable) > 0 && len(tsc.readonly) == 0} } // ConsumeTraces exports the ptrace.Traces to all consumers wrapped by the current one.
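
In the same spirit as the tests above, a hypothetical companion case (not part of this diff, and assuming the mutatingTracesSink helper already defined in traces_test.go) would pin down the mixed scenario, where a single read-only sink keeps the fanout non-mutating:

func TestTracesMultiplexingMixedReadOnlyAndMutating(t *testing.T) {
	// One read-only sink means the fanout must preserve the original data,
	// so the combined capabilities must not report MutatesData.
	p1 := new(consumertest.TracesSink)
	p2 := &mutatingTracesSink{TracesSink: new(consumertest.TracesSink)}
	tfc := NewTraces([]consumer.Traces{p1, p2})
	assert.False(t, tfc.Capabilities().MutatesData)
}
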
diff --git a/internal/fanoutconsumer/traces_test.go b/internal/fanoutconsumer/traces_test.go index 872f4986ef2..966fdcf627b 100644 --- a/internal/fanoutconsumer/traces_test.go +++ b/internal/fanoutconsumer/traces_test.go @@ -26,6 +26,12 @@ func TestTracesNotMultiplexing(t *testing.T) { assert.Same(t, nop, tfc) } +func TestTracesNotMultiplexingMutating(t *testing.T) { + p := &mutatingTracesSink{TracesSink: new(consumertest.TracesSink)} + lfc := NewTraces([]consumer.Traces{p}) + assert.True(t, lfc.Capabilities().MutatesData) +} + func TestTracesMultiplexingNonMutating(t *testing.T) { p1 := new(consumertest.TracesSink) p2 := new(consumertest.TracesSink) @@ -68,7 +74,7 @@ func TestTracesMultiplexingMutating(t *testing.T) { p3 := &mutatingTracesSink{TracesSink: new(consumertest.TracesSink)} tfc := NewTraces([]consumer.Traces{p1, p2, p3}) - assert.False(t, tfc.Capabilities().MutatesData) + assert.True(t, tfc.Capabilities().MutatesData) td := testdata.GenerateTraces(1) for i := 0; i < 2; i++ { @@ -105,7 +111,7 @@ func TestReadOnlyTracesMultiplexingMutating(t *testing.T) { p3 := &mutatingTracesSink{TracesSink: new(consumertest.TracesSink)} tfc := NewTraces([]consumer.Traces{p1, p2, p3}) - assert.False(t, tfc.Capabilities().MutatesData) + assert.True(t, tfc.Capabilities().MutatesData) tdOrig := testdata.GenerateTraces(1) td := testdata.GenerateTraces(1) diff --git a/internal/obsreportconfig/obsreportconfig.go b/internal/obsreportconfig/obsreportconfig.go index 7b970d81bf9..28ec8510ddb 100644 --- a/internal/obsreportconfig/obsreportconfig.go +++ b/internal/obsreportconfig/obsreportconfig.go @@ -17,7 +17,7 @@ import ( // telemetrySettings for internal metrics. var UseOtelForInternalMetricsfeatureGate = featuregate.GlobalRegistry().MustRegister( "telemetry.useOtelForInternalMetrics", - featuregate.StageAlpha, + featuregate.StageBeta, featuregate.WithRegisterDescription("controls whether the collector uses OpenTelemetry for internal metrics")) // DisableHighCardinalityMetricsfeatureGate is the feature gate that controls whether the collector should enable diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 1783fe5cf1d..2cbcd712d22 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -8,7 +8,6 @@ require ( github.com/golangci/golangci-lint v1.55.2 github.com/google/addlicense v1.1.1 github.com/jcchavezs/porto v0.6.0 - github.com/mikefarah/yq/v4 v4.40.3 github.com/pavius/impi v0.0.3 go.opentelemetry.io/build-tools/checkfile v0.12.0 go.opentelemetry.io/build-tools/chloggen v0.12.0 @@ -37,8 +36,8 @@ require ( github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/acomagu/bufpipe v1.0.4 // indirect + github.com/alecthomas/assert/v2 v2.3.0 // indirect github.com/alecthomas/go-check-sumtype v0.1.3 // indirect - github.com/alecthomas/participle/v2 v2.1.0 // indirect github.com/alexkohler/nakedret/v2 v2.0.2 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect @@ -64,8 +63,6 @@ require ( github.com/daixiang0/gci v0.11.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect - github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/elliotchance/orderedmap v1.5.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect @@ -88,8 +85,6 @@ require ( github.com/go-toolsmith/typep v1.1.0 // 
indirect github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.2 // indirect - github.com/goccy/go-yaml v1.11.2 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect @@ -116,7 +111,6 @@ require ( github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jgautheron/goconst v1.6.0 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect - github.com/jinzhu/copier v0.4.0 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/julz/importas v0.1.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect @@ -202,7 +196,6 @@ require ( github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect github.com/ykadowak/zerologlint v0.1.3 // indirect - github.com/yuin/gopher-lua v1.1.0 // indirect gitlab.com/bosi/decorder v0.4.1 // indirect go-simpler.org/sloglint v0.1.2 // indirect go.opentelemetry.io/build-tools v0.12.0 // indirect @@ -216,10 +209,8 @@ require ( golang.org/x/sync v0.5.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/internal/tools/go.sum b/internal/tools/go.sum index db441af5172..cdd312c3f55 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -74,11 +74,10 @@ github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGt github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= +github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/go-check-sumtype v0.1.3 h1:M+tqMxB68hcgccRXBMVCPI4UJ+QUfdSx0xdbypKCqA8= github.com/alecthomas/go-check-sumtype v0.1.3/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= -github.com/alecthomas/participle/v2 v2.1.0 h1:z7dElHRrOEEq45F2TG5cbQihMtNTv8vwldytDj7Wrz4= -github.com/alecthomas/participle/v2 v2.1.0/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= -github.com/alecthomas/repr v0.3.0 h1:NeYzUPfjjlqHY4KtzgKJiWd6sVq2eNUPTi34PiFGjY8= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -152,11 +151,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= github.com/denis-tingaikin/go-header v0.4.3/go.mod 
h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= -github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= -github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= -github.com/elliotchance/orderedmap v1.5.1 h1:G1X4PYlljzimbdQ3RXmtIZiQ9d6aRQ3sH1nzjq5mECE= -github.com/elliotchance/orderedmap v1.5.1/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -202,9 +197,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= @@ -228,10 +220,6 @@ github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80 github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-yaml v1.11.2 h1:joq77SxuyIs9zzxEjgyLBugMQ9NEgTWxXfz2wVqwAaQ= -github.com/goccy/go-yaml v1.11.2/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -362,8 +350,6 @@ github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1AD github.com/jgautheron/goconst v1.6.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= -github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= -github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jpillora/backoff v1.0.0/go.mod 
h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -404,7 +390,6 @@ github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUc github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= @@ -436,8 +421,6 @@ github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwg github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mgechev/revive v1.3.4 h1:k/tO3XTaWY4DEHal9tWBkkUMJYO/dLDVyMmAQxmIMDc= github.com/mgechev/revive v1.3.4/go.mod h1:W+pZCMu9qj8Uhfs1iJMQsEFLRozUfvwFwqVvRbSNLVw= -github.com/mikefarah/yq/v4 v4.40.3 h1:pSJua3IGua3O+HFXp5LkMheqEOHMRQAkkCcgVhqi4VY= -github.com/mikefarah/yq/v4 v4.40.3/go.mod h1:TSqdLNAFlwmIGQBQYIzeOX+wDWkCTWfRGWDrxQwGCaQ= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= @@ -475,7 +458,6 @@ github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6 github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -632,8 +614,6 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= -github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4= gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E= @@ -960,8 +940,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1062,8 +1040,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE= -gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/internal/tools/jsonschema_patch.sed b/internal/tools/jsonschema_patch.sed deleted file mode 100644 index 04090d912d0..00000000000 --- a/internal/tools/jsonschema_patch.sed +++ /dev/null @@ -1,4 +0,0 @@ -# go-jsonschema always generates patternProperties as -# map[string]interface{}, for more specific types, they must -# be replaced here -s+type Headers.*+type Headers map[string]string+g \ No newline at end of file diff --git a/internal/tools/semconvkit/main.go b/internal/tools/semconvkit/main.go new file mode 100644 index 00000000000..2dc73c37c05 --- /dev/null +++ b/internal/tools/semconvkit/main.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "embed" + "flag" + "log" + "os" + "path/filepath" + "strings" + "text/template" +) + +var ( + out = flag.String("output", "./", "output directory") + tag = flag.String("tag", "", "OpenTelemetry tagged version") + + //go:embed templates/*.tmpl + rootFS embed.FS +) + +// SemanticConventions are information about the semantic conventions being +// generated. +type SemanticConventions struct { + // TagVer is the tagged version (i.e. v1.7.0 and not 1.7.0). + TagVer string +} + +func (sc SemanticConventions) SemVer() string { + return strings.TrimPrefix(*tag, "v") +} + +// render renders all templates to the dest directory using the data. 
+func render(src, dest string, data *SemanticConventions) error { + tmpls, err := template.ParseFS(rootFS, src) + if err != nil { + return err + } + for _, tmpl := range tmpls.Templates() { + target := filepath.Join(dest, strings.TrimSuffix(tmpl.Name(), ".tmpl")) + // nolint: gosec + wr, err := os.Create(target) + if err != nil { + return err + } + + err = tmpl.Execute(wr, data) + if err != nil { + return err + } + } + + return nil +} + +func main() { + flag.Parse() + + if *tag == "" { + log.Fatalf("invalid tag: %q", *tag) + } + + sc := &SemanticConventions{TagVer: *tag} + + if err := render("templates/*.tmpl", *out, sc); err != nil { + log.Fatal(err) + } +} diff --git a/internal/tools/semconvkit/templates/doc.go.tmpl b/internal/tools/semconvkit/templates/doc.go.tmpl new file mode 100644 index 00000000000..0085e604fae --- /dev/null +++ b/internal/tools/semconvkit/templates/doc.go.tmpl @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the {{.TagVer}} +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/collector/semconv/{{.TagVer}}" diff --git a/internal/tools/semconvkit/templates/schema.go.tmpl b/internal/tools/semconvkit/templates/schema.go.tmpl new file mode 100644 index 00000000000..41006fa206b --- /dev/null +++ b/internal/tools/semconvkit/templates/schema.go.tmpl @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/{{.TagVer}}" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/{{.SemVer}}" diff --git a/internal/tools/tools.go b/internal/tools/tools.go index 07b2eaf7c54..821dfa74f7a 100644 --- a/internal/tools/tools.go +++ b/internal/tools/tools.go @@ -17,7 +17,6 @@ import ( _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/google/addlicense" _ "github.com/jcchavezs/porto/cmd/porto" - _ "github.com/mikefarah/yq/v4" _ "github.com/pavius/impi/cmd/impi" _ "go.opentelemetry.io/build-tools/checkfile" _ "go.opentelemetry.io/build-tools/chloggen" @@ -27,4 +26,6 @@ import ( _ "golang.org/x/exp/cmd/apidiff" _ "golang.org/x/tools/cmd/goimports" _ "golang.org/x/vuln/cmd/govulncheck" + + _ "go.opentelemetry.io/collector/internal/tools/semconvkit" ) diff --git a/otelcol/go.mod b/otelcol/go.mod index ae937521476..8adb62d4a7a 100644 --- a/otelcol/go.mod +++ b/otelcol/go.mod @@ -5,16 +5,16 @@ go 1.20 require ( github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/connector v0.90.0 - go.opentelemetry.io/collector/exporter v0.90.0 - go.opentelemetry.io/collector/extension v0.90.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/connector v0.91.0 + go.opentelemetry.io/collector/exporter v0.91.0 + go.opentelemetry.io/collector/extension v0.91.0 go.opentelemetry.io/collector/featuregate v1.0.0 - go.opentelemetry.io/collector/processor v0.90.0 - go.opentelemetry.io/collector/receiver v0.90.0 - go.opentelemetry.io/collector/service v0.90.0 + go.opentelemetry.io/collector/processor v0.91.0 + go.opentelemetry.io/collector/receiver v0.91.0 + go.opentelemetry.io/collector/service v0.91.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 golang.org/x/sys v0.15.0 @@ -58,17 +58,17 @@ require ( github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect - github.com/shirou/gopsutil/v3 v3.23.10 // indirect + github.com/shirou/gopsutil/v3 v3.23.11 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.90.0 // indirect - go.opentelemetry.io/collector/consumer v0.90.0 // indirect + go.opentelemetry.io/collector v0.91.0 // indirect + go.opentelemetry.io/collector/consumer v0.91.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect - go.opentelemetry.io/collector/semconv v0.90.0 // indirect + go.opentelemetry.io/collector/semconv v0.91.0 // indirect go.opentelemetry.io/contrib/config v0.1.1 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.21.1 // indirect go.opentelemetry.io/otel v1.21.0 // indirect diff --git a/otelcol/go.sum b/otelcol/go.sum index 4b37695d47e..72fde5f571c 100644 --- a/otelcol/go.sum +++ b/otelcol/go.sum @@ -253,8 +253,8 @@ github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9 github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v3 v3.23.10 h1:/N42opWlYzegYaVkWejXWJpbzKv2JDy3mrgGzKsh9hM= -github.com/shirou/gopsutil/v3 v3.23.10/go.mod h1:JIE26kpucQi+innVlAUnIEOSBhBUkirr5b44yr55+WE= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -473,7 +473,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/pdata/pcommon/value.go b/pdata/pcommon/value.go index 2e0443dd3b2..77a84e51758 100644 --- a/pdata/pcommon/value.go +++ b/pdata/pcommon/value.go @@ -129,6 +129,8 @@ func (v Value) getState() *internal.State { return internal.GetValueState(internal.Value(v)) } +// FromRaw sets the value from the given raw value. +// Calling this function on zero-initialized Value will cause a panic. func (v Value) FromRaw(iv any) error { switch tv := iv.(type) { case nil: @@ -198,37 +200,31 @@ func (v Value) Type() ValueType { // Str returns the string value associated with this Value. // The shorter name is used instead of String to avoid implementing fmt.Stringer interface. // If the Type() is not ValueTypeStr then returns empty string. -// Calling this function on zero-initialized Value will cause a panic. func (v Value) Str() string { return v.getOrig().GetStringValue() } // Int returns the int64 value associated with this Value. // If the Type() is not ValueTypeInt then returns int64(0). -// Calling this function on zero-initialized Value will cause a panic. func (v Value) Int() int64 { return v.getOrig().GetIntValue() } // Double returns the float64 value associated with this Value. // If the Type() is not ValueTypeDouble then returns float64(0). -// Calling this function on zero-initialized Value will cause a panic. func (v Value) Double() float64 { return v.getOrig().GetDoubleValue() } // Bool returns the bool value associated with this Value. // If the Type() is not ValueTypeBool then returns false. -// Calling this function on zero-initialized Value will cause a panic. func (v Value) Bool() bool { return v.getOrig().GetBoolValue() } // Map returns the map value associated with this Value. -// If the Type() is not ValueTypeMap then returns an invalid map. Note that using -// such map can cause panic. -// -// Calling this function on zero-initialized Value will cause a panic. 
+// If the function is called on zero-initialized Value or if the Type() is not ValueTypeMap +// then it returns an invalid map. Note that using such map can cause panic. func (v Value) Map() Map { kvlist := v.getOrig().GetKvlistValue() if kvlist == nil { @@ -238,10 +234,8 @@ func (v Value) Map() Map { } // Slice returns the slice value associated with this Value. -// If the Type() is not ValueTypeSlice then returns an invalid slice. Note that using -// such slice can cause panic. -// -// Calling this function on zero-initialized Value will cause a panic. +// If the function is called on zero-initialized Value or if the Type() is not ValueTypeSlice +// then returns an invalid slice. Note that using such slice can cause panic. func (v Value) Slice() Slice { arr := v.getOrig().GetArrayValue() if arr == nil { @@ -251,10 +245,8 @@ func (v Value) Slice() Slice { } // Bytes returns the ByteSlice value associated with this Value. -// If the Type() is not ValueTypeBytes then returns an invalid ByteSlice object. Note that using -// such slice can cause panic. -// -// Calling this function on zero-initialized Value will cause a panic. +// If the function is called on zero-initialized Value or if the Type() is not ValueTypeBytes +// then returns an invalid ByteSlice object. Note that using such slice can cause panic. func (v Value) Bytes() ByteSlice { bv, ok := v.getOrig().GetValue().(*otlpcommon.AnyValue_BytesValue) if !ok { @@ -325,6 +317,7 @@ func (v Value) SetEmptySlice() Slice { } // CopyTo copies the Value instance overriding the destination. +// Calling this function on zero-initialized Value will cause a panic. func (v Value) CopyTo(dest Value) { dest.getState().AssertMutable() destOrig := dest.getOrig() @@ -370,6 +363,7 @@ func (v Value) CopyTo(dest Value) { // AsString converts an OTLP Value object of any type to its equivalent string // representation. This differs from Str which only returns a non-empty value // if the ValueType is ValueTypeStr. +// Calling this function on zero-initialized Value will cause a panic. 
func (v Value) AsString() string { switch v.Type() { case ValueTypeEmpty: diff --git a/pdata/pcommon/value_test.go b/pdata/pcommon/value_test.go index 9d2e5f439ee..920bd0b6e96 100644 --- a/pdata/pcommon/value_test.go +++ b/pdata/pcommon/value_test.go @@ -556,6 +556,29 @@ func TestNewValueFromRawInvalid(t *testing.T) { assert.EqualError(t, actual.FromRaw(ValueTypeDouble), "") } +func TestInvalidValue(t *testing.T) { + v := Value{} + assert.Equal(t, false, v.Bool()) + assert.Equal(t, int64(0), v.Int()) + assert.Equal(t, float64(0), v.Double()) + assert.Equal(t, "", v.Str()) + assert.Equal(t, ByteSlice{}, v.Bytes()) + assert.Equal(t, Map{}, v.Map()) + assert.Equal(t, Slice{}, v.Slice()) + assert.Panics(t, func() { v.AsString() }) + assert.Panics(t, func() { v.AsRaw() }) + assert.Panics(t, func() { _ = v.FromRaw(1) }) + assert.Panics(t, func() { v.Type() }) + assert.Panics(t, func() { v.SetStr("") }) + assert.Panics(t, func() { v.SetInt(0) }) + assert.Panics(t, func() { v.SetDouble(0) }) + assert.Panics(t, func() { v.SetBool(false) }) + assert.Panics(t, func() { v.SetEmptyBytes() }) + assert.Panics(t, func() { v.SetEmptyMap() }) + assert.Panics(t, func() { v.SetEmptySlice() }) + assert.Panics(t, func() { v.CopyTo(NewValueEmpty()) }) +} + func generateTestValueMap() Value { ret := NewValueMap() attrMap := ret.Map() diff --git a/processor/batchprocessor/go.mod b/processor/batchprocessor/go.mod index cdd94538af6..f4dd3f16cc9 100644 --- a/processor/batchprocessor/go.mod +++ b/processor/batchprocessor/go.mod @@ -9,13 +9,13 @@ require ( github.com/prometheus/common v0.45.0 github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/processor v0.90.0 + go.opentelemetry.io/collector/processor v0.91.0 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2 go.opentelemetry.io/otel/metric v1.21.0 diff --git a/processor/go.mod b/processor/go.mod index 34e2df1c366..5fbce367b1c 100644 --- a/processor/go.mod +++ b/processor/go.mod @@ -5,10 +5,10 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 go.opentelemetry.io/collector/featuregate v1.0.0 go.opentelemetry.io/collector/pdata v1.0.0 go.opentelemetry.io/otel v1.21.0 @@ -47,7 +47,7 @@ require ( github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect 
go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2 // indirect go.opentelemetry.io/otel/sdk v1.21.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect diff --git a/processor/memorylimiterprocessor/config.go b/processor/memorylimiterprocessor/config.go index e16ebeb67ee..3f526fb4f4f 100644 --- a/processor/memorylimiterprocessor/config.go +++ b/processor/memorylimiterprocessor/config.go @@ -39,5 +39,20 @@ var _ component.Config = (*Config)(nil) // Validate checks if the processor configuration is valid func (cfg *Config) Validate() error { + if cfg.CheckInterval <= 0 { + return errCheckIntervalOutOfRange + } + if cfg.MemoryLimitMiB == 0 && cfg.MemoryLimitPercentage == 0 { + return errLimitOutOfRange + } + if cfg.MemoryLimitPercentage > 100 || cfg.MemorySpikePercentage > 100 { + return errPercentageLimitOutOfRange + } + if cfg.MemoryLimitMiB > 0 && cfg.MemoryLimitMiB <= cfg.MemorySpikeLimitMiB { + return errMemSpikeLimitOutOfRange + } + if cfg.MemoryLimitPercentage > 0 && cfg.MemoryLimitPercentage <= cfg.MemorySpikePercentage { + return errMemSpikePercentageLimitOutOfRange + } return nil } diff --git a/processor/memorylimiterprocessor/config_test.go b/processor/memorylimiterprocessor/config_test.go index 076d037dba0..37f9f7a51c5 100644 --- a/processor/memorylimiterprocessor/config_test.go +++ b/processor/memorylimiterprocessor/config_test.go @@ -36,3 +36,71 @@ func TestUnmarshalConfig(t *testing.T) { MemorySpikeLimitMiB: 500, }, cfg) } + +func TestConfigValidate(t *testing.T) { + tests := []struct { + name string + cfg *Config + err error + }{ + { + name: "valid", + cfg: func() *Config { + cfg := createDefaultConfig().(*Config) + cfg.MemoryLimitMiB = 5722 + cfg.MemorySpikeLimitMiB = 1907 + cfg.CheckInterval = 100 * time.Millisecond + return cfg + }(), + err: nil, + }, + { + name: "zero check interval", + cfg: &Config{ + CheckInterval: 0, + }, + err: errCheckIntervalOutOfRange, + }, + { + name: "unset memory limit", + cfg: &Config{ + CheckInterval: 1 * time.Second, + MemoryLimitMiB: 0, + MemoryLimitPercentage: 0, + }, + err: errLimitOutOfRange, + }, + { + name: "invalid memory spike limit", + cfg: &Config{ + CheckInterval: 1 * time.Second, + MemoryLimitMiB: 10, + MemorySpikeLimitMiB: 10, + }, + err: errMemSpikeLimitOutOfRange, + }, + { + name: "invalid memory percentage limit", + cfg: &Config{ + CheckInterval: 1 * time.Second, + MemoryLimitPercentage: 101, + }, + err: errPercentageLimitOutOfRange, + }, + { + name: "invalid memory spike percentage limit", + cfg: &Config{ + CheckInterval: 1 * time.Second, + MemoryLimitPercentage: 50, + MemorySpikePercentage: 60, + }, + err: errMemSpikePercentageLimitOutOfRange, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.cfg.Validate() + assert.Equal(t, tt.err, err) + }) + } +} diff --git a/processor/memorylimiterprocessor/factory_test.go b/processor/memorylimiterprocessor/factory_test.go index e7a274dba06..29816264b64 100644 --- a/processor/memorylimiterprocessor/factory_test.go +++ b/processor/memorylimiterprocessor/factory_test.go @@ -31,38 +31,25 @@ func TestCreateProcessor(t *testing.T) { cfg := factory.CreateDefaultConfig() - // This processor can't be created with the default config. 
- tp, err := factory.CreateTracesProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) - assert.Nil(t, tp) - assert.Error(t, err, "created processor with invalid settings") - - mp, err := factory.CreateMetricsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) - assert.Nil(t, mp) - assert.Error(t, err, "created processor with invalid settings") - - lp, err := factory.CreateLogsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) - assert.Nil(t, lp) - assert.Error(t, err, "created processor with invalid settings") - // Create processor with a valid config. pCfg := cfg.(*Config) pCfg.MemoryLimitMiB = 5722 pCfg.MemorySpikeLimitMiB = 1907 pCfg.CheckInterval = 100 * time.Millisecond - tp, err = factory.CreateTracesProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + tp, err := factory.CreateTracesProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, tp) // test if we can shutdown a monitoring routine that has not started assert.ErrorIs(t, tp.Shutdown(context.Background()), errShutdownNotStarted) assert.NoError(t, tp.Start(context.Background(), componenttest.NewNopHost())) - mp, err = factory.CreateMetricsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + mp, err := factory.CreateMetricsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, mp) assert.NoError(t, mp.Start(context.Background(), componenttest.NewNopHost())) - lp, err = factory.CreateLogsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + lp, err := factory.CreateLogsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, lp) assert.NoError(t, lp.Start(context.Background(), componenttest.NewNopHost())) diff --git a/processor/memorylimiterprocessor/go.mod b/processor/memorylimiterprocessor/go.mod index 2e534afade8..62b7249a502 100644 --- a/processor/memorylimiterprocessor/go.mod +++ b/processor/memorylimiterprocessor/go.mod @@ -4,13 +4,13 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/processor v0.90.0 + go.opentelemetry.io/collector/processor v0.91.0 go.uber.org/zap v1.26.0 ) @@ -32,7 +32,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/shirou/gopsutil/v3 v3.23.10 // indirect + github.com/shirou/gopsutil/v3 v3.23.11 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect diff --git 
a/processor/memorylimiterprocessor/go.sum b/processor/memorylimiterprocessor/go.sum index de6b998fc84..5d02440cfbf 100644 --- a/processor/memorylimiterprocessor/go.sum +++ b/processor/memorylimiterprocessor/go.sum @@ -89,8 +89,8 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/shirou/gopsutil/v3 v3.23.10 h1:/N42opWlYzegYaVkWejXWJpbzKv2JDy3mrgGzKsh9hM= -github.com/shirou/gopsutil/v3 v3.23.10/go.mod h1:JIE26kpucQi+innVlAUnIEOSBhBUkirr5b44yr55+WE= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -160,7 +160,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/processor/memorylimiterprocessor/memorylimiter.go b/processor/memorylimiterprocessor/memorylimiter.go index 0520e13339a..6cf4a3e67f6 100644 --- a/processor/memorylimiterprocessor/memorylimiter.go +++ b/processor/memorylimiterprocessor/memorylimiter.go @@ -34,18 +34,16 @@ var ( // Construction errors - errCheckIntervalOutOfRange = errors.New( - "checkInterval must be greater than zero") + errCheckIntervalOutOfRange = errors.New("check_interval must be greater than zero") - errLimitOutOfRange = errors.New( - "memAllocLimit or memoryLimitPercentage must be greater than zero") + errLimitOutOfRange = errors.New("limit_mib or limit_percentage must be greater than zero") - errMemSpikeLimitOutOfRange = errors.New( - "memSpikeLimit must be smaller than memAllocLimit") + errMemSpikeLimitOutOfRange = errors.New("spike_limit_mib must be smaller than limit_mib") + + errMemSpikePercentageLimitOutOfRange = errors.New("spike_limit_percentage must be smaller than limit_percentage") errPercentageLimitOutOfRange = errors.New( - "memoryLimitPercentage and memorySpikePercentage must be greater than zero and less than or equal to hundred", - ) + "limit_percentage and spike_limit_percentage must be greater than zero and less than or equal to hundred") errShutdownNotStarted = errors.New("no existing monitoring routine is running") ) @@ -86,13 +84,6 @@ const minGCIntervalWhenSoftLimited = 10 * time.Second // newMemoryLimiter returns a new memorylimiter processor. 
func newMemoryLimiter(set processor.CreateSettings, cfg *Config) (*memoryLimiter, error) { - if cfg.CheckInterval <= 0 { - return nil, errCheckIntervalOutOfRange - } - if cfg.MemoryLimitMiB == 0 && cfg.MemoryLimitPercentage == 0 { - return nil, errLimitOutOfRange - } - logger := set.Logger usageChecker, err := getMemUsageChecker(cfg, logger) if err != nil { @@ -129,7 +120,7 @@ func getMemUsageChecker(cfg *Config, logger *zap.Logger) (*memUsageChecker, erro memAllocLimit := uint64(cfg.MemoryLimitMiB) * mibBytes memSpikeLimit := uint64(cfg.MemorySpikeLimitMiB) * mibBytes if cfg.MemoryLimitMiB != 0 { - return newFixedMemUsageChecker(memAllocLimit, memSpikeLimit) + return newFixedMemUsageChecker(memAllocLimit, memSpikeLimit), nil } totalMemory, err := getMemoryFn() if err != nil { @@ -139,7 +130,8 @@ func getMemUsageChecker(cfg *Config, logger *zap.Logger) (*memUsageChecker, erro zap.Uint64("total_memory_mib", totalMemory/mibBytes), zap.Uint32("limit_percentage", cfg.MemoryLimitPercentage), zap.Uint32("spike_limit_percentage", cfg.MemorySpikePercentage)) - return newPercentageMemUsageChecker(totalMemory, uint64(cfg.MemoryLimitPercentage), uint64(cfg.MemorySpikePercentage)) + return newPercentageMemUsageChecker(totalMemory, uint64(cfg.MemoryLimitPercentage), + uint64(cfg.MemorySpikePercentage)), nil } func (ml *memoryLimiter) start(_ context.Context, host component.Host) error { @@ -319,10 +311,7 @@ func (d memUsageChecker) aboveHardLimit(ms *runtime.MemStats) bool { return ms.Alloc >= d.memAllocLimit } -func newFixedMemUsageChecker(memAllocLimit, memSpikeLimit uint64) (*memUsageChecker, error) { - if memSpikeLimit >= memAllocLimit { - return nil, errMemSpikeLimitOutOfRange - } +func newFixedMemUsageChecker(memAllocLimit, memSpikeLimit uint64) *memUsageChecker { if memSpikeLimit == 0 { // If spike limit is unspecified use 20% of mem limit. 
memSpikeLimit = memAllocLimit / 5 @@ -330,12 +319,9 @@ func newFixedMemUsageChecker(memAllocLimit, memSpikeLimit uint64) (*memUsageChec return &memUsageChecker{ memAllocLimit: memAllocLimit, memSpikeLimit: memSpikeLimit, - }, nil + } } -func newPercentageMemUsageChecker(totalMemory uint64, percentageLimit, percentageSpike uint64) (*memUsageChecker, error) { - if percentageLimit > 100 || percentageLimit <= 0 || percentageSpike > 100 || percentageSpike <= 0 { - return nil, errPercentageLimitOutOfRange - } +func newPercentageMemUsageChecker(totalMemory uint64, percentageLimit, percentageSpike uint64) *memUsageChecker { return newFixedMemUsageChecker(percentageLimit*totalMemory/100, percentageSpike*totalMemory/100) } diff --git a/processor/memorylimiterprocessor/memorylimiter_test.go b/processor/memorylimiterprocessor/memorylimiter_test.go index c13e1b90b8b..c1afdc17bcf 100644 --- a/processor/memorylimiterprocessor/memorylimiter_test.go +++ b/processor/memorylimiterprocessor/memorylimiter_test.go @@ -17,7 +17,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/internal/iruntime" "go.opentelemetry.io/collector/pdata/plog" @@ -29,71 +28,6 @@ import ( "go.opentelemetry.io/collector/processor/processortest" ) -func TestNew(t *testing.T) { - type args struct { - nextConsumer consumer.Traces - checkInterval time.Duration - memoryLimitMiB uint32 - memorySpikeLimitMiB uint32 - } - sink := new(consumertest.TracesSink) - tests := []struct { - name string - args args - wantErr error - }{ - { - name: "zero_checkInterval", - args: args{ - nextConsumer: sink, - }, - wantErr: errCheckIntervalOutOfRange, - }, - { - name: "zero_memAllocLimit", - args: args{ - nextConsumer: sink, - checkInterval: 100 * time.Millisecond, - }, - wantErr: errLimitOutOfRange, - }, - { - name: "memSpikeLimit_gt_memAllocLimit", - args: args{ - nextConsumer: sink, - checkInterval: 100 * time.Millisecond, - memoryLimitMiB: 1, - memorySpikeLimitMiB: 2, - }, - wantErr: errMemSpikeLimitOutOfRange, - }, - { - name: "success", - args: args{ - nextConsumer: sink, - checkInterval: 100 * time.Millisecond, - memoryLimitMiB: 1024, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := createDefaultConfig().(*Config) - cfg.CheckInterval = tt.args.checkInterval - cfg.MemoryLimitMiB = tt.args.memoryLimitMiB - cfg.MemorySpikeLimitMiB = tt.args.memorySpikeLimitMiB - got, err := newMemoryLimiter(processortest.NewNopCreateSettings(), cfg) - if tt.wantErr != nil { - assert.ErrorIs(t, err, tt.wantErr) - return - } - assert.NoError(t, err) - assert.NoError(t, got.start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, got.shutdown(context.Background())) - }) - } -} - // TestMetricsMemoryPressureResponse manipulates results from querying memory and // check expected side effects. 
func TestMetricsMemoryPressureResponse(t *testing.T) { @@ -309,11 +243,6 @@ func TestGetDecision(t *testing.T) { memSpikeLimit: 20 * mibBytes, }, d) }) - t.Run("fixed_limit_error", func(t *testing.T) { - d, err := getMemUsageChecker(&Config{MemoryLimitMiB: 20, MemorySpikeLimitMiB: 100}, zap.NewNop()) - require.Error(t, err) - assert.Nil(t, d) - }) t.Cleanup(func() { getMemoryFn = iruntime.TotalMemory @@ -329,26 +258,12 @@ func TestGetDecision(t *testing.T) { memSpikeLimit: 10 * mibBytes, }, d) }) - t.Run("percentage_limit_error", func(t *testing.T) { - d, err := getMemUsageChecker(&Config{MemoryLimitPercentage: 101, MemorySpikePercentage: 10}, zap.NewNop()) - require.Error(t, err) - assert.Nil(t, d) - d, err = getMemUsageChecker(&Config{MemoryLimitPercentage: 99, MemorySpikePercentage: 101}, zap.NewNop()) - require.Error(t, err) - assert.Nil(t, d) - }) } func TestRefuseDecision(t *testing.T) { - decison1000Limit30Spike30, err := newPercentageMemUsageChecker(1000, 60, 30) - require.NoError(t, err) - decison1000Limit60Spike50, err := newPercentageMemUsageChecker(1000, 60, 50) - require.NoError(t, err) - decison1000Limit40Spike20, err := newPercentageMemUsageChecker(1000, 40, 20) - require.NoError(t, err) - decison1000Limit40Spike60, err := newPercentageMemUsageChecker(1000, 40, 60) - require.Error(t, err) - assert.Nil(t, decison1000Limit40Spike60) + decison1000Limit30Spike30 := newPercentageMemUsageChecker(1000, 60, 30) + decison1000Limit60Spike50 := newPercentageMemUsageChecker(1000, 60, 50) + decison1000Limit40Spike20 := newPercentageMemUsageChecker(1000, 40, 20) tests := []struct { name string diff --git a/processor/processorhelper/obsreport_test.go b/processor/processorhelper/obsreport_test.go index d251bc9052b..3ba5418d050 100644 --- a/processor/processorhelper/obsreport_test.go +++ b/processor/processorhelper/obsreport_test.go @@ -101,6 +101,11 @@ func TestProcessorLogRecords(t *testing.T) { func testTelemetry(t *testing.T, id component.ID, testFunc func(t *testing.T, tt obsreporttest.TestTelemetry, useOtel bool)) { t.Run("WithOC", func(t *testing.T) { + originalValue := obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled() + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), false)) + defer func() { + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), originalValue)) + }() tt, err := obsreporttest.SetupTelemetry(id) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) @@ -109,11 +114,6 @@ func testTelemetry(t *testing.T, id component.ID, testFunc func(t *testing.T, tt }) t.Run("WithOTel", func(t *testing.T) { - originalValue := obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled() - require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), true)) - defer func() { - require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), originalValue)) - }() tt, err := obsreporttest.SetupTelemetry(id) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) diff --git a/receiver/go.mod b/receiver/go.mod index 7f59aa433af..7a93d809989 100644 --- a/receiver/go.mod +++ b/receiver/go.mod @@ -5,10 +5,10 @@ go 1.20 require ( github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - 
go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 go.opentelemetry.io/collector/featuregate v1.0.0 go.opentelemetry.io/collector/pdata v1.0.0 go.opentelemetry.io/otel v1.21.0 @@ -48,7 +48,7 @@ require ( github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect - go.opentelemetry.io/collector/confmap v0.90.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2 // indirect go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect golang.org/x/net v0.18.0 // indirect diff --git a/receiver/otlpreceiver/go.mod b/receiver/otlpreceiver/go.mod index 98c3bb8462c..007ade23b2c 100644 --- a/receiver/otlpreceiver/go.mod +++ b/receiver/otlpreceiver/go.mod @@ -4,20 +4,20 @@ go 1.20 require ( github.com/gogo/protobuf v1.3.2 - github.com/klauspost/compress v1.17.3 + github.com/klauspost/compress v1.17.4 github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/configgrpc v0.90.0 - go.opentelemetry.io/collector/config/confighttp v0.90.0 - go.opentelemetry.io/collector/config/confignet v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/config/configtls v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configgrpc v0.91.0 + go.opentelemetry.io/collector/config/confighttp v0.91.0 + go.opentelemetry.io/collector/config/confignet v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/config/configtls v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/receiver v0.90.0 - go.opentelemetry.io/collector/semconv v0.90.0 + go.opentelemetry.io/collector/receiver v0.91.0 + go.opentelemetry.io/collector/semconv v0.91.0 go.uber.org/zap v1.26.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 google.golang.org/grpc v1.59.0 @@ -59,12 +59,12 @@ require ( github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/rs/cors v1.10.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.90.0 // indirect - go.opentelemetry.io/collector/config/configcompression v0.90.0 // indirect - go.opentelemetry.io/collector/config/configopaque v0.90.0 // indirect - go.opentelemetry.io/collector/config/internal v0.90.0 // indirect - go.opentelemetry.io/collector/extension v0.90.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.90.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.91.0 // indirect + go.opentelemetry.io/collector/config/configcompression v0.91.0 // indirect + go.opentelemetry.io/collector/config/configopaque v0.91.0 // indirect + go.opentelemetry.io/collector/config/internal v0.91.0 // indirect + go.opentelemetry.io/collector/extension v0.91.0 // indirect + 
go.opentelemetry.io/collector/extension/auth v0.91.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/contrib/config v0.1.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect diff --git a/receiver/otlpreceiver/go.sum b/receiver/otlpreceiver/go.sum index cf51932ee87..89b106af39c 100644 --- a/receiver/otlpreceiver/go.sum +++ b/receiver/otlpreceiver/go.sum @@ -174,8 +174,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= diff --git a/receiver/receiverhelper/obsreport_test.go b/receiver/receiverhelper/obsreport_test.go index a99912f4ceb..02a8788161c 100644 --- a/receiver/receiverhelper/obsreport_test.go +++ b/receiver/receiverhelper/obsreport_test.go @@ -237,6 +237,11 @@ func TestReceiveWithLongLivedCtx(t *testing.T) { func testTelemetry(t *testing.T, id component.ID, testFunc func(t *testing.T, tt obsreporttest.TestTelemetry, useOtel bool)) { t.Run("WithOC", func(t *testing.T) { + originalValue := obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled() + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), false)) + defer func() { + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), originalValue)) + }() tt, err := obsreporttest.SetupTelemetry(id) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) @@ -245,11 +250,6 @@ func testTelemetry(t *testing.T, id component.ID, testFunc func(t *testing.T, tt }) t.Run("WithOTel", func(t *testing.T) { - originalValue := obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled() - require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), true)) - defer func() { - require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), originalValue)) - }() tt, err := obsreporttest.SetupTelemetry(id) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) diff --git a/receiver/scraperhelper/obsreport_test.go b/receiver/scraperhelper/obsreport_test.go index 4adbf981654..f109a05364c 100644 --- a/receiver/scraperhelper/obsreport_test.go +++ b/receiver/scraperhelper/obsreport_test.go @@ -94,6 +94,11 @@ func TestScrapeMetricsDataOp(t *testing.T) { func testTelemetry(t *testing.T, id component.ID, testFunc func(t *testing.T, tt obsreporttest.TestTelemetry, useOtel bool)) { t.Run("WithOC", func(t *testing.T) { + originalValue := 
obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled() + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), false)) + defer func() { + require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), originalValue)) + }() tt, err := obsreporttest.SetupTelemetry(id) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) @@ -102,11 +107,6 @@ func testTelemetry(t *testing.T, id component.ID, testFunc func(t *testing.T, tt }) t.Run("WithOTel", func(t *testing.T) { - originalValue := obsreportconfig.UseOtelForInternalMetricsfeatureGate.IsEnabled() - require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), true)) - defer func() { - require.NoError(t, featuregate.GlobalRegistry().Set(obsreportconfig.UseOtelForInternalMetricsfeatureGate.ID(), originalValue)) - }() tt, err := obsreporttest.SetupTelemetry(id) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) diff --git a/semconv/README.md b/semconv/README.md index dcbcc71c797..a453bbafd27 100644 --- a/semconv/README.md +++ b/semconv/README.md @@ -6,17 +6,13 @@ from definitions in the specification. ## Generation To generate the constants you can use the `gensemconv` make target. You must provide the path to the root of a clone of -the `opentelemetry-specification` repository in the `SPECPATH` variable and the version of the conventions to generate +the `semantic-conventions` repository in the `SPECPATH` variable and the version of the conventions to generate in the `SPECTAG` variable. ```console -$ make gensemconv SPECPATH=~/dev/opentelemetry-specification SPECTAG=v1.5.0 -Generating semantic convention constants from specification version v1.5.0 at ~/dev/opentelemetry-specification -semconvgen -o semconv/v1.5.0 -t semconv/template.j2 -s v1.5.0 -i ~/dev/opentelemetry-specification/semantic_conventions/resource -p conventionType=resource -semconvgen -o semconv/v1.5.0 -t semconv/template.j2 -s v1.5.0 -i ~/dev/opentelemetry-specification/semantic_conventions/trace -p conventionType=trace +$ make gensemconv SPECPATH=/tmp/semantic-conventions SPECTAG=v1.22.0 +Generating semantic convention constants from specification version v1.22.0 at /tmp/semantic-conventions +.tools/semconvgen -o semconv/v1.22.0 -t semconv/template.j2 -s v1.22.0 -i /tmp/semantic-conventions/model/. --only=resource -p conventionType=resource -f generated_resource.go +.tools/semconvgen -o semconv/v1.22.0 -t semconv/template.j2 -s v1.22.0 -i /tmp/semantic-conventions/model/. --only=event -p conventionType=event -f generated_event.go +.tools/semconvgen -o semconv/v1.22.0 -t semconv/template.j2 -s v1.22.0 -i /tmp/semantic-conventions/model/. --only=span -p conventionType=trace -f generated_trace.go ``` - -When generating the constants for a new version ot the specification it is important to note that only -`generated_trace.go` and `generated_resource.go` are generated automatically. The `schema.go` and `nonstandard.go` -files should be copied from a prior version's package and updated as appropriate. Most important will be to update -the `SchemaURL` constant in `schema.go`. 
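For reference, the constants these packages provide are consumed as plain Go identifiers. The following is a minimal, illustrative sketch (not part of this change; the service name and attribute choices are made up) showing how the new `go.opentelemetry.io/collector/semconv/v1.21.0` package can be used together with `pdata` to set resource attributes:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
	conventions "go.opentelemetry.io/collector/semconv/v1.21.0"
)

func main() {
	// Build an empty traces payload and grab its resource.
	traces := ptrace.NewTraces()
	resource := traces.ResourceSpans().AppendEmpty().Resource()

	// Use the generated constants instead of hard-coding attribute keys,
	// so the keys stay aligned with the pinned semantic-conventions version.
	attrs := resource.Attributes()
	attrs.PutStr(conventions.AttributeServiceName, "checkout-service")
	attrs.PutStr(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS)

	fmt.Println(attrs.AsRaw())
}
```

Relying on the generated constants rather than string literals is what keeps a regeneration against a new `SPECTAG` a mostly mechanical update.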
diff --git a/semconv/template.j2 b/semconv/template.j2 index 4d614187849..a91e8a554c4 100644 --- a/semconv/template.j2 +++ b/semconv/template.j2 @@ -36,6 +36,9 @@ Examples: {{ attr.examples | pprint | trim("[]") }} Note: {{ attr.note | render_markdown(paragraph="{0}", code="{0}", link="{1}", emphasis="{0}", strong="{0}") }} {%- endif %} {%- endmacro -%} +{%- macro sentence_case(text) -%} + {{ text[0]|upper}}{{text[1:] }} +{%- endmacro -%} // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -45,7 +48,7 @@ package semconv {% for semconv in semconvs -%} {%- if semconvs[semconv].attributes | rejectattr("ref") | selectattr("is_local") | sort(attribute=fqn) | length > 0 -%} -// {{ semconvs[semconv].brief }} +// {{ sentence_case(semconvs[semconv].brief | replace("This document defines ", "")) | wordwrap(76, break_long_words=false, break_on_hyphens=false, wrapstring="\n// ") }} const ( {% for attr in semconvs[semconv].attributes if attr.is_local and not attr.ref -%} // {{ godoc(attr) | wordwrap | indent(3) | replace(" ", "\t// ") | replace("// //", "//") }} diff --git a/semconv/v1.21.0/doc.go b/semconv/v1.21.0/doc.go new file mode 100644 index 00000000000..a189e79efbc --- /dev/null +++ b/semconv/v1.21.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.21.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/collector/semconv/v1.21.0" diff --git a/semconv/v1.21.0/generated_event.go b/semconv/v1.21.0/generated_event.go new file mode 100644 index 00000000000..ec6c733ecd4 --- /dev/null +++ b/semconv/v1.21.0/generated_event.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // The unique identifier of the feature flag. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'logo-color' + AttributeFeatureFlagKey = "feature_flag.key" + // The name of the service provider that performs the flag evaluation. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'Flag Manager' + AttributeFeatureFlagProviderName = "feature_flag.provider_name" + // SHOULD be a semantic identifier for a value. If one is unavailable, a + // stringified version of the value can be used. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant red maybe be used for the value #c05543.A stringified + // version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + AttributeFeatureFlagVariant = "feature_flag.variant" +) + +// RPC received/sent message. 
+const ( + // Compressed size of the message in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageCompressedSize = "message.compressed_size" + // MUST be calculated as two different counters starting from 1 one for sent + // messages and one for received message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + AttributeMessageID = "message.id" + // Whether this is a received or sent message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessageType = "message.type" + // Uncompressed size of the message in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageUncompressedSize = "message.uncompressed_size" +) + +const ( + // sent + AttributeMessageTypeSent = "SENT" + // received + AttributeMessageTypeReceived = "RECEIVED" +) + +// The attributes used to report a single exception associated with a span. +const ( + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages.It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example above.It follows that an exception may still escape the + // scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" +) + +func GetEventSemanticConventionAttributeNames() []string { + return []string{ + AttributeFeatureFlagKey, + AttributeFeatureFlagProviderName, + AttributeFeatureFlagVariant, + AttributeMessageCompressedSize, + AttributeMessageID, + AttributeMessageType, + AttributeMessageUncompressedSize, + AttributeExceptionEscaped, + } +} diff --git a/semconv/v1.21.0/generated_resource.go b/semconv/v1.21.0/generated_resource.go new file mode 100644 index 00000000000..b1a66142afe --- /dev/null +++ b/semconv/v1.21.0/generated_resource.go @@ -0,0 +1,1342 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). 
+const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left + // unset. + AttributeBrowserMobile = "browser.mobile" + // The platform on which the browser is running + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // The cloud account ID the resource is assigned to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeCloudProvider = "cloud.provider" + // The geographical region the resource is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. 
+ AttributeCloudRegion = "cloud.region" + // Cloud provider-specific native identifier of the monitored cloud resource (e.g. + // an ARN on AWS, a fully qualified resource ID on Azure, a full resource name on + // GCP) + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', '//run.googl + // eapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', '/sub + // scriptions//resourceGroups//providers/Microsoft.Web/sites + // //functions/' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so it may be necessary to set cloud.resource_id as a span attribute instead.The + // exact value to use for cloud.resource_id depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute and + // they apply:
+ //   • AWS Lambda: The function ARN. + // Take care not to use the "invoked ARN" directly but replace any + // alias suffix + // with the resolved function version, as the same runtime instance may be + // invokable with + // multiple different aliases.
+ //   • GCP: The URI of the resource
+ //   • Azure: The Fully Qualified Resource ID of the invoked function, + // not the function app, having the form + // /subscriptions//resourceGroups//providers/Microsoft.Web/s + // ites//functions/. + // This means that a span attribute MUST be used, as an Azure function app can + // host multiple functions that would usually share + // a TracerProvider.
+ AttributeCloudResourceID = "cloud.resource_id" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Bare Metal Solution (BMS) + AttributeCloudPlatformGCPBareMetalSolution = "gcp_bare_metal_solution" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGCPOpenshift = "gcp_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Heroku Platform as a Service + AttributeCloudProviderHeroku = "heroku" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The ARN of an ECS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The Amazon Resource Name (ARN) of an ECS container instance. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of an ECS task definition. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The task definition family this task definition is a member of. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for this task definition. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. +const ( + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" +) + +// Resource used by Google Cloud Run. +const ( + // The name of the Cloud Run execution being run for the Job, as set by the + // CLOUD_RUN_EXECUTION environment variable. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + AttributeGCPCloudRunJobExecution = "gcp.cloud_run.job.execution" + // The index for a task within an execution as provided by the + // CLOUD_RUN_TASK_INDEX environment variable. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1 + AttributeGCPCloudRunJobTaskIndex = "gcp.cloud_run.job.task_index" +) + +// Resources used by Google Compute Engine (GCE). +const ( + // The hostname of a GCE instance. This is the full value of the default or custom + // hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', 'sample-vm.us-west1-b.c.my- + // project.internal' + AttributeGCPGceInstanceHostname = "gcp.gce.instance.hostname" + // The instance name of a GCE instance. This is the value provided by host.name, + // the visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the default internal DNS + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + AttributeGCPGceInstanceName = "gcp.gce.instance.name" +) + +// Heroku dyno metadata +const ( + // Unique identifier for the application + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + AttributeHerokuAppID = "heroku.app.id" + // Commit hash for the current release + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + AttributeHerokuReleaseCommit = "heroku.release.commit" + // Time and date the release was created + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + AttributeHerokuReleaseCreationTimestamp = "heroku.release.creation_timestamp" +) + +// A container instance. +const ( + // The command used to run the container (i.e. the command name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + AttributeContainerCommand = "container.command" + // All the command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + AttributeContainerCommandArgs = "container.command_args" + // The full command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + AttributeContainerCommandLine = "container.command_line" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // Runtime specific image identifier. Usually a hash algorithm followed by a UUID. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; container.image.id corresponds + // to the Image field from the Docker container inspect API endpoint. + // K8S defines a link to the container registry repository with digest "imageID": + // "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e + // 8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625". + // OCI defines a digest of manifest. + AttributeContainerImageID = "container.image.id" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Container image tag. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0.1' + AttributeContainerImageTag = "container.image.tag" + // Container name used by container runtime. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // The container runtime managing this container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" +) + +// The software deployment. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'staging', 'production' + AttributeDeploymentEnvironment = "deployment.environment" +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. 
+ AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// A serverless instance. +const ( + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //   • AWS Lambda: Use the (full) log stream name.
+ AttributeFaaSInstance = "faas.instance" + // The amount of memory available to the serverless function converted to Bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information (which must be multiplied by 1,048,576). + AttributeFaaSMaxMemory = "faas.max_memory" + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ //   • Azure: The full name /, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the cloud.resource_id attribute).
+ AttributeFaaSName = "faas.name" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //   • AWS Lambda: The function version + // (an integer represented as a decimal string).
+ //   • Google Cloud Run (Services): The revision + // (i.e., the function name plus the revision suffix).
+ //   • Google Cloud Functions: The value of the + // K_REVISION environment variable.
+ //   • Azure Functions: Not applicable. Do not set this attribute.
+ AttributeFaaSVersion = "faas.version" +) + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeHostArch = "host.arch" + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized systems, this should be the machine-id. See the + // table below for the sources to use to determine the machine-id based on + // operating system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // VM image ID or host OS image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // The version string of the VM image or host OS as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" + // A pseudo-ID for the cluster, set to the UID of the kube-system namespace. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S does not have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the k8s.cluster.uid through the + // official APIs. In the meantime, we are able to use the uid of the + // kube-system namespace as a proxy for cluster ID. Read on for the + // rationale.Every object created in a K8S cluster is assigned a distinct UID. The + // kube-system namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. 
+ // Using the uid of the kube-system + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // ISO/IEC 9834-8 and ITU-T X.667. + // Which states:
+ // If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism + // chosen).Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + AttributeK8SClusterUID = "k8s.cluster.uid" +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" +) + +// A Kubernetes Pod object. +const ( + // The name of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" + // The UID of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" +) + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeK8SContainerRestartCount = "k8s.container.restart_count" +) + +// A Kubernetes ReplicaSet object. +const ( + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" +) + +// A Kubernetes Deployment object. +const ( + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" +) + +// A Kubernetes StatefulSet object. +const ( + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" + // The UID of the StatefulSet. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" +) + +// A Kubernetes DaemonSet object. +const ( + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" +) + +// A Kubernetes Job object. +const ( + // The name of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" +) + +// A Kubernetes CronJob object. +const ( + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The operating system type. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeOSType = "os.type" + // The version string of the operating system as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// An operating system process. +const ( + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. 
+ // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The username of the user that owns the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessOwner = "process.owner" + // Parent Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1234 + AttributeProcessPID = "process.pid" +) + +// The single (language) runtime instance which is monitored. +const ( + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // The version string of the service API or implementation. The format is not + // defined by these conventions. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2.0.0', 'a01dbef8a' + AttributeServiceVersion = "service.version" +) + +// A service instance. +const ( + // The string ID of the service instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-k8s-pod-deployment-1', '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + AttributeServiceInstanceID = "service.instance.id" + // A namespace for service.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" +) + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // The language of the telemetry SDK. 
+ // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the telemetry.sdk.name attribute to + // opentelemetry. + // If another SDK, like a fork or a vendor-provided implementation, is used, this + // SDK MUST set the + // telemetry.sdk.name attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier opentelemetry is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // rust + AttributeTelemetrySDKLanguageRust = "rust" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" +) + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2.3' + AttributeTelemetryAutoVersion = "telemetry.auto.version" +) + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" + // The name of the web engine. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelScopeName = "otel.scope.name" + // The version of the instrumentation scope - (InstrumentationScope.Version in + // OTLP). 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0.0' + AttributeOTelScopeVersion = "otel.scope.version" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // Deprecated, use the otel.scope.name attribute. + // + // Type: string + // Requirement Level: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelLibraryName = "otel.library.name" + // Deprecated, use the otel.scope.version attribute. + // + // Type: string + // Requirement Level: Optional + // Stability: deprecated + // Examples: '1.0.0' + AttributeOTelLibraryVersion = "otel.library.version" +) + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{ + AttributeBrowserBrands, + AttributeBrowserLanguage, + AttributeBrowserMobile, + AttributeBrowserPlatform, + AttributeCloudAccountID, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeCloudProvider, + AttributeCloudRegion, + AttributeCloudResourceID, + AttributeAWSECSClusterARN, + AttributeAWSECSContainerARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupARNs, + AttributeAWSLogGroupNames, + AttributeAWSLogStreamARNs, + AttributeAWSLogStreamNames, + AttributeGCPCloudRunJobExecution, + AttributeGCPCloudRunJobTaskIndex, + AttributeGCPGceInstanceHostname, + AttributeGCPGceInstanceName, + AttributeHerokuAppID, + AttributeHerokuReleaseCommit, + AttributeHerokuReleaseCreationTimestamp, + AttributeContainerCommand, + AttributeContainerCommandArgs, + AttributeContainerCommandLine, + AttributeContainerID, + AttributeContainerImageID, + AttributeContainerImageName, + AttributeContainerImageTag, + AttributeContainerName, + AttributeContainerRuntime, + AttributeDeploymentEnvironment, + AttributeDeviceID, + AttributeDeviceManufacturer, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeFaaSInstance, + AttributeFaaSMaxMemory, + AttributeFaaSName, + AttributeFaaSVersion, + AttributeHostArch, + AttributeHostID, + AttributeHostImageID, + AttributeHostImageName, + AttributeHostImageVersion, + AttributeHostName, + AttributeHostType, + AttributeK8SClusterName, + AttributeK8SClusterUID, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SNamespaceName, + AttributeK8SPodName, + AttributeK8SPodUID, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SReplicaSetName, + AttributeK8SReplicaSetUID, + AttributeK8SDeploymentName, + AttributeK8SDeploymentUID, + AttributeK8SStatefulSetName, + AttributeK8SStatefulSetUID, + AttributeK8SDaemonSetName, + AttributeK8SDaemonSetUID, + AttributeK8SJobName, + AttributeK8SJobUID, + AttributeK8SCronJobName, + AttributeK8SCronJobUID, + AttributeOSDescription, + AttributeOSName, + AttributeOSType, + AttributeOSVersion, + AttributeProcessCommand, + AttributeProcessCommandArgs, + AttributeProcessCommandLine, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessOwner, + AttributeProcessParentPID, + AttributeProcessPID, + AttributeProcessRuntimeDescription, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeServiceName, + AttributeServiceVersion, + AttributeServiceInstanceID, + AttributeServiceNamespace, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKName, + AttributeTelemetrySDKVersion, + AttributeTelemetryAutoVersion, + 
AttributeWebEngineDescription, + AttributeWebEngineName, + AttributeWebEngineVersion, + AttributeOTelScopeName, + AttributeOTelScopeVersion, + AttributeOTelLibraryName, + AttributeOTelLibraryVersion, + } +} diff --git a/semconv/v1.21.0/generated_trace.go b/semconv/v1.21.0/generated_trace.go new file mode 100644 index 00000000000..9bbd3cfbf5f --- /dev/null +++ b/semconv/v1.21.0/generated_trace.go @@ -0,0 +1,1536 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" +) + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from cloud.resource_id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" +) + +// Semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span does not depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// The attributes used to perform database client calls. +const ( + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // The fully-qualified class name of the Java Database Connectivity (JDBC) driver + // used to connect. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Requirement Level: Conditionally Required - If applicable. + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema name), the + // database name to be used is the more specific layer (e.g. Oracle schema name). + AttributeDBName = "db.name" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. + // + // Type: string + // Requirement Level: Conditionally Required - If `db.statement` is not + // applicable. + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. 
+ AttributeDBOperation = "db.operation" + // The database statement being executed. + // + // Type: string + // Requirement Level: Recommended - Should be collected by default only if there + // is sanitization that excludes sensitive information. + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + AttributeDBStatement = "db.statement" + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeDBSystem = "db.system" + // Username for accessing the database. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" +) + +const ( + // Some other SQL database. Fallback only. See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // Microsoft SQL Server Compact + AttributeDBSystemMssqlcompact = "mssqlcompact" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // 
CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" + // ClickHouse + AttributeDBSystemClickhouse = "clickhouse" + // Cloud Spanner + AttributeDBSystemSpanner = "spanner" + // Trino + AttributeDBSystemTrino = "trino" +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, server.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" +) + +// Call-level attributes for Cassandra +const ( + // The consistency level of the query. Based on consistency values from CQL. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. 
+ AttributeDBCassandraTable = "db.cassandra.table" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Requirement Level: Conditionally Required - If other than the default database + // (`0`). + // Stability: experimental + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in db.name. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of db.statement + // just to get this property, but it should be set if it is provided by the + // library being instrumented. If the operation is acting upon an anonymous table, + // or more than one table, this value MUST NOT be set. + AttributeDBSQLTable = "db.sql.table" +) + +// Call-level attributes for Cosmos DB. +const ( + // Unique Cosmos client instance id. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + AttributeDBCosmosDBClientID = "db.cosmosdb.client_id" + // Cosmos client connection mode. + // + // Type: Enum + // Requirement Level: Conditionally Required - if not `direct` (or pick gw as + // default) + // Stability: experimental + AttributeDBCosmosDBConnectionMode = "db.cosmosdb.connection_mode" + // Cosmos DB container name. + // + // Type: string + // Requirement Level: Conditionally Required - if available + // Stability: experimental + // Examples: 'anystring' + AttributeDBCosmosDBContainer = "db.cosmosdb.container" + // CosmosDB Operation Type. 
+ // + // Type: Enum + // Requirement Level: Conditionally Required - when performing one of the + // operations in this list + // Stability: experimental + AttributeDBCosmosDBOperationType = "db.cosmosdb.operation_type" + // RU consumed for that operation + // + // Type: double + // Requirement Level: Conditionally Required - when available + // Stability: experimental + // Examples: 46.18, 1.0 + AttributeDBCosmosDBRequestCharge = "db.cosmosdb.request_charge" + // Request payload size in bytes + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBRequestContentLength = "db.cosmosdb.request_content_length" + // Cosmos DB status code. + // + // Type: int + // Requirement Level: Conditionally Required - if response was received + // Stability: experimental + // Examples: 200, 201 + AttributeDBCosmosDBStatusCode = "db.cosmosdb.status_code" + // Cosmos DB sub status code. + // + // Type: int + // Requirement Level: Conditionally Required - when response was received and + // contained sub-code. + // Stability: experimental + // Examples: 1000, 1002 + AttributeDBCosmosDBSubStatusCode = "db.cosmosdb.sub_status_code" +) + +const ( + // Gateway (HTTP) connections mode + AttributeDBCosmosDBConnectionModeGateway = "gateway" + // Direct connection + AttributeDBCosmosDBConnectionModeDirect = "direct" +) + +const ( + // invalid + AttributeDBCosmosDBOperationTypeInvalid = "Invalid" + // create + AttributeDBCosmosDBOperationTypeCreate = "Create" + // patch + AttributeDBCosmosDBOperationTypePatch = "Patch" + // read + AttributeDBCosmosDBOperationTypeRead = "Read" + // read_feed + AttributeDBCosmosDBOperationTypeReadFeed = "ReadFeed" + // delete + AttributeDBCosmosDBOperationTypeDelete = "Delete" + // replace + AttributeDBCosmosDBOperationTypeReplace = "Replace" + // execute + AttributeDBCosmosDBOperationTypeExecute = "Execute" + // query + AttributeDBCosmosDBOperationTypeQuery = "Query" + // head + AttributeDBCosmosDBOperationTypeHead = "Head" + // head_feed + AttributeDBCosmosDBOperationTypeHeadFeed = "HeadFeed" + // upsert + AttributeDBCosmosDBOperationTypeUpsert = "Upsert" + // batch + AttributeDBCosmosDBOperationTypeBatch = "Batch" + // query_plan + AttributeDBCosmosDBOperationTypeQueryPlan = "QueryPlan" + // execute_javascript + AttributeDBCosmosDBOperationTypeExecuteJavascript = "ExecuteJavaScript" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set + // if the status code is UNSET. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeOTelStatusCode = "otel.status_code" + // Description of the Status if it has a value, otherwise not set. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'resource not found' + AttributeOTelStatusDescription = "otel.status_description" +) + +const ( + // The operation has been validated by an Application developer or Operator to have completed successfully + AttributeOTelStatusCodeOk = "OK" + // The operation contains an error + AttributeOTelStatusCodeError = "ERROR" +) + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // The invocation ID of the current function invocation. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSInvocationID = "faas.invocation_id" + // Type of the trigger which caused this function invocation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: For the server/consumer span on the incoming side, + // faas.trigger MUST be set.Clients invoking FaaS instances usually cannot set + // faas.trigger, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + AttributeFaaSTrigger = "faas.trigger" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSColdstart = "faas.coldstart" +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Conditionally Required - For some cloud providers, like AWS + // or GCP, the region in which a function is hosted is essential to uniquely + // identify the function and also part of its endpoint. Since it's part of the + // endpoint being called, the region is always known to clients. In these cases, + // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the + // client or not required for identifying the invoked function, setting + // `faas.invoked_region` is optional. + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. 
The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // The column number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 16 + AttributeCodeColumn = "code.column" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" +) + +// Semantic Convention for HTTP Client +const ( + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // Requirement Level: Recommended - if and only if request was retried. + // Stability: experimental + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + AttributeHTTPResendCount = "http.resend_count" +) + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. 
+const ( + // The AWS request ID as returned in the response headers x-amz-request-id or + // x-amz-requestid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AttributeAWSRequestID = "aws.request_id" +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The JSON-serialized value of each item in the ConsumedCapacity response field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The value of the IndexName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The JSON-serialized value of the ItemCollectionMetrics response field. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the Select request parameter. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" + // The keys in the RequestItems object field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" +) + +// DynamoDB.ListTables +const ( + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The the number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" +) + +// DynamoDB.Query +const ( + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" +) + +// DynamoDB.Scan +const ( + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" + // The value of the Segment request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the TotalSegments request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" +) + +// DynamoDB.UpdateTable +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The JSON-serialized value of each item in the the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" +) + +// Attributes that exist for S3 request types. +const ( + // The S3 bucket name the request refers to. Corresponds to the --bucket parameter + // of the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The bucket attribute is applicable to all S3 operations that reference a + // bucket, i.e. that require the bucket name as a mandatory parameter. + // This applies to almost all S3 operations except list-buckets. + AttributeAWSS3Bucket = "aws.s3.bucket" + // The source object (in the form bucket/key) for the copy operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The copy_source attribute applies to S3 copy operations and corresponds + // to the --copy-source parameter + // of the copy-object operation within the S3 API. + // This applies in particular to the following operations:
+ //   - copy-object
+ //   - upload-part-copy
+ AttributeAWSS3CopySource = "aws.s3.copy_source" + // The delete request container that specifies the objects to be deleted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string} + // ],Quiet=boolean' + // Note: The delete attribute is only applicable to the delete-object operation. + // The delete attribute corresponds to the --delete parameter of the + // delete-objects operation within the S3 API. + AttributeAWSS3Delete = "aws.s3.delete" + // The S3 object key the request refers to. Corresponds to the --key parameter of + // the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The key attribute is applicable to all object-related S3 operations, i.e. + // that require the object key as a mandatory parameter. + // This applies in particular to the following operations:
+ //   - copy-object
+ //   - delete-object
+ //   - get-object
+ //   - head-object
+ //   - put-object
+ //   - restore-object
+ //   - select-object-content
+ //   - abort-multipart-upload
+ //   - complete-multipart-upload
+ //   - create-multipart-upload
+ //   - list-parts
+ //   - upload-part
+ //   - upload-part-copy
+ AttributeAWSS3Key = "aws.s3.key" + // The part number of the part being uploaded in a multipart-upload operation. + // This is a positive integer between 1 and 10,000. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3456 + // Note: The part_number attribute is only applicable to the upload-part + // and upload-part-copy operations. + // The part_number attribute corresponds to the --part-number parameter of the + // upload-part operation within the S3 API. + AttributeAWSS3PartNumber = "aws.s3.part_number" + // Upload ID that identifies the multipart upload. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The upload_id attribute applies to S3 multipart-upload operations and + // corresponds to the --upload-id parameter + // of the S3 API multipart operations. + // This applies in particular to the following operations:
+ //   - abort-multipart-upload
+ //   - complete-multipart-upload
+ //   - list-parts
+ //   - upload-part
+ //   - upload-part-copy
+ AttributeAWSS3UploadID = "aws.s3.upload_id" +) + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +// General attributes used in messaging systems. +const ( + // The number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // Requirement Level: Conditionally Required - If the span describes an operation + // on a batch of messages. + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set messaging.batch.message_count on spans + // that operate with a single message. When a messaging client library supports + // both batch and single-message API for the same operation, instrumentations + // SHOULD use messaging.batch.message_count for batching APIs and SHOULD NOT use + // it for single-message APIs. + AttributeMessagingBatchMessageCount = "messaging.batch.message_count" + // A unique identifier for the client that consumes or produces a message. + // + // Type: string + // Requirement Level: Recommended - If a client id is available + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + AttributeMessagingClientID = "messaging.client_id" + // A string identifying the kind of messaging operation as defined in the + // Operation names section above. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + AttributeMessagingOperation = "messaging.operation" + // A string identifying the messaging system. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + AttributeMessagingSystem = "messaging.system" +) + +const ( + // publish + AttributeMessagingOperationPublish = "publish" + // receive + AttributeMessagingOperationReceive = "receive" + // process + AttributeMessagingOperationProcess = "process" +) + +// Semantic conventions for remote procedure calls. +const ( + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. 
The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeRPCSystem = "rpc.system" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" + // Connect RPC + AttributeRPCSystemConnectRPC = "connect_rpc" +) + +// Tech-specific attributes for gRPC. +const ( + // The numeric status code of the gRPC request. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeRPCGRPCStatusCode = "rpc.grpc.status_code" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + AttributeRPCGRPCStatusCodeResourceExhausted = "8" + // FAILED_PRECONDITION + AttributeRPCGRPCStatusCodeFailedPrecondition = "9" + // ABORTED + AttributeRPCGRPCStatusCodeAborted = "10" + // OUT_OF_RANGE + AttributeRPCGRPCStatusCodeOutOfRange = "11" + // UNIMPLEMENTED + AttributeRPCGRPCStatusCodeUnimplemented = "12" + // INTERNAL + AttributeRPCGRPCStatusCodeInternal = "13" + // UNAVAILABLE + AttributeRPCGRPCStatusCodeUnavailable = "14" + // DATA_LOSS + AttributeRPCGRPCStatusCodeDataLoss = "15" + // UNAUTHENTICATED + AttributeRPCGRPCStatusCodeUnauthenticated = "16" +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // error.code property of response if it is an error response. + // + // Type: int + // Requirement Level: Conditionally Required - If response is not successful. + // Stability: experimental + // Examples: -32700, 100 + AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code" + // error.message property of response if it is an error response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message" + // id property of request or response. 
Since protocol allows id to be int, string, + // null or missing (for notifications), value is expected to be cast to string for + // simplicity. Use empty string in case of null value. Omit entirely if this is a + // notification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id" + // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // Requirement Level: Conditionally Required - If other than the default version + // (`1.0`) + // Stability: experimental + // Examples: '2.0', '1.0' + AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version" +) + +// Tech-specific attributes for Connect RPC. +const ( + // The error codes of the Connect request. Error codes are always string values. + // + // Type: Enum + // Requirement Level: Conditionally Required - If response is not successful and + // if error code available. + // Stability: experimental + AttributeRPCConnectRPCErrorCode = "rpc.connect_rpc.error_code" +) + +const ( + // cancelled + AttributeRPCConnectRPCErrorCodeCancelled = "cancelled" + // unknown + AttributeRPCConnectRPCErrorCodeUnknown = "unknown" + // invalid_argument + AttributeRPCConnectRPCErrorCodeInvalidArgument = "invalid_argument" + // deadline_exceeded + AttributeRPCConnectRPCErrorCodeDeadlineExceeded = "deadline_exceeded" + // not_found + AttributeRPCConnectRPCErrorCodeNotFound = "not_found" + // already_exists + AttributeRPCConnectRPCErrorCodeAlreadyExists = "already_exists" + // permission_denied + AttributeRPCConnectRPCErrorCodePermissionDenied = "permission_denied" + // resource_exhausted + AttributeRPCConnectRPCErrorCodeResourceExhausted = "resource_exhausted" + // failed_precondition + AttributeRPCConnectRPCErrorCodeFailedPrecondition = "failed_precondition" + // aborted + AttributeRPCConnectRPCErrorCodeAborted = "aborted" + // out_of_range + AttributeRPCConnectRPCErrorCodeOutOfRange = "out_of_range" + // unimplemented + AttributeRPCConnectRPCErrorCodeUnimplemented = "unimplemented" + // internal + AttributeRPCConnectRPCErrorCodeInternal = "internal" + // unavailable + AttributeRPCConnectRPCErrorCodeUnavailable = "unavailable" + // data_loss + AttributeRPCConnectRPCErrorCodeDataLoss = "data_loss" + // unauthenticated + AttributeRPCConnectRPCErrorCodeUnauthenticated = "unauthenticated" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionType, + AttributeAWSLambdaInvokedARN, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventSubject, + AttributeCloudeventsEventType, + AttributeOpentracingRefType, + AttributeDBConnectionString, + AttributeDBJDBCDriverClassname, + AttributeDBName, + AttributeDBOperation, + AttributeDBStatement, + AttributeDBSystem, + AttributeDBUser, + AttributeDBMSSQLInstanceName, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraCoordinatorDC, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraIdempotence, + AttributeDBCassandraPageSize, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraTable, + AttributeDBRedisDBIndex, + AttributeDBMongoDBCollection, + AttributeDBSQLTable, + AttributeDBCosmosDBClientID, + AttributeDBCosmosDBConnectionMode, + AttributeDBCosmosDBContainer, + 
AttributeDBCosmosDBOperationType, + AttributeDBCosmosDBRequestCharge, + AttributeDBCosmosDBRequestContentLength, + AttributeDBCosmosDBStatusCode, + AttributeDBCosmosDBSubStatusCode, + AttributeOTelStatusCode, + AttributeOTelStatusDescription, + AttributeFaaSInvocationID, + AttributeFaaSTrigger, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentName, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSCron, + AttributeFaaSTime, + AttributeFaaSColdstart, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributePeerService, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeThreadID, + AttributeThreadName, + AttributeCodeColumn, + AttributeCodeFilepath, + AttributeCodeFunction, + AttributeCodeLineNumber, + AttributeCodeNamespace, + AttributeHTTPResendCount, + AttributeAWSRequestID, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeAWSS3Bucket, + AttributeAWSS3CopySource, + AttributeAWSS3Delete, + AttributeAWSS3Key, + AttributeAWSS3PartNumber, + AttributeAWSS3UploadID, + AttributeGraphqlDocument, + AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + AttributeMessagingBatchMessageCount, + AttributeMessagingClientID, + AttributeMessagingOperation, + AttributeMessagingSystem, + AttributeRPCMethod, + AttributeRPCService, + AttributeRPCSystem, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcVersion, + AttributeRPCConnectRPCErrorCode, + } +} diff --git a/semconv/v1.21.0/schema.go b/semconv/v1.21.0/schema.go new file mode 100644 index 00000000000..6db9125c204 --- /dev/null +++ b/semconv/v1.21.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.21.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.21.0" diff --git a/semconv/v1.22.0/doc.go b/semconv/v1.22.0/doc.go new file mode 100644 index 00000000000..87554f9da80 --- /dev/null +++ b/semconv/v1.22.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.22.0 +// version of the OpenTelemetry semantic conventions. 
+package semconv // import "go.opentelemetry.io/collector/semconv/v1.22.0" diff --git a/semconv/v1.22.0/generated_event.go b/semconv/v1.22.0/generated_event.go new file mode 100644 index 00000000000..ec6c733ecd4 --- /dev/null +++ b/semconv/v1.22.0/generated_event.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // The unique identifier of the feature flag. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'logo-color' + AttributeFeatureFlagKey = "feature_flag.key" + // The name of the service provider that performs the flag evaluation. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'Flag Manager' + AttributeFeatureFlagProviderName = "feature_flag.provider_name" + // SHOULD be a semantic identifier for a value. If one is unavailable, a + // stringified version of the value can be used. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant red maybe be used for the value #c05543.A stringified + // version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + AttributeFeatureFlagVariant = "feature_flag.variant" +) + +// RPC received/sent message. +const ( + // Compressed size of the message in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageCompressedSize = "message.compressed_size" + // MUST be calculated as two different counters starting from 1 one for sent + // messages and one for received message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + AttributeMessageID = "message.id" + // Whether this is a received or sent message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessageType = "message.type" + // Uncompressed size of the message in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageUncompressedSize = "message.uncompressed_size" +) + +const ( + // sent + AttributeMessageTypeSent = "SENT" + // received + AttributeMessageTypeReceived = "RECEIVED" +) + +// The attributes used to report a single exception associated with a span. +const ( + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. 
if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages.It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example above.It follows that an exception may still escape the + // scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" +) + +func GetEventSemanticConventionAttributeNames() []string { + return []string{ + AttributeFeatureFlagKey, + AttributeFeatureFlagProviderName, + AttributeFeatureFlagVariant, + AttributeMessageCompressedSize, + AttributeMessageID, + AttributeMessageType, + AttributeMessageUncompressedSize, + AttributeExceptionEscaped, + } +} diff --git a/semconv/v1.22.0/generated_resource.go b/semconv/v1.22.0/generated_resource.go new file mode 100644 index 00000000000..9c86ae85a73 --- /dev/null +++ b/semconv/v1.22.0/generated_resource.go @@ -0,0 +1,1473 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The Android platform on which the Android application is running. +const ( + // Uniquely identifies the framework API revision offered by a version + // (os.version) of the android operating system. More information can be found + // here. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '33', '32' + AttributeAndroidOSAPILevel = "android.os.api_level" +) + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left + // unset. 
+ AttributeBrowserMobile = "browser.mobile" + // The platform on which the browser is running + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // The cloud account ID the resource is assigned to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeCloudProvider = "cloud.provider" + // The geographical region the resource is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud provider-specific native identifier of the monitored cloud resource (e.g. + // an ARN on AWS, a fully qualified resource ID on Azure, a full resource name on + // GCP) + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', '//run.googl + // eapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', '/sub + // scriptions//resourceGroups//providers/Microsoft.Web/sites + // //functions/' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so it may be necessary to set cloud.resource_id as a span attribute instead.The + // exact value to use for cloud.resource_id depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute and + // they apply:
+ //   • AWS Lambda: The function ARN. Take care not to use the "invoked ARN" directly but replace any
+ //     alias suffix with the resolved function version, as the same runtime instance may be invokable with
+ //     multiple different aliases.
+ //   • GCP: The URI of the resource
+ //   • Azure: The Fully Qualified Resource ID of the invoked function, not the function app, having the form
+ //     /subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/.
+ //     This means that a span attribute MUST be used, as an Azure function app can
+ //     host multiple functions that would usually share a TracerProvider.
+ AttributeCloudResourceID = "cloud.resource_id" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Bare Metal Solution (BMS) + AttributeCloudPlatformGCPBareMetalSolution = "gcp_bare_metal_solution" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGCPOpenshift = "gcp_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Heroku Platform as a Service + AttributeCloudProviderHeroku = "heroku" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The ARN of an ECS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The Amazon Resource Name (ARN) of an ECS container instance. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of an ECS task definition. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The task definition family this task definition is a member of. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for this task definition. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. +const ( + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" +) + +// Resource used by Google Cloud Run. +const ( + // The name of the Cloud Run execution being run for the Job, as set by the + // CLOUD_RUN_EXECUTION environment variable. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + AttributeGCPCloudRunJobExecution = "gcp.cloud_run.job.execution" + // The index for a task within an execution as provided by the + // CLOUD_RUN_TASK_INDEX environment variable. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1 + AttributeGCPCloudRunJobTaskIndex = "gcp.cloud_run.job.task_index" +) + +// Resources used by Google Compute Engine (GCE). +const ( + // The hostname of a GCE instance. This is the full value of the default or custom + // hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', 'sample-vm.us-west1-b.c.my- + // project.internal' + AttributeGCPGceInstanceHostname = "gcp.gce.instance.hostname" + // The instance name of a GCE instance. This is the value provided by host.name, + // the visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the default internal DNS + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + AttributeGCPGceInstanceName = "gcp.gce.instance.name" +) + +// Heroku dyno metadata +const ( + // Unique identifier for the application + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + AttributeHerokuAppID = "heroku.app.id" + // Commit hash for the current release + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + AttributeHerokuReleaseCommit = "heroku.release.commit" + // Time and date the release was created + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + AttributeHerokuReleaseCreationTimestamp = "heroku.release.creation_timestamp" +) + +// A container instance. +const ( + // The command used to run the container (i.e. the command name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + AttributeContainerCommand = "container.command" + // All the command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + AttributeContainerCommandArgs = "container.command_args" + // The full command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + AttributeContainerCommandLine = "container.command_line" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // Runtime specific image identifier. Usually a hash algorithm followed by a UUID. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; container.image.id corresponds + // to the Image field from the Docker container inspect API endpoint. + // K8S defines a link to the container registry repository with digest "imageID": + // "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e + // 8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625". + // The ID is assinged by the container runtime and can vary in different + // environments. Consider using oci.manifest.digest if it is important to identify + // the same image in different environments/runtimes. + AttributeContainerImageID = "container.image.id" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Repo digests of the container image as provided by the container runtime. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d7 + // 02d249a0ccb', 'internal.registry.example.com:5000/example@sha256:b69959407d21e8 + // a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: Docker and CRI report those under the RepoDigests field. + AttributeContainerImageRepoDigests = "container.image.repo_digests" + // Container image tags. An example can be found in Docker Image Inspect. Should + // be only the section of the full name for example from + // registry.example.com/my-org/my-image:. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + AttributeContainerImageTags = "container.image.tags" + // Container name used by container runtime. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // The container runtime managing this container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" +) + +// The software deployment. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'staging', 'production' + AttributeDeploymentEnvironment = "deployment.environment" +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. 
Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// A serverless instance. +const ( + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //   • AWS Lambda: Use the (full) log stream name.
+ AttributeFaaSInstance = "faas.instance" + // The amount of memory available to the serverless function converted to Bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information (which must be multiplied by 1,048,576). + AttributeFaaSMaxMemory = "faas.max_memory" + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ //   • Azure: The full name /, i.e., function app name followed by a forward slash followed by the function name
+ //     (this form can also be seen in the resource JSON for the function).
+ //     This means that a span attribute MUST be used, as an Azure function
+ //     app can host multiple functions that would usually share
+ //     a TracerProvider (see also the cloud.resource_id attribute).
+ AttributeFaaSName = "faas.name" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //   • AWS Lambda: The function version (an integer represented as a decimal string).
+ //   • Google Cloud Run (Services): The revision (i.e., the function name plus the revision suffix).
+ //   • Google Cloud Functions: The value of the K_REVISION environment variable.
+ //   • Azure Functions: Not applicable. Do not set this attribute.
+ AttributeFaaSVersion = "faas.version" +) + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeHostArch = "host.arch" + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized systems, this should be the machine-id. See the + // table below for the sources to use to determine the machine-id based on + // operating system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // VM image ID or host OS image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // The version string of the VM image or host OS as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" + // Available IP addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses + // MUST be specified in the RFC 5952 format. + AttributeHostIP = "host.ip" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// A host's CPU information +const ( + // The amount of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 12288000 + AttributeHostCPUCacheL2Size = "host.cpu.cache.l2.size" + // Numeric value specifying the family or generation of the CPU. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 6 + AttributeHostCPUFamily = "host.cpu.family" + // Model identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 6 + AttributeHostCPUModelID = "host.cpu.model.id" + // Model designation of the processor. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + AttributeHostCPUModelName = "host.cpu.model.name" + // Stepping or core revisions. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1 + AttributeHostCPUStepping = "host.cpu.stepping" + // Processor manufacturer identifier. A maximum 12-character string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: CPUID command returns the vendor ID string in EBX, EDX and ECX registers. + // Writing these to memory in this order results in a 12-character string. + AttributeHostCPUVendorID = "host.cpu.vendor.id" +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" + // A pseudo-ID for the cluster, set to the UID of the kube-system namespace. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S does not have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the k8s.cluster.uid through the + // official APIs. In the meantime, we are able to use the uid of the + // kube-system namespace as a proxy for cluster ID. Read on for the + // rationale.Every object created in a K8S cluster is assigned a distinct UID. The + // kube-system namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the uid of the kube-system + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // ISO/IEC 9834-8 and ITU-T X.667. + // Which states:
+ // If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism + // chosen).Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + AttributeK8SClusterUID = "k8s.cluster.uid" +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" +) + +// A Kubernetes Pod object. +const ( + // The name of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" + // The UID of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" +) + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeK8SContainerRestartCount = "k8s.container.restart_count" +) + +// A Kubernetes ReplicaSet object. +const ( + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" +) + +// A Kubernetes Deployment object. +const ( + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" +) + +// A Kubernetes StatefulSet object. +const ( + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" + // The UID of the StatefulSet. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" +) + +// A Kubernetes DaemonSet object. +const ( + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" +) + +// A Kubernetes Job object. +const ( + // The name of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" +) + +// A Kubernetes CronJob object. +const ( + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" +) + +// An OCI image manifest. +const ( + // The digest of the OCI image manifest. For container images specifically is the + // digest by which the container image is known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows OCI Image Manifest Specification, and specifically the Digest + // property. + // An example can be found in Example Image Manifest. + AttributeOciManifestDigest = "oci.manifest.digest" +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // Unique identifier for a particular build or compilation of the operating + // system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + AttributeOSBuildID = "os.build_id" + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The operating system type. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeOSType = "os.type" + // The version string of the operating system as defined in Version Attributes. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// An operating system process. +const ( + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The username of the user that owns the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessOwner = "process.owner" + // Parent Process identifier (PID). 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1234 + AttributeProcessPID = "process.pid" +) + +// The single (language) runtime instance which is monitored. +const ( + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // The version string of the service API or implementation. The format is not + // defined by these conventions. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2.0.0', 'a01dbef8a' + AttributeServiceVersion = "service.version" +) + +// A service instance. +const ( + // The string ID of the service instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-k8s-pod-deployment-1', '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + AttributeServiceInstanceID = "service.instance.id" + // A namespace for service.name. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" +) + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the telemetry.sdk.name attribute to + // opentelemetry. + // If another SDK, like a fork or a vendor-provided implementation, is used, this + // SDK MUST set the + // telemetry.sdk.name attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier opentelemetry is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // rust + AttributeTelemetrySDKLanguageRust = "rust" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" +) + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // The name of the auto instrumentation agent or distribution, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set the + // telemetry.distro.name attribute to + // a string starting with opentelemetry-, e.g. opentelemetry-java-instrumentation. + AttributeTelemetryDistroName = "telemetry.distro.name" + // The version string of the auto instrumentation agent or distribution, if used. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2.3' + AttributeTelemetryDistroVersion = "telemetry.distro.version" +) + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" + // The name of the web engine. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelScopeName = "otel.scope.name" + // The version of the instrumentation scope - (InstrumentationScope.Version in + // OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0.0' + AttributeOTelScopeVersion = "otel.scope.version" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // Deprecated, use the otel.scope.name attribute. + // + // Type: string + // Requirement Level: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelLibraryName = "otel.library.name" + // Deprecated, use the otel.scope.version attribute. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: deprecated + // Examples: '1.0.0' + AttributeOTelLibraryVersion = "otel.library.version" +) + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{ + AttributeAndroidOSAPILevel, + AttributeBrowserBrands, + AttributeBrowserLanguage, + AttributeBrowserMobile, + AttributeBrowserPlatform, + AttributeCloudAccountID, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeCloudProvider, + AttributeCloudRegion, + AttributeCloudResourceID, + AttributeAWSECSClusterARN, + AttributeAWSECSContainerARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupARNs, + AttributeAWSLogGroupNames, + AttributeAWSLogStreamARNs, + AttributeAWSLogStreamNames, + AttributeGCPCloudRunJobExecution, + AttributeGCPCloudRunJobTaskIndex, + AttributeGCPGceInstanceHostname, + AttributeGCPGceInstanceName, + AttributeHerokuAppID, + AttributeHerokuReleaseCommit, + AttributeHerokuReleaseCreationTimestamp, + AttributeContainerCommand, + AttributeContainerCommandArgs, + AttributeContainerCommandLine, + AttributeContainerID, + AttributeContainerImageID, + AttributeContainerImageName, + AttributeContainerImageRepoDigests, + AttributeContainerImageTags, + AttributeContainerName, + AttributeContainerRuntime, + AttributeDeploymentEnvironment, + AttributeDeviceID, + AttributeDeviceManufacturer, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeFaaSInstance, + AttributeFaaSMaxMemory, + AttributeFaaSName, + AttributeFaaSVersion, + AttributeHostArch, + AttributeHostID, + AttributeHostImageID, + AttributeHostImageName, + AttributeHostImageVersion, + AttributeHostIP, + AttributeHostName, + AttributeHostType, + AttributeHostCPUCacheL2Size, + AttributeHostCPUFamily, + AttributeHostCPUModelID, + AttributeHostCPUModelName, + AttributeHostCPUStepping, + AttributeHostCPUVendorID, + AttributeK8SClusterName, + AttributeK8SClusterUID, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SNamespaceName, + AttributeK8SPodName, + AttributeK8SPodUID, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SReplicaSetName, + AttributeK8SReplicaSetUID, + AttributeK8SDeploymentName, + AttributeK8SDeploymentUID, + AttributeK8SStatefulSetName, + AttributeK8SStatefulSetUID, + AttributeK8SDaemonSetName, + AttributeK8SDaemonSetUID, + AttributeK8SJobName, + AttributeK8SJobUID, + AttributeK8SCronJobName, + AttributeK8SCronJobUID, + AttributeOciManifestDigest, + AttributeOSBuildID, + AttributeOSDescription, + AttributeOSName, + AttributeOSType, + AttributeOSVersion, + AttributeProcessCommand, + AttributeProcessCommandArgs, + AttributeProcessCommandLine, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessOwner, + AttributeProcessParentPID, + AttributeProcessPID, + AttributeProcessRuntimeDescription, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeServiceName, + AttributeServiceVersion, + AttributeServiceInstanceID, + AttributeServiceNamespace, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKName, + AttributeTelemetrySDKVersion, + AttributeTelemetryDistroName, + AttributeTelemetryDistroVersion, + AttributeWebEngineDescription, + AttributeWebEngineName, + AttributeWebEngineVersion, + AttributeOTelScopeName, + AttributeOTelScopeVersion, + AttributeOTelLibraryName, + AttributeOTelLibraryVersion, + } +} diff --git 
a/semconv/v1.22.0/generated_trace.go b/semconv/v1.22.0/generated_trace.go new file mode 100644 index 00000000000..2630302dae1 --- /dev/null +++ b/semconv/v1.22.0/generated_trace.go @@ -0,0 +1,1471 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" +) + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // Whether the thread is daemon or not. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeThreadDaemon = "thread.daemon" + // Current "managed" thread ID (as opposed to OS thread ID). 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // The column number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 16 + AttributeCodeColumn = "code.column" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" +) + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from cloud.resource_id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" +) + +// Semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span does not depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// The attributes used to perform database client calls. +const ( + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // The fully-qualified class name of the Java Database Connectivity (JDBC) driver + // used to connect. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Requirement Level: Conditionally Required - If applicable. + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema name), the + // database name to be used is the more specific layer (e.g. Oracle schema name). + AttributeDBName = "db.name" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. + // + // Type: string + // Requirement Level: Conditionally Required - If `db.statement` is not + // applicable. + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. 
+ AttributeDBOperation = "db.operation" + // The database statement being executed. + // + // Type: string + // Requirement Level: Recommended - Should be collected by default only if there + // is sanitization that excludes sensitive information. + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + AttributeDBStatement = "db.statement" + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeDBSystem = "db.system" + // Username for accessing the database. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" +) + +const ( + // Some other SQL database. Fallback only. See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // Microsoft SQL Server Compact + AttributeDBSystemMssqlcompact = "mssqlcompact" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // 
CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" + // ClickHouse + AttributeDBSystemClickhouse = "clickhouse" + // Cloud Spanner + AttributeDBSystemSpanner = "spanner" + // Trino + AttributeDBSystemTrino = "trino" +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, server.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" +) + +// Call-level attributes for Cassandra +const ( + // The consistency level of the query. Based on consistency values from CQL. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. 
+ AttributeDBCassandraTable = "db.cassandra.table" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Requirement Level: Conditionally Required - If other than the default database + // (`0`). + // Stability: experimental + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in db.name. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" +) + +// Call-level attributes for Elasticsearch +const ( + // Represents the identifier of an Elasticsearch cluster. + // + // Type: string + // Requirement Level: Recommended - When communicating with an Elastic Cloud + // deployment, this should be collected from the "X-Found-Handling-Cluster" HTTP + // response header. + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + AttributeDBElasticsearchClusterName = "db.elasticsearch.cluster.name" + // Represents the human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // Requirement Level: Recommended - When communicating with an Elastic Cloud + // deployment, this should be collected from the "X-Found-Handling-Instance" HTTP + // response header. + // Stability: experimental + // Examples: 'instance-0000000001' + AttributeDBElasticsearchNodeName = "db.elasticsearch.node.name" +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of db.statement + // just to get this property, but it should be set if it is provided by the + // library being instrumented. If the operation is acting upon an anonymous table, + // or more than one table, this value MUST NOT be set. + AttributeDBSQLTable = "db.sql.table" +) + +// Call-level attributes for Cosmos DB. +const ( + // Unique Cosmos client instance id. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + AttributeDBCosmosDBClientID = "db.cosmosdb.client_id" + // Cosmos client connection mode. 
+ // + // Type: Enum + // Requirement Level: Conditionally Required - if not `direct` (or pick gw as + // default) + // Stability: experimental + AttributeDBCosmosDBConnectionMode = "db.cosmosdb.connection_mode" + // Cosmos DB container name. + // + // Type: string + // Requirement Level: Conditionally Required - if available + // Stability: experimental + // Examples: 'anystring' + AttributeDBCosmosDBContainer = "db.cosmosdb.container" + // CosmosDB Operation Type. + // + // Type: Enum + // Requirement Level: Conditionally Required - when performing one of the + // operations in this list + // Stability: experimental + AttributeDBCosmosDBOperationType = "db.cosmosdb.operation_type" + // RU consumed for that operation + // + // Type: double + // Requirement Level: Conditionally Required - when available + // Stability: experimental + // Examples: 46.18, 1.0 + AttributeDBCosmosDBRequestCharge = "db.cosmosdb.request_charge" + // Request payload size in bytes + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBRequestContentLength = "db.cosmosdb.request_content_length" + // Cosmos DB status code. + // + // Type: int + // Requirement Level: Conditionally Required - if response was received + // Stability: experimental + // Examples: 200, 201 + AttributeDBCosmosDBStatusCode = "db.cosmosdb.status_code" + // Cosmos DB sub status code. + // + // Type: int + // Requirement Level: Conditionally Required - when response was received and + // contained sub-code. + // Stability: experimental + // Examples: 1000, 1002 + AttributeDBCosmosDBSubStatusCode = "db.cosmosdb.sub_status_code" +) + +const ( + // Gateway (HTTP) connections mode + AttributeDBCosmosDBConnectionModeGateway = "gateway" + // Direct connection + AttributeDBCosmosDBConnectionModeDirect = "direct" +) + +const ( + // invalid + AttributeDBCosmosDBOperationTypeInvalid = "Invalid" + // create + AttributeDBCosmosDBOperationTypeCreate = "Create" + // patch + AttributeDBCosmosDBOperationTypePatch = "Patch" + // read + AttributeDBCosmosDBOperationTypeRead = "Read" + // read_feed + AttributeDBCosmosDBOperationTypeReadFeed = "ReadFeed" + // delete + AttributeDBCosmosDBOperationTypeDelete = "Delete" + // replace + AttributeDBCosmosDBOperationTypeReplace = "Replace" + // execute + AttributeDBCosmosDBOperationTypeExecute = "Execute" + // query + AttributeDBCosmosDBOperationTypeQuery = "Query" + // head + AttributeDBCosmosDBOperationTypeHead = "Head" + // head_feed + AttributeDBCosmosDBOperationTypeHeadFeed = "HeadFeed" + // upsert + AttributeDBCosmosDBOperationTypeUpsert = "Upsert" + // batch + AttributeDBCosmosDBOperationTypeBatch = "Batch" + // query_plan + AttributeDBCosmosDBOperationTypeQueryPlan = "QueryPlan" + // execute_javascript + AttributeDBCosmosDBOperationTypeExecuteJavascript = "ExecuteJavaScript" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set + // if the status code is UNSET. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeOTelStatusCode = "otel.status_code" + // Description of the Status if it has a value, otherwise not set. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'resource not found' + AttributeOTelStatusDescription = "otel.status_description" +) + +const ( + // The operation has been validated by an Application developer or Operator to have completed successfully + AttributeOTelStatusCodeOk = "OK" + // The operation contains an error + AttributeOTelStatusCodeError = "ERROR" +) + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // The invocation ID of the current function invocation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSInvocationID = "faas.invocation_id" +) + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSColdstart = "faas.coldstart" +) + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. 
+// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // The AWS request ID as returned in the response headers x-amz-request-id or + // x-amz-requestid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AttributeAWSRequestID = "aws.request_id" +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The JSON-serialized value of each item in the ConsumedCapacity response field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The value of the IndexName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The JSON-serialized value of the ItemCollectionMetrics response field. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. 
+ // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the Select request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" + // The keys in the RequestItems object field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" +) + +// DynamoDB.ListTables +const ( + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The the number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" +) + +// DynamoDB.Query +const ( + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" +) + +// DynamoDB.Scan +const ( + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" + // The value of the Segment request parameter. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the TotalSegments request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" +) + +// DynamoDB.UpdateTable +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The JSON-serialized value of each item in the the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" +) + +// Attributes that exist for S3 request types. +const ( + // The S3 bucket name the request refers to. Corresponds to the --bucket parameter + // of the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The bucket attribute is applicable to all S3 operations that reference a + // bucket, i.e. that require the bucket name as a mandatory parameter. + // This applies to almost all S3 operations except list-buckets. + AttributeAWSS3Bucket = "aws.s3.bucket" + // The source object (in the form bucket/key) for the copy operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The copy_source attribute applies to S3 copy operations and corresponds + // to the --copy-source parameter + // of the copy-object operation within the S3 API. + // This applies in particular to the following operations:
+ //   - copy-object
+ //   - upload-part-copy
+ AttributeAWSS3CopySource = "aws.s3.copy_source" + // The delete request container that specifies the objects to be deleted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string} + // ],Quiet=boolean' + // Note: The delete attribute is only applicable to the delete-object operation. + // The delete attribute corresponds to the --delete parameter of the + // delete-objects operation within the S3 API. + AttributeAWSS3Delete = "aws.s3.delete" + // The S3 object key the request refers to. Corresponds to the --key parameter of + // the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The key attribute is applicable to all object-related S3 operations, i.e. + // that require the object key as a mandatory parameter. + // This applies in particular to the following operations:
+ //   - copy-object
+ //   - delete-object
+ //   - get-object
+ //   - head-object
+ //   - put-object
+ //   - restore-object
+ //   - select-object-content
+ //   - abort-multipart-upload
+ //   - complete-multipart-upload
+ //   - create-multipart-upload
+ //   - list-parts
+ //   - upload-part
+ //   - upload-part-copy
+ AttributeAWSS3Key = "aws.s3.key" + // The part number of the part being uploaded in a multipart-upload operation. + // This is a positive integer between 1 and 10,000. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3456 + // Note: The part_number attribute is only applicable to the upload-part + // and upload-part-copy operations. + // The part_number attribute corresponds to the --part-number parameter of the + // upload-part operation within the S3 API. + AttributeAWSS3PartNumber = "aws.s3.part_number" + // Upload ID that identifies the multipart upload. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The upload_id attribute applies to S3 multipart-upload operations and + // corresponds to the --upload-id parameter + // of the S3 API multipart operations. + // This applies in particular to the following operations:
+ //   - abort-multipart-upload
+ //   - complete-multipart-upload
+ //   - list-parts
+ //   - upload-part
+ //   - upload-part-copy
+ AttributeAWSS3UploadID = "aws.s3.upload_id" +) + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +// General attributes used in messaging systems. +const ( + // The number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // Requirement Level: Conditionally Required - If the span describes an operation + // on a batch of messages. + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set messaging.batch.message_count on spans + // that operate with a single message. When a messaging client library supports + // both batch and single-message API for the same operation, instrumentations + // SHOULD use messaging.batch.message_count for batching APIs and SHOULD NOT use + // it for single-message APIs. + AttributeMessagingBatchMessageCount = "messaging.batch.message_count" + // A unique identifier for the client that consumes or produces a message. + // + // Type: string + // Requirement Level: Recommended - If a client id is available + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + AttributeMessagingClientID = "messaging.client_id" + // A string identifying the kind of messaging operation as defined in the + // Operation names section above. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + AttributeMessagingOperation = "messaging.operation" + // A string identifying the messaging system. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + AttributeMessagingSystem = "messaging.system" +) + +const ( + // publish + AttributeMessagingOperationPublish = "publish" + // receive + AttributeMessagingOperationReceive = "receive" + // process + AttributeMessagingOperationProcess = "process" +) + +// Semantic conventions for remote procedure calls. +const ( + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. 
The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeRPCSystem = "rpc.system" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" + // Connect RPC + AttributeRPCSystemConnectRPC = "connect_rpc" +) + +// Tech-specific attributes for gRPC. +const ( + // The numeric status code of the gRPC request. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + AttributeRPCGRPCStatusCode = "rpc.grpc.status_code" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + AttributeRPCGRPCStatusCodeResourceExhausted = "8" + // FAILED_PRECONDITION + AttributeRPCGRPCStatusCodeFailedPrecondition = "9" + // ABORTED + AttributeRPCGRPCStatusCodeAborted = "10" + // OUT_OF_RANGE + AttributeRPCGRPCStatusCodeOutOfRange = "11" + // UNIMPLEMENTED + AttributeRPCGRPCStatusCodeUnimplemented = "12" + // INTERNAL + AttributeRPCGRPCStatusCodeInternal = "13" + // UNAVAILABLE + AttributeRPCGRPCStatusCodeUnavailable = "14" + // DATA_LOSS + AttributeRPCGRPCStatusCodeDataLoss = "15" + // UNAUTHENTICATED + AttributeRPCGRPCStatusCodeUnauthenticated = "16" +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // error.code property of response if it is an error response. + // + // Type: int + // Requirement Level: Conditionally Required - If response is not successful. + // Stability: experimental + // Examples: -32700, 100 + AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code" + // error.message property of response if it is an error response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message" + // id property of request or response. 
Since protocol allows id to be int, string, + // null or missing (for notifications), value is expected to be cast to string for + // simplicity. Use empty string in case of null value. Omit entirely if this is a + // notification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id" + // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // Requirement Level: Conditionally Required - If other than the default version + // (`1.0`) + // Stability: experimental + // Examples: '2.0', '1.0' + AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version" +) + +// Tech-specific attributes for Connect RPC. +const ( + // The error codes of the Connect request. Error codes are always string values. + // + // Type: Enum + // Requirement Level: Conditionally Required - If response is not successful and + // if error code available. + // Stability: experimental + AttributeRPCConnectRPCErrorCode = "rpc.connect_rpc.error_code" +) + +const ( + // cancelled + AttributeRPCConnectRPCErrorCodeCancelled = "cancelled" + // unknown + AttributeRPCConnectRPCErrorCodeUnknown = "unknown" + // invalid_argument + AttributeRPCConnectRPCErrorCodeInvalidArgument = "invalid_argument" + // deadline_exceeded + AttributeRPCConnectRPCErrorCodeDeadlineExceeded = "deadline_exceeded" + // not_found + AttributeRPCConnectRPCErrorCodeNotFound = "not_found" + // already_exists + AttributeRPCConnectRPCErrorCodeAlreadyExists = "already_exists" + // permission_denied + AttributeRPCConnectRPCErrorCodePermissionDenied = "permission_denied" + // resource_exhausted + AttributeRPCConnectRPCErrorCodeResourceExhausted = "resource_exhausted" + // failed_precondition + AttributeRPCConnectRPCErrorCodeFailedPrecondition = "failed_precondition" + // aborted + AttributeRPCConnectRPCErrorCodeAborted = "aborted" + // out_of_range + AttributeRPCConnectRPCErrorCodeOutOfRange = "out_of_range" + // unimplemented + AttributeRPCConnectRPCErrorCodeUnimplemented = "unimplemented" + // internal + AttributeRPCConnectRPCErrorCodeInternal = "internal" + // unavailable + AttributeRPCConnectRPCErrorCodeUnavailable = "unavailable" + // data_loss + AttributeRPCConnectRPCErrorCodeDataLoss = "data_loss" + // unauthenticated + AttributeRPCConnectRPCErrorCodeUnauthenticated = "unauthenticated" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionType, + AttributePeerService, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeThreadDaemon, + AttributeThreadID, + AttributeThreadName, + AttributeCodeColumn, + AttributeCodeFilepath, + AttributeCodeFunction, + AttributeCodeLineNumber, + AttributeCodeNamespace, + AttributeAWSLambdaInvokedARN, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventSubject, + AttributeCloudeventsEventType, + AttributeOpentracingRefType, + AttributeDBConnectionString, + AttributeDBJDBCDriverClassname, + AttributeDBName, + AttributeDBOperation, + AttributeDBStatement, + AttributeDBSystem, + AttributeDBUser, + AttributeDBMSSQLInstanceName, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraCoordinatorDC, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraIdempotence, + 
AttributeDBCassandraPageSize, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraTable, + AttributeDBRedisDBIndex, + AttributeDBMongoDBCollection, + AttributeDBElasticsearchClusterName, + AttributeDBElasticsearchNodeName, + AttributeDBSQLTable, + AttributeDBCosmosDBClientID, + AttributeDBCosmosDBConnectionMode, + AttributeDBCosmosDBContainer, + AttributeDBCosmosDBOperationType, + AttributeDBCosmosDBRequestCharge, + AttributeDBCosmosDBRequestContentLength, + AttributeDBCosmosDBStatusCode, + AttributeDBCosmosDBSubStatusCode, + AttributeOTelStatusCode, + AttributeOTelStatusDescription, + AttributeFaaSInvocationID, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentName, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSCron, + AttributeFaaSTime, + AttributeFaaSColdstart, + AttributeAWSRequestID, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeAWSS3Bucket, + AttributeAWSS3CopySource, + AttributeAWSS3Delete, + AttributeAWSS3Key, + AttributeAWSS3PartNumber, + AttributeAWSS3UploadID, + AttributeGraphqlDocument, + AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + AttributeMessagingBatchMessageCount, + AttributeMessagingClientID, + AttributeMessagingOperation, + AttributeMessagingSystem, + AttributeRPCMethod, + AttributeRPCService, + AttributeRPCSystem, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcVersion, + AttributeRPCConnectRPCErrorCode, + } +} diff --git a/semconv/v1.22.0/schema.go b/semconv/v1.22.0/schema.go new file mode 100644 index 00000000000..bf32dc2f18c --- /dev/null +++ b/semconv/v1.22.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.22.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.22.0" diff --git a/service/go.mod b/service/go.mod index 9e4f1645d4b..e8a7eafca54 100644 --- a/service/go.mod +++ b/service/go.mod @@ -8,24 +8,24 @@ require ( github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 - github.com/shirou/gopsutil/v3 v3.23.10 + github.com/shirou/gopsutil/v3 v3.23.11 github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 - go.opentelemetry.io/collector v0.90.0 - go.opentelemetry.io/collector/component v0.90.0 - go.opentelemetry.io/collector/config/confignet v0.90.0 - go.opentelemetry.io/collector/config/configtelemetry v0.90.0 - go.opentelemetry.io/collector/confmap v0.90.0 - go.opentelemetry.io/collector/connector v0.90.0 - go.opentelemetry.io/collector/consumer v0.90.0 - go.opentelemetry.io/collector/exporter v0.90.0 - go.opentelemetry.io/collector/extension v0.90.0 - go.opentelemetry.io/collector/extension/zpagesextension v0.90.0 + go.opentelemetry.io/collector v0.91.0 + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/confignet v0.91.0 + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/connector v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 + go.opentelemetry.io/collector/exporter v0.91.0 + go.opentelemetry.io/collector/extension v0.91.0 + go.opentelemetry.io/collector/extension/zpagesextension v0.91.0 go.opentelemetry.io/collector/featuregate v1.0.0 go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/processor v0.90.0 - go.opentelemetry.io/collector/receiver v0.90.0 - go.opentelemetry.io/collector/semconv v0.90.0 + go.opentelemetry.io/collector/processor v0.91.0 + go.opentelemetry.io/collector/receiver v0.91.0 + go.opentelemetry.io/collector/semconv v0.91.0 go.opentelemetry.io/contrib/config v0.1.1 go.opentelemetry.io/contrib/propagators/b3 v1.21.1 go.opentelemetry.io/otel v1.21.0 diff --git a/service/go.sum b/service/go.sum index 510dba490d3..ae8e59ec70b 100644 --- a/service/go.sum +++ b/service/go.sum @@ -249,8 +249,8 @@ github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/shirou/gopsutil/v3 v3.23.10 h1:/N42opWlYzegYaVkWejXWJpbzKv2JDy3mrgGzKsh9hM= -github.com/shirou/gopsutil/v3 v3.23.10/go.mod h1:JIE26kpucQi+innVlAUnIEOSBhBUkirr5b44yr55+WE= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -466,7 +466,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/service/internal/graph/graph.go b/service/internal/graph/graph.go index 1c3c5392bde..9e69eab3cf6 100644 --- a/service/internal/graph/graph.go +++ b/service/internal/graph/graph.go @@ -288,7 +288,10 @@ func (g *Graph) buildComponents(ctx context.Context, set Settings) error { case *connectorNode: err = n.buildComponent(ctx, telemetrySettings, set.BuildInfo, set.ConnectorBuilder, g.nextConsumers(n.ID())) case *capabilitiesNode: - capability := consumer.Capabilities{MutatesData: false} + capability := consumer.Capabilities{ + // The fanOutNode represents the aggregate capabilities of the exporters in the pipeline. + MutatesData: g.pipelines[n.pipelineID].fanOutNode.getConsumer().Capabilities().MutatesData, + } for _, proc := range g.pipelines[n.pipelineID].processors { capability.MutatesData = capability.MutatesData || proc.getConsumer().Capabilities().MutatesData } @@ -319,7 +322,6 @@ func (g *Graph) buildComponents(ctx context.Context, set Settings) error { case component.DataTypeMetrics: consumers := make([]consumer.Metrics, 0, len(nexts)) for _, next := range nexts { - consumers = append(consumers, next.(consumer.Metrics)) } n.baseConsumer = fanoutconsumer.NewMetrics(consumers) diff --git a/service/internal/graph/graph_test.go b/service/internal/graph/graph_test.go index b198d3b81e6..b4aee55cc4c 100644 --- a/service/internal/graph/graph_test.go +++ b/service/internal/graph/graph_test.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "strings" "sync" "testing" @@ -384,11 +385,11 @@ func TestConnectorPipelinesGraph(t *testing.T) { component.NewIDWithName("traces", "in"): { Receivers: []component.ID{component.NewID("examplereceiver")}, Processors: []component.ID{component.NewID("exampleprocessor")}, - Exporters: []component.ID{component.NewID("exampleconnector")}, // wrapped w/ mutates: true + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, }, component.NewIDWithName("traces", "out"): { - Receivers: []component.ID{component.NewID("exampleconnector")}, - Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, // mutate propagates upstream to connector Exporters: []component.ID{component.NewID("exampleexporter")}, }, }, @@ -400,11 +401,11 @@ func TestConnectorPipelinesGraph(t *testing.T) { component.NewIDWithName("metrics", "in"): { Receivers: []component.ID{component.NewID("examplereceiver")}, Processors: []component.ID{component.NewID("exampleprocessor")}, - Exporters: []component.ID{component.NewID("exampleconnector")}, // wrapped w/ mutates: true + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, }, component.NewIDWithName("metrics", "out"): { - Receivers: []component.ID{component.NewID("exampleconnector")}, - Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Receivers: 
[]component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, // mutate propagates upstream to connector Exporters: []component.ID{component.NewID("exampleexporter")}, }, }, @@ -416,11 +417,11 @@ func TestConnectorPipelinesGraph(t *testing.T) { component.NewIDWithName("logs", "in"): { Receivers: []component.ID{component.NewID("examplereceiver")}, Processors: []component.ID{component.NewID("exampleprocessor")}, - Exporters: []component.ID{component.NewID("exampleconnector")}, // wrapped w/ mutates: true + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, }, component.NewIDWithName("logs", "out"): { - Receivers: []component.ID{component.NewID("exampleconnector")}, - Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, // mutate propagates upstream to connector Exporters: []component.ID{component.NewID("exampleexporter")}, }, }, @@ -432,16 +433,16 @@ func TestConnectorPipelinesGraph(t *testing.T) { component.NewIDWithName("traces", "in"): { Receivers: []component.ID{component.NewID("examplereceiver")}, Processors: []component.ID{component.NewID("exampleprocessor")}, - Exporters: []component.ID{component.NewIDWithName("exampleconnector", "fork")}, // wrapped w/ mutates: true + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, }, component.NewIDWithName("traces", "type0"): { - Receivers: []component.ID{component.NewIDWithName("exampleconnector", "fork")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, Processors: []component.ID{component.NewID("exampleprocessor")}, Exporters: []component.ID{component.NewIDWithName("exampleconnector", "merge")}, }, component.NewIDWithName("traces", "type1"): { - Receivers: []component.ID{component.NewIDWithName("exampleconnector", "fork")}, - Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, // mutate propagates upstream to connector Exporters: []component.ID{component.NewIDWithName("exampleconnector", "merge")}, }, component.NewIDWithName("traces", "out"): { @@ -458,16 +459,16 @@ func TestConnectorPipelinesGraph(t *testing.T) { component.NewIDWithName("metrics", "in"): { Receivers: []component.ID{component.NewID("examplereceiver")}, Processors: []component.ID{component.NewID("exampleprocessor")}, - Exporters: []component.ID{component.NewIDWithName("exampleconnector", "fork")}, // wrapped w/ mutates: true + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, }, component.NewIDWithName("metrics", "type0"): { - Receivers: []component.ID{component.NewIDWithName("exampleconnector", "fork")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, Processors: []component.ID{component.NewID("exampleprocessor")}, Exporters: []component.ID{component.NewIDWithName("exampleconnector", "merge")}, }, component.NewIDWithName("metrics", "type1"): { - Receivers: []component.ID{component.NewIDWithName("exampleconnector", "fork")}, - Processors: 
[]component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, // mutate propagates upstream to connector Exporters: []component.ID{component.NewIDWithName("exampleconnector", "merge")}, }, component.NewIDWithName("metrics", "out"): { @@ -484,16 +485,16 @@ func TestConnectorPipelinesGraph(t *testing.T) { component.NewIDWithName("logs", "in"): { Receivers: []component.ID{component.NewID("examplereceiver")}, Processors: []component.ID{component.NewID("exampleprocessor")}, - Exporters: []component.ID{component.NewIDWithName("exampleconnector", "fork")}, // wrapped w/ mutates: true + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, }, component.NewIDWithName("logs", "type0"): { - Receivers: []component.ID{component.NewIDWithName("exampleconnector", "fork")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, Processors: []component.ID{component.NewID("exampleprocessor")}, Exporters: []component.ID{component.NewIDWithName("exampleconnector", "merge")}, }, component.NewIDWithName("logs", "type1"): { - Receivers: []component.ID{component.NewIDWithName("exampleconnector", "fork")}, - Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, // mutate propagates upstream to connector Exporters: []component.ID{component.NewIDWithName("exampleconnector", "merge")}, }, component.NewIDWithName("logs", "out"): { @@ -572,32 +573,32 @@ func TestConnectorPipelinesGraph(t *testing.T) { pipelineConfigs: pipelines.Config{ component.NewIDWithName("traces", "in"): { Receivers: []component.ID{component.NewID("examplereceiver")}, - Processors: []component.ID{component.NewID("exampleprocessor")}, - Exporters: []component.ID{component.NewID("exampleconnector")}, // wrapped w/ mutates: true + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, }, component.NewIDWithName("metrics", "in"): { Receivers: []component.ID{component.NewID("examplereceiver")}, Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, - Exporters: []component.ID{component.NewID("exampleconnector")}, + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, }, component.NewIDWithName("logs", "in"): { Receivers: []component.ID{component.NewID("examplereceiver")}, - Processors: []component.ID{component.NewID("exampleprocessor")}, - Exporters: []component.ID{component.NewID("exampleconnector")}, // wrapped w/ mutates: true + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, }, component.NewIDWithName("traces", "out"): { - Receivers: []component.ID{component.NewID("exampleconnector")}, - Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, // mutate propagates upstream to connector 
Exporters: []component.ID{component.NewID("exampleexporter")}, }, component.NewIDWithName("metrics", "out"): { - Receivers: []component.ID{component.NewID("exampleconnector")}, - Processors: []component.ID{component.NewID("exampleprocessor")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, // mutate propagates upstream to connector Exporters: []component.ID{component.NewID("exampleexporter")}, }, component.NewIDWithName("logs", "out"): { - Receivers: []component.ID{component.NewID("exampleconnector")}, - Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewIDWithName("exampleprocessor", "mutate")}, // mutate propagates upstream to connector Exporters: []component.ID{component.NewID("exampleexporter")}, }, }, @@ -633,6 +634,84 @@ func TestConnectorPipelinesGraph(t *testing.T) { }, expectedPerExporter: 1, }, + { + name: "pipelines_conn_mutate_traces.yaml", + pipelineConfigs: pipelines.Config{ + component.NewIDWithName("traces", "in"): { + Receivers: []component.ID{component.NewID("examplereceiver")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + }, + component.NewIDWithName("traces", "out0"): { + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewID("exampleexporter")}, + }, + component.NewIDWithName("traces", "middle"): { + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "mutate")}, + }, + component.NewIDWithName("traces", "out1"): { + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "mutate")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewID("exampleexporter")}, + }, + }, + expectedPerExporter: 2, + }, + { + name: "pipelines_conn_mutate_metrics.yaml", + pipelineConfigs: pipelines.Config{ + component.NewIDWithName("metrics", "in"): { + Receivers: []component.ID{component.NewID("examplereceiver")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + }, + component.NewIDWithName("metrics", "out0"): { + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewID("exampleexporter")}, + }, + component.NewIDWithName("metrics", "middle"): { + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "mutate")}, + }, + component.NewIDWithName("metrics", "out1"): { + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "mutate")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewID("exampleexporter")}, + }, 
+ }, + expectedPerExporter: 2, + }, + { + name: "pipelines_conn_mutate_logs.yaml", + pipelineConfigs: pipelines.Config{ + component.NewIDWithName("logs", "in"): { + Receivers: []component.ID{component.NewID("examplereceiver")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + }, + component.NewIDWithName("logs", "out0"): { + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewID("exampleexporter")}, + }, + component.NewIDWithName("logs", "middle"): { + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "inherit_mutate")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewIDWithName("exampleconnector", "mutate")}, + }, + component.NewIDWithName("logs", "out1"): { + Receivers: []component.ID{component.NewIDWithName("exampleconnector", "mutate")}, + Processors: []component.ID{component.NewID("exampleprocessor")}, + Exporters: []component.ID{component.NewID("exampleexporter")}, + }, + }, + expectedPerExporter: 2, + }, } for _, test := range tests { @@ -670,10 +749,11 @@ func TestConnectorPipelinesGraph(t *testing.T) { ), ConnectorBuilder: connector.NewBuilder( map[component.ID]component.Config{ - component.NewID("exampleconnector"): testcomponents.ExampleConnectorFactory.CreateDefaultConfig(), - component.NewIDWithName("exampleconnector", "fork"): testcomponents.ExampleConnectorFactory.CreateDefaultConfig(), - component.NewIDWithName("exampleconnector", "merge"): testcomponents.ExampleConnectorFactory.CreateDefaultConfig(), - component.NewID("mockforward"): testcomponents.MockForwardConnectorFactory.CreateDefaultConfig(), + component.NewID("exampleconnector"): testcomponents.ExampleConnectorFactory.CreateDefaultConfig(), + component.NewIDWithName("exampleconnector", "merge"): testcomponents.ExampleConnectorFactory.CreateDefaultConfig(), + component.NewIDWithName("exampleconnector", "mutate"): testcomponents.ExampleConnectorFactory.CreateDefaultConfig(), + component.NewIDWithName("exampleconnector", "inherit_mutate"): testcomponents.ExampleConnectorFactory.CreateDefaultConfig(), + component.NewID("mockforward"): testcomponents.MockForwardConnectorFactory.CreateDefaultConfig(), }, map[component.Type]connector.Factory{ testcomponents.ExampleConnectorFactory.Type(): testcomponents.ExampleConnectorFactory, @@ -700,6 +780,11 @@ func TestConnectorPipelinesGraph(t *testing.T) { // Determine independently if the capabilities node should report MutateData as true var expectMutatesData bool + for _, expr := range pipelineCfg.Exporters { + if strings.Contains(expr.Name(), "mutate") { + expectMutatesData = true + } + } for _, proc := range pipelineCfg.Processors { if proc.Name() == "mutate" { expectMutatesData = true @@ -767,6 +852,9 @@ func TestConnectorPipelinesGraph(t *testing.T) { if !ok { continue } + if expConn.getConsumer().Capabilities().MutatesData { + continue + } // find all the pipelines of the same type where this connector is a receiver var inheritMutatesData bool for recPipelineID, recPipeline := range pg.pipelines { diff --git a/service/internal/graph/nodes.go b/service/internal/graph/nodes.go index 7f7fda70f85..eb8e886b554 100644 --- a/service/internal/graph/nodes.go +++ b/service/internal/graph/nodes.go @@ -255,6 +255,9 @@ func (n *connectorNode) 
buildComponent( n.Component = conn // When connecting pipelines of the same data type, the connector must // inherit the capabilities of pipelines in which it is acting as a receiver. + // Since the incoming and outgoing data types are the same, we must also consider + // that the connector itself may MutatesData. + capability.MutatesData = capability.MutatesData || conn.Capabilities().MutatesData n.baseConsumer = capabilityconsumer.NewTraces(conn, capability) case component.DataTypeMetrics: conn, err := builder.CreateMetricsToTraces(ctx, set, next) @@ -294,6 +297,9 @@ func (n *connectorNode) buildComponent( n.Component = conn // When connecting pipelines of the same data type, the connector must // inherit the capabilities of pipelines in which it is acting as a receiver. + // Since the incoming and outgoing data types are the same, we must also consider + // that the connector itself may MutatesData. + capability.MutatesData = capability.MutatesData || conn.Capabilities().MutatesData n.baseConsumer = capabilityconsumer.NewMetrics(conn, capability) case component.DataTypeLogs: conn, err := builder.CreateLogsToMetrics(ctx, set, next) @@ -332,6 +338,9 @@ func (n *connectorNode) buildComponent( n.Component = conn // When connecting pipelines of the same data type, the connector must // inherit the capabilities of pipelines in which it is acting as a receiver. + // Since the incoming and outgoing data types are the same, we must also consider + // that the connector itself may MutatesData. + capability.MutatesData = capability.MutatesData || conn.Capabilities().MutatesData n.baseConsumer = capabilityconsumer.NewLogs(conn, capability) } } diff --git a/service/internal/testcomponents/example_connector.go b/service/internal/testcomponents/example_connector.go index ccf6d678d28..c5c10b92b78 100644 --- a/service/internal/testcomponents/example_connector.go +++ b/service/internal/testcomponents/example_connector.go @@ -47,69 +47,78 @@ func createExampleConnectorDefaultConfig() component.Config { return &struct{}{} } -func createExampleTracesToTraces(_ context.Context, _ connector.CreateSettings, _ component.Config, traces consumer.Traces) (connector.Traces, error) { +func createExampleTracesToTraces(_ context.Context, set connector.CreateSettings, _ component.Config, traces consumer.Traces) (connector.Traces, error) { return &ExampleConnector{ ConsumeTracesFunc: traces.ConsumeTraces, + mutatesData: set.ID.Name() == "mutate", }, nil } -func createExampleTracesToMetrics(_ context.Context, _ connector.CreateSettings, _ component.Config, metrics consumer.Metrics) (connector.Traces, error) { +func createExampleTracesToMetrics(_ context.Context, set connector.CreateSettings, _ component.Config, metrics consumer.Metrics) (connector.Traces, error) { return &ExampleConnector{ ConsumeTracesFunc: func(ctx context.Context, td ptrace.Traces) error { return metrics.ConsumeMetrics(ctx, testdata.GenerateMetrics(td.SpanCount())) }, + mutatesData: set.ID.Name() == "mutate", }, nil } -func createExampleTracesToLogs(_ context.Context, _ connector.CreateSettings, _ component.Config, logs consumer.Logs) (connector.Traces, error) { +func createExampleTracesToLogs(_ context.Context, set connector.CreateSettings, _ component.Config, logs consumer.Logs) (connector.Traces, error) { return &ExampleConnector{ ConsumeTracesFunc: func(ctx context.Context, td ptrace.Traces) error { return logs.ConsumeLogs(ctx, testdata.GenerateLogs(td.SpanCount())) }, + mutatesData: set.ID.Name() == "mutate", }, nil } -func 
createExampleMetricsToTraces(_ context.Context, _ connector.CreateSettings, _ component.Config, traces consumer.Traces) (connector.Metrics, error) { +func createExampleMetricsToTraces(_ context.Context, set connector.CreateSettings, _ component.Config, traces consumer.Traces) (connector.Metrics, error) { return &ExampleConnector{ ConsumeMetricsFunc: func(ctx context.Context, md pmetric.Metrics) error { return traces.ConsumeTraces(ctx, testdata.GenerateTraces(md.MetricCount())) }, + mutatesData: set.ID.Name() == "mutate", }, nil } -func createExampleMetricsToMetrics(_ context.Context, _ connector.CreateSettings, _ component.Config, metrics consumer.Metrics) (connector.Metrics, error) { +func createExampleMetricsToMetrics(_ context.Context, set connector.CreateSettings, _ component.Config, metrics consumer.Metrics) (connector.Metrics, error) { return &ExampleConnector{ ConsumeMetricsFunc: metrics.ConsumeMetrics, + mutatesData: set.ID.Name() == "mutate", }, nil } -func createExampleMetricsToLogs(_ context.Context, _ connector.CreateSettings, _ component.Config, logs consumer.Logs) (connector.Metrics, error) { +func createExampleMetricsToLogs(_ context.Context, set connector.CreateSettings, _ component.Config, logs consumer.Logs) (connector.Metrics, error) { return &ExampleConnector{ ConsumeMetricsFunc: func(ctx context.Context, md pmetric.Metrics) error { return logs.ConsumeLogs(ctx, testdata.GenerateLogs(md.MetricCount())) }, + mutatesData: set.ID.Name() == "mutate", }, nil } -func createExampleLogsToTraces(_ context.Context, _ connector.CreateSettings, _ component.Config, traces consumer.Traces) (connector.Logs, error) { +func createExampleLogsToTraces(_ context.Context, set connector.CreateSettings, _ component.Config, traces consumer.Traces) (connector.Logs, error) { return &ExampleConnector{ ConsumeLogsFunc: func(ctx context.Context, ld plog.Logs) error { return traces.ConsumeTraces(ctx, testdata.GenerateTraces(ld.LogRecordCount())) }, + mutatesData: set.ID.Name() == "mutate", }, nil } -func createExampleLogsToMetrics(_ context.Context, _ connector.CreateSettings, _ component.Config, metrics consumer.Metrics) (connector.Logs, error) { +func createExampleLogsToMetrics(_ context.Context, set connector.CreateSettings, _ component.Config, metrics consumer.Metrics) (connector.Logs, error) { return &ExampleConnector{ ConsumeLogsFunc: func(ctx context.Context, ld plog.Logs) error { return metrics.ConsumeMetrics(ctx, testdata.GenerateMetrics(ld.LogRecordCount())) }, + mutatesData: set.ID.Name() == "mutate", }, nil } -func createExampleLogsToLogs(_ context.Context, _ connector.CreateSettings, _ component.Config, logs consumer.Logs) (connector.Logs, error) { +func createExampleLogsToLogs(_ context.Context, set connector.CreateSettings, _ component.Config, logs consumer.Logs) (connector.Logs, error) { return &ExampleConnector{ ConsumeLogsFunc: logs.ConsumeLogs, + mutatesData: set.ID.Name() == "mutate", }, nil } @@ -118,8 +127,9 @@ type ExampleConnector struct { consumer.ConsumeTracesFunc consumer.ConsumeMetricsFunc consumer.ConsumeLogsFunc + mutatesData bool } func (c *ExampleConnector) Capabilities() consumer.Capabilities { - return consumer.Capabilities{MutatesData: false} + return consumer.Capabilities{MutatesData: c.mutatesData} } diff --git a/versions.yaml b/versions.yaml index d5a98856a53..73dcb57025c 100644 --- a/versions.yaml +++ b/versions.yaml @@ -8,7 +8,7 @@ module-sets: - go.opentelemetry.io/collector/featuregate - go.opentelemetry.io/collector/pdata beta: - version: v0.90.1 + version: 
v0.91.0 modules: - go.opentelemetry.io/collector - go.opentelemetry.io/collector/cmd/builder
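
The generated `semconv/v1.22.0` package above exposes attribute-key constants and a `SchemaURL` constant. Below is a minimal sketch of how a component might stamp them onto pdata traces; the span name and attribute values are made-up examples, not collector code.

```go
package main

import (
	"go.opentelemetry.io/collector/pdata/ptrace"
	semconv "go.opentelemetry.io/collector/semconv/v1.22.0"
)

func main() {
	td := ptrace.NewTraces()
	rs := td.ResourceSpans().AppendEmpty()
	// Record which semantic-conventions version these attributes follow.
	rs.SetSchemaUrl(semconv.SchemaURL)

	span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName("example-rpc-call")
	// Keys come from the generated constants; the values are example data.
	span.Attributes().PutStr(semconv.AttributeRPCSystem, "connect_rpc")
	span.Attributes().PutStr(semconv.AttributeRPCService, "example.EchoService")
	span.Attributes().PutStr(semconv.AttributeRPCMethod, "Echo")
	span.Attributes().PutStr(semconv.AttributeRPCConnectRPCErrorCode, semconv.AttributeRPCConnectRPCErrorCodeUnavailable)
}
```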
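
The `capabilitiesNode` change in `graph.go` above seeds a pipeline's `MutatesData` from the fan-out consumer (the aggregate of its exporters/connectors) and then ORs in each processor, which is also how the updated test derives `expectMutatesData`. A small, hypothetical helper showing that reduction (`pipelineMutatesData` is an illustrative name, not collector code):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer"
)

// pipelineMutatesData mirrors the aggregation in the capabilitiesNode case:
// start from the fan-out consumer's capabilities and OR in each processor's.
func pipelineMutatesData(fanOut consumer.Capabilities, processors []consumer.Capabilities) bool {
	mutates := fanOut.MutatesData
	for _, p := range processors {
		mutates = mutates || p.MutatesData
	}
	return mutates
}

func main() {
	// The exporters do not mutate, but one processor does, so the pipeline's
	// capabilities node must report MutatesData=true.
	fmt.Println(pipelineMutatesData(
		consumer.Capabilities{MutatesData: false},
		[]consumer.Capabilities{{MutatesData: false}, {MutatesData: true}},
	)) // true
}
```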
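
The `nodes.go` hunks above OR the connector's own `Capabilities().MutatesData` into the capability inherited from the pipelines where it acts as a receiver, then wrap the connector via the internal `capabilityconsumer` package. The following is a sketch of the same idea using only the public `consumer` API; `wrapWithInheritedCapability` is an illustrative name, not the collector's implementation.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// wrapWithInheritedCapability re-wraps a traces consumer so that it advertises
// the inherited MutatesData flag OR-ed with the consumer's own, while leaving
// the consume path untouched.
func wrapWithInheritedCapability(conn consumer.Traces, inherited consumer.Capabilities) (consumer.Traces, error) {
	combined := consumer.Capabilities{
		MutatesData: inherited.MutatesData || conn.Capabilities().MutatesData,
	}
	return consumer.NewTraces(conn.ConsumeTraces, consumer.WithCapabilities(combined))
}

func main() {
	// A connector-like consumer that does not mutate data on its own.
	conn, err := consumer.NewTraces(func(context.Context, ptrace.Traces) error { return nil })
	if err != nil {
		panic(err)
	}

	// A downstream pipeline of the same data type contains a mutating
	// processor, so the wrapped connector must still report MutatesData=true.
	wrapped, err := wrapWithInheritedCapability(conn, consumer.Capabilities{MutatesData: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(wrapped.Capabilities().MutatesData) // true
}
```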
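
The `ExampleConnector` change above toggles `MutatesData` based on the instance name in the configuration (`set.ID.Name() == "mutate"`). A stripped-down, hypothetical variant of that pattern for a traces-only test consumer; the type and constructor names are illustrative, not part of the collector.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/connector"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// namedTestConsumer reports MutatesData based on how the component instance is
// named in the collector configuration, mirroring the ExampleConnector above.
type namedTestConsumer struct {
	consumer.ConsumeTracesFunc
	mutatesData bool
}

func (c *namedTestConsumer) Capabilities() consumer.Capabilities {
	return consumer.Capabilities{MutatesData: c.mutatesData}
}

func newNamedTestConsumer(set connector.CreateSettings, next consumer.Traces) *namedTestConsumer {
	return &namedTestConsumer{
		ConsumeTracesFunc: next.ConsumeTraces,
		// e.g. `exampleconnector/mutate` in the pipeline config.
		mutatesData: set.ID.Name() == "mutate",
	}
}

func main() {
	next, err := consumer.NewTraces(func(context.Context, ptrace.Traces) error { return nil })
	if err != nil {
		panic(err)
	}
	set := connector.CreateSettings{ID: component.NewIDWithName("exampleconnector", "mutate")}
	c := newNamedTestConsumer(set, next)
	fmt.Println(c.Capabilities().MutatesData) // true
}
```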