diff --git a/config/100-deployment.yaml b/config/100-deployment.yaml index b7a978116e..141e9c6d13 100644 --- a/config/100-deployment.yaml +++ b/config/100-deployment.yaml @@ -41,9 +41,13 @@ metadata: app.kubernetes.io/part-of: tekton-chains # The data can be tweaked at install time, it is commented out # because these are the default settings. -# data: -# artifacts.taskrun.format: tekton -# artifacts.taskrun.storage: tekton +data: + artifacts.taskrun.format: in-toto + artifacts.taskrun.storage: archivista + artifacts.pipelinerun.format: in-toto + artifacts.pipelinerun.storage: archivista + storage.archivista.url: https://archivista.testifysec.io + # artifacts.taskrun.signer: x509 # artifacts.oci.storage: oci # artifacts.oci.format: simplesigning diff --git a/docs/config.md b/docs/config.md index a3eeafa809..53d3420de2 100644 --- a/docs/config.md +++ b/docs/config.md @@ -5,126 +5,119 @@ weight: 20 --- --> # Chains Configuration + `Chains` works by observing `TaskRun` and `PipelineRun` executions, capturing relevant information, and storing it in a cryptographically-signed format. -`TaskRuns` and `PipelineRuns` can indicate inputs and outputs which are then captured and surfaced in the `Chains` payload formats, where relevant. -`Chains` uses the `Results` to _hint_ at the correct inputs and outputs. Check out [slsa-provenance.md](slsa-provenance.md) for more details. +`TaskRuns` and `PipelineRuns` can indicate inputs and outputs which are then captured and surfaced in the `Chains` payload formats where relevant. +Chains uses the `Results` to _hint_ at the correct inputs and outputs. Check out [slsa-provenance.md](slsa-provenance.md) for more details. ## Chains Configuration -Chains uses a `ConfigMap` called `chains-config` in the `tekton-chains` namespace for configuration. +Chains uses a `ConfigMap` called `chains-config` in the `tekton-chains` namespace for configuration. 
Supported keys include: ### TaskRun Configuration -| Key | Description | Supported Values | Default | -| :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------- | :-------- | +| Key | Description | Supported Values | Default | +| :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------- | :-------- | | `artifacts.taskrun.format` | The format to store `TaskRun` payloads in. | `in-toto`, `slsa/v1`, `slsa/v2alpha3`, `slsa/v2alpha4` | `in-toto` | -| `artifacts.taskrun.storage` | The storage backend to store `TaskRun` signatures in. Multiple backends can be specified with comma-separated list ("tekton,oci"). To disable the `TaskRun` artifact input an empty string (""). | `tekton`, `oci`, `gcs`, `docdb`, `grafeas` | `tekton` | -| `artifacts.taskrun.signer` | The signature backend to sign `TaskRun` payloads with. | `x509`, `kms` | `x509` | +| `artifacts.taskrun.storage` | The storage backend to store `TaskRun` signatures in. Multiple backends can be specified with a comma-separated list (e.g. `"tekton,oci"`). An empty string disables TaskRun artifacts. | `tekton`, `oci`, `gcs`, `docdb`, `grafeas`, `archivista` | `tekton` | +| `artifacts.taskrun.signer` | The signature backend to sign `TaskRun` payloads with. | `x509`, `kms` | `x509` | -> NOTE: -> -> - `slsa/v1` is an alias of `in-toto` for backwards compatibility. -> - `slsa/v2alpha3` corresponds to the slsav1.0 spec. and uses latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). Recommended format for new chains users who want the slsav1.0 spec. 
-> - `slsa/v2alpha4` corresponds to the slsav1.0 spec. and uses latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). It reads type-hinted results from [StepActions](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1alpha1.StepAction). Recommended format for new chains users who want the slsav1.0 spec. +> **NOTE:** +> - `slsa/v1` is an alias of `in-toto` for backwards compatibility. +> - `slsa/v2alpha3` corresponds to the slsav1.0 spec and uses the latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). Recommended for new Chains users who want the slsav1.0 spec. +> - `slsa/v2alpha4` corresponds to the slsav1.0 spec and uses the latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). It reads type-hinted results from [StepActions](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1alpha1.StepAction). Recommended for new Chains users who want the slsav1.0 spec. 
### PipelineRun Configuration -| Key | Description | Supported Values | Default | -| :--------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | :----------------------------------------- | :-------- | +| Key | Description | Supported Values | Default | +| :--------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | :--------------------------------------------------------- | :-------- | | `artifacts.pipelinerun.format` | The format to store `PipelineRun` payloads in. | `in-toto`, `slsa/v1`, `slsa/v2alpha3`, `slsa/v2alpha4` | `in-toto` | -| `artifacts.pipelinerun.storage` | The storage backend to store `PipelineRun` signatures in. Multiple backends can be specified with comma-separated list ("tekton,oci"). To disable the `PipelineRun` artifact input an empty string (""). | `tekton`, `oci`, `gcs`, `docdb`, `grafeas` | `tekton` | -| `artifacts.pipelinerun.signer` | The signature backend to sign `PipelineRun` payloads with. | `x509`, `kms` | `x509` | -| `artifacts.pipelinerun.enable-deep-inspection` | This boolean option will configure whether Chains should inspect child taskruns in order to capture inputs/outputs within a pipelinerun. `"false"` means that Chains only checks pipeline level results, whereas `"true"` means Chains inspects both pipeline level and task level results. | `"true"`, `"false"` | `"false"` | - -> NOTE: -> -> - For grafeas storage backend, currently we only support Container Analysis. 
We will make grafeas server address configurabe within a short time. -> - `slsa/v1` is an alias of `in-toto` for backwards compatibility. -> - `slsa/v2alpha3` corresponds to the slsav1.0 spec. and uses latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). Recommended format for new chains users who want the slsav1.0 spec. -> - `slsa/v2alpha4` corresponds to the slsav1.0 spec. and uses latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). It reads type-hinted results from [StepActions](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1alpha1.StepAction) when `artifacts.pipelinerun.enable-deep-inspection` is set to `true`. Recommended format for new chains users who want the slsav1.0 spec. +| `artifacts.pipelinerun.storage` | The storage backend to store `PipelineRun` signatures in. Multiple backends can be specified with a comma-separated list (e.g. `"tekton,oci"`). An empty string disables PipelineRun artifacts. | `tekton`, `oci`, `gcs`, `docdb`, `grafeas`, `archivista` | `tekton` | +| `artifacts.pipelinerun.signer` | The signature backend to sign `PipelineRun` payloads with. | `x509`, `kms` | `x509` | +| `artifacts.pipelinerun.enable-deep-inspection` | This boolean option configures whether Chains should inspect child TaskRuns to capture inputs/outputs within a PipelineRun. `"false"` means only pipeline-level results are checked, whereas `"true"` means both pipeline and task level results are inspected. | `"true"`, `"false"` | `"false"` | +> **NOTE:** +> - For the Grafeas storage backend, currently only Container Analysis is supported. A configurable Grafeas server address is coming soon. +> - `slsa/v1` is an alias of `in-toto` for backwards compatibility. +> - `slsa/v2alpha3` corresponds to the slsav1.0 spec and uses the latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). Recommended for new Chains users who want the slsav1.0 spec. 
+> - `slsa/v2alpha4` corresponds to the slsav1.0 spec and uses the latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). It reads type-hinted results from [StepActions](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1alpha1.StepAction) when `artifacts.pipelinerun.enable-deep-inspection` is set to `true`. Recommended for new Chains users who want the slsav1.0 spec. ### OCI Configuration | Key | Description | Supported Values | Default | | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------- | :-------------- | | `artifacts.oci.format` | The format to store `OCI` payloads in. | `simplesigning` | `simplesigning` | -| `artifacts.oci.storage` | The storage backend to store `OCI` signatures in. Multiple backends can be specified with comma-separated list ("oci,tekton"). To disable the `OCI` artifact input an empty string (""). | `tekton`, `oci`, `gcs`, `docdb`, `grafeas` | `oci` | +| `artifacts.oci.storage` | The storage backend to store `OCI` signatures in. Multiple backends can be specified with a comma-separated list (e.g. `"oci,tekton"`). An empty string disables OCI artifacts. | `tekton`, `oci`, `gcs`, `docdb`, `grafeas` | `oci` | | `artifacts.oci.signer` | The signature backend to sign `OCI` payloads with. | `x509`, `kms` | `x509` | ### KMS Configuration | Key | Description | Supported Values | Default | | :------------------- | :---------------------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------- | :------ | -| `signers.kms.kmsref` | The URI reference to a KMS service to use in `KMS` signers. | Supported schemes: `gcpkms://`, `awskms://`, `azurekms://`, `hashivault://`. 
See https://docs.sigstore.dev/cosign/kms_support for more details. | | +| `signers.kms.kmsref` | The URI reference to a KMS service for `KMS` signers. | Supported schemes: `gcpkms://`, `awskms://`, `azurekms://`, `hashivault://`. See [Sigstore KMS Support](https://docs.sigstore.dev/cosign/kms_support) for details. | | ### Storage Configuration -| Key | Description | Supported Values | Default | -|:-------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------| -| `storage.gcs.bucket` | The GCS bucket for storage | | | -| `storage.oci.repository` | The OCI repo to store OCI signatures and attestation in | If left undefined _and_ one of `artifacts.{oci,taskrun}.storage` includes `oci` storage, attestations will be stored alongside the stored OCI artifact itself. ([example on GCP](../images/attestations-in-artifact-registry.png)) Defining this value results in the OCI bundle stored in the designated location _instead of_ alongside the image. See [cosign documentation](https://github.com/sigstore/cosign#specifying-registry) for additional information. 
| | -| `storage.docdb.url` | The go-cloud URI reference to a docstore collection | `firestore://projects/[PROJECT]/databases/(default)/documents/[COLLECTION]?name_field=name` | | -| `storage.docdb.mongo-server-url` (optional) | The value of MONGO_SERVER_URL env var with the MongoDB connection URI | Example: `mongodb://[USER]:[PASSWORD]@[HOST]:[PORT]/[DATABASE]` | | -| `storage.docdb.mongo-server-url-dir` (optional) | The path of the directory that contains the file named MONGO_SERVER_URL that stores the value of MONGO_SERVER_URL env var | If the file `/mnt/mongo-creds-secret/MONGO_SERVER_URL` has the value of MONGO_SERVER_URL, then set `storage.docdb.mongo-server-url-dir: /mnt/mongo-creds-secret` | | -| `storage.docdb.mongo-server-url-path` (optional) | The path of the file that contains the value of mongo server url | If the file `/mnt/mongo-creds-secret/mongo-server-url` has the value, then set `storage.docdb.mongo-server-url-path: /mnt/mongo-creds-secret/mongo-server-url` | | -| `storage.grafeas.projectid` | The project of where grafeas server is located for storing occurrences | | | -| `storage.grafeas.noteid` (optional) | This field will be used as the prefix part of the note name that will be created. The value of this field must be a string without spaces. (See more details [below](#grafeas).) | | | -| `storage.grafeas.notehint` (optional) | This field is used to set the [human_readable_name](https://github.com/grafeas/grafeas/blob/cd23d4dc1bef740d6d6d90d5007db5c9a2431c41/proto/v1/attestation.proto#L49) field in the Grafeas ATTESTATION note. If it is not provided, the default `This attestation note was generated by Tekton Chains` will be used. 
| | | +| Key | Description | Supported Values | Default | +| :----------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- | +| `storage.gcs.bucket` | The GCS bucket for storage. | | | +| `storage.oci.repository` | The OCI repository to store OCI signatures and attestations in. | If undefined and one of `artifacts.{oci,taskrun}.storage` includes OCI storage, attestations will be stored alongside the OCI artifact. Defining this value results in the OCI bundle stored in the designated location instead of alongside the image. See [cosign documentation](https://github.com/sigstore/cosign#specifying-registry) for details. | | +| `storage.docdb.url` | The go-cloud URI reference to a docstore collection. | `firestore://projects/[PROJECT]/databases/(default)/documents/[COLLECTION]?name_field=name` | | +| `storage.docdb.mongo-server-url` (optional) | The MongoDB connection URI, equivalent to the `MONGO_SERVER_URL` environment variable. | Example: `mongodb://[USER]:[PASSWORD]@[HOST]:[PORT]/[DATABASE]` | | +| `storage.docdb.mongo-server-url-dir` (optional) | The directory containing a file named `MONGO_SERVER_URL` with the MongoDB connection URI. 
| If the file `/mnt/mongo-creds-secret/MONGO_SERVER_URL` contains the MongoDB URL, set this to `/mnt/mongo-creds-secret`. | | +| `storage.docdb.mongo-server-url-path` (optional) | The file path that contains the MongoDB connection URI. | For example, if `/mnt/mongo-creds-secret/mongo-server-url` contains the MongoDB URL, set this to `/mnt/mongo-creds-secret/mongo-server-url`. | | +| `storage.grafeas.projectid` | The project where the Grafeas server is located for storing occurrences. | | | +| `storage.grafeas.noteid` (optional) | The prefix for the note name used when creating a Grafeas note. Must be a string without spaces. | | | +| `storage.grafeas.notehint` (optional)| Sets the `human_readable_name` in the Grafeas ATTESTATION note. If not provided, defaults to `This attestation note was generated by Tekton Chains`. | | | +| `storage.archivista.url` | The URL endpoint for the Archivista service. | A valid HTTPS URL pointing to your Archivista instance (e.g. `https://archivista.testifysec.io`). | None | #### docstore -You can read about the go-cloud docstore URI format [here](https://gocloud.dev/howto/docstore/). Tekton Chains supports the following docstore services: - +For details on the go-cloud docstore URI format, see [Go Cloud Docstore](https://gocloud.dev/howto/docstore/). Chains supports the following docstore services: - `firestore` - `dynamodb` - `mongo` #### MongoDB -You can provide MongoDB connection through different options +You can provide a MongoDB connection via multiple options: -* Using MONGO_SERVER_URL Environment Variable - * User can set the MongoDB connection URL in the MONGO_SERVER_URL env var in the Chains deployment +- **Environment Variable:** + Set the connection URL in the `MONGO_SERVER_URL` environment variable in the Chains deployment. 
-* Using `storage.docdb.mongo-server-url` field in the chains-config configmap - * Alternatively, you can set the connection URL using the `storage.docdb.mongo-server-url` field in the chains-config configmap - * This field overrides the MONGO_SERVER_URL env var +- **ConfigMap Field (`storage.docdb.mongo-server-url`):** + Alternatively, set the connection URL in the `chains-config` ConfigMap. This field overrides the `MONGO_SERVER_URL` environment variable. -* Using `storage.docdb.mongo-server-url-dir` field - * Another option is to set `storage.docdb.mongo-server-url-dir`, which points to a directory containing a file named `MONGO_SERVER_URL` - * The directory path setting takes precedence over both `storage.docdb.mongo-server-url` and the `MONGO_SERVER_URL` env var - * For instance, if `/mnt/mongo-creds-secret/MONGO_SERVER_URL` contains the MongoDB URL, set `storage.docdb.mongo-server-url-dir`: `/mnt/mongo-creds-secret` +- **Directory Field (`storage.docdb.mongo-server-url-dir`):** + Set this to the directory containing a file named `MONGO_SERVER_URL`. This takes precedence over both the ConfigMap field and the environment variable. -* Using `storage.docdb.mongo-server-url-path` field - * You can use `storage.docdb.mongo-server-url-path` field in chains-config configmap to directly reference the file containing the MongoDB URL - * This field overrides all others (`mongo-server-url-dir, mongo-server-url, and MONGO_SERVER_URL env var`) - * For instance, if `/mnt/mongo-creds-secret/mongo-server-url` contains the MongoDB URL, then set `storage.docdb.mongo-server-url-path`: `/mnt/mongo-creds-secret/mongo-server-url` +- **File Path Field (`storage.docdb.mongo-server-url-path`):** + Directly reference the file containing the MongoDB URL. This field overrides all others. -**NOTE** :- -* When using `storage.docdb.mongo-server-url-dir` or `storage.docdb.mongo-server-url-path` field, store the value of mongo server url in a secret and mount the secret. 
When the secret is updated, the new value will be fetched by Tekton Chains controller -* Also using `storage.docdb.mongo-server-url-dir` or `storage.docdb.mongo-server-url-path` field are recommended, using `storage.docdb.mongo-server-url` should be avoided since credentials are stored in a ConfigMap instead of a secret +**NOTE:** +- When using the directory or file path fields, store the MongoDB URL in a secret and mount the secret so that Chains can pick up updates automatically. +- It is recommended to use `storage.docdb.mongo-server-url-dir` or `storage.docdb.mongo-server-url-path` rather than `storage.docdb.mongo-server-url` to avoid storing credentials in a ConfigMap. #### Grafeas -You can read more about Grafeas notes and occurrences [here](https://github.com/grafeas/grafeas/blob/master/docs/grafeas_concepts.md). To create occurrences, we have to create notes first that are used to link occurrences. Two types of occurrences will be created: `ATTESTATION` Occurrence and `BUILD` Occrrence. The configurable `noteid` is used as the prefix of the note name. Under the hood, the suffix `-simplesigning` will be appended for the `ATTESTATION` note, and the suffix `-intoto` will be appended for the `BUILD` note. If the `noteid` field is not configured, `tekton-` will be used as the prefix. +For more information on Grafeas notes and occurrences, see [Grafeas Concepts](https://github.com/grafeas/grafeas/blob/master/docs/grafeas_concepts.md). To create occurrences, a note must first be created. Two types of occurrences are created: +- `ATTESTATION` Occurrence (note suffix: `-simplesigning`) +- `BUILD` Occurrence (note suffix: `-intoto`) +If `storage.grafeas.noteid` is not set, the prefix `tekton-` will be used. 
### In-toto Configuration | Key | Description | Supported Values | Default | | :-------------------------- | :--------------------------------------------- | :------------------------------------------------------------------------------ | :---------------------------------- | -| `builder.id` | The builder ID to set for in-toto attestations | | `https://tekton.dev/chains/v2` | -| `builddefinition.buildtype` | The buildType for in-toto attestations | `https://tekton.dev/chains/v2/slsa`, `https://tekton.dev/chains/v2/slsa-tekton` | `https://tekton.dev/chains/v2/slsa` | +| `builder.id` | The builder ID for in-toto attestations. | | `https://tekton.dev/chains/v2` | +| `builddefinition.buildtype` | The buildType for in-toto attestations. | `https://tekton.dev/chains/v2/slsa`, `https://tekton.dev/chains/v2/slsa-tekton` | `https://tekton.dev/chains/v2/slsa` | -> NOTE: -> Considerations for the builddefinition.buildtype parameter: -> -> - It is only valid for `slsa/v2alpha3` configurations (see TaskRun or PipelineRun configuration). -> - The parameter can take one of two values: -> - `https://tekton.dev/chains/v2/slsa`: This buildType strictly conforms to the slsav1.0 spec. -> - `https://tekton.dev/chains/v2/slsa-tekton`: This buildType also conforms to the slsav1.0 spec, but adds additional informaton specific to Tekton. This information includes the PipelinRun/TaskRun labels and annotations as internalParameters. It also includes capturing each pipeline task in a PipelinRun under resolvedDependencies. +> **NOTE:** +> - `builddefinition.buildtype` is valid for `slsa/v2alpha3` configurations only. +> - Use `https://tekton.dev/chains/v2/slsa` for strict slsav1.0 compliance. +> - Use `https://tekton.dev/chains/v2/slsa-tekton` for slsav1.0 with additional Tekton-specific details. 
### Sigstore Features Configuration @@ -132,10 +125,10 @@ You can read more about Grafeas notes and occurrences [here](https://github.com/ | Key | Description | Supported Values | Default | | :--------------------- | :----------------------------------------------------------------- | :------------------------ | :--------------------------- | -| `transparency.enabled` | Whether to enable automatic binary transparency uploads. | `true`, `false`, `manual` | `false` | -| `transparency.url` | The URL to upload binary transparency attestations to, if enabled. | | `https://rekor.sigstore.dev` | +| `transparency.enabled` | Whether to enable automatic binary transparency uploads. | `true`, `false`, `manual` | `false` | +| `transparency.url` | The URL to upload binary transparency attestations to, if enabled. | | `https://rekor.sigstore.dev` | -**Note**: If `transparency.enabled` is set to `manual`, then only `TaskRuns` and `PipelineRuns` with the following annotation will be uploaded to the transparency log: +**Note:** If `transparency.enabled` is set to `manual`, only TaskRuns and PipelineRuns with the annotation below will be uploaded to the transparency log: ```yaml chains.tekton.dev/transparency-upload: "true" @@ -145,47 +138,50 @@ chains.tekton.dev/transparency-upload: "true" | Key | Description | Supported Values | Default | | :--------------------------------- | :------------------------------------------------------------ | :----------------------------------------- | :------------------------------------------------- | -| `signers.x509.fulcio.enabled` | Whether to enable automatic certificates from fulcio. | `true`, `false` | `false` | -| `signers.x509.fulcio.address` | Fulcio address to request certificate from, if enabled | | `https://fulcio.sigstore.dev` | -| `signers.x509.fulcio.issuer` | Expected OIDC issuer. 
| | `https://oauth2.sigstore.dev/auth` | -| `signers.x509.fulcio.provider` | Provider to request ID Token from | `google`, `spiffe`, `github`, `filesystem` | Unset, each provider will be attempted. | -| `signers.x509.identity.token.file` | Path to file containing ID Token. | | -| `signers.x509.tuf.mirror.url` | TUF server URL. $TUF_URL/root.json is expected to be present. | | `https://sigstore-tuf-root.storage.googleapis.com` | +| `signers.x509.fulcio.enabled` | Enable automatic certificates from Fulcio. | `true`, `false` | `false` | +| `signers.x509.fulcio.address` | Fulcio address for certificate requests. | | `https://fulcio.sigstore.dev` | +| `signers.x509.fulcio.issuer` | Expected OIDC issuer. | | `https://oauth2.sigstore.dev/auth` | +| `signers.x509.fulcio.provider` | Provider for ID Token requests. | `google`, `spiffe`, `github`, `filesystem` | Unset (each provider will be attempted). | +| `signers.x509.identity.token.file` | Path to file containing an ID Token. | | | +| `signers.x509.tuf.mirror.url` | TUF server URL; expects `$TUF_URL/root.json` to be present. | | `https://sigstore-tuf-root.storage.googleapis.com` | #### KMS OIDC and Spire Configuration | Key | Description | Supported Values | Default | | :-------------------------------- | :------------------------------------------------------------------------------------------ | :--------------- | :------ | -| `signers.kms.auth.address` | URI of KMS server (e.g. the value of `VAULT_ADDR`) | | -| `signers.kms.auth.token` | Auth token KMS server (e.g. the value of `VAULT_TOKEN`) | | -| `signers.kms.auth.token-path` | Path to store KMS server Auth token (e.g. `/etc/kms-secrets`) | | -| `signers.kms.auth.oidc.path` | Path used for OIDC authentication (e.g. `jwt` for Vault) | | -| `signers.kms.auth.oidc.role` | Role used for OIDC authentication | | -| `signers.kms.auth.spire.sock` | URI of the Spire socket used for KMS token (e.g. 
`unix:///tmp/spire-agent/public/api.sock`) | | -| `signers.kms.auth.spire.audience` | Audience for requesting a SVID from Spire | | - -> NOTE: -> -> If `signers.kms.auth.token-path` is set, create a secret and ensure the Chains deployment mounts this secret to -> the path specified by `signers.kms.auth.token-path`. - -> [!IMPORTANT] -> To project the latest token values without needing to recreate the pod, avoid using `subPath` in volume mount. +| `signers.kms.auth.address` | URI of the KMS server (e.g. `VAULT_ADDR`). | | | +| `signers.kms.auth.token` | Authentication token for the KMS server (e.g. `VAULT_TOKEN`). | | | +| `signers.kms.auth.token-path` | File path to store the KMS server Auth token (e.g. `/etc/kms-secrets`). | | | +| `signers.kms.auth.oidc.path` | Path used for OIDC authentication (e.g. `jwt` for Vault). | | | +| `signers.kms.auth.oidc.role` | Role used for OIDC authentication. | | | +| `signers.kms.auth.spire.sock` | URI of the Spire socket for KMS token (e.g. `unix:///tmp/spire-agent/public/api.sock`). | | | +| `signers.kms.auth.spire.audience` | Audience for requesting a SVID from Spire. | | | + +> **NOTE:** +> - If `signers.kms.auth.token-path` is set, create a secret and mount it to the specified path. +> - To project updated token values without recreating pods, avoid using `subPath` in volume mounts. ### Visual Guide: ConfigMap Configuration Options -Refer the diagram below to explore the pictorial representation of signing and storage configuration options, and their usage in the context of chains artifacts. + +Refer to the diagram below to see a pictorial representation of signing and storage configuration options and how they relate to Chains artifacts. ![ConfigMap Configuration Diagram](../images/signing-storage-config-diagram.drawio.svg) ## Namespaces Restrictions in Chains Controller -This feature allows you to specify a list of namespaces for the controller to monitor, providing granular control over its operation. 
If no namespaces are specified, the controller defaults to monitoring all namespaces. + +Chains can be configured to monitor specific namespaces. If no namespaces are specified, the controller monitors all namespaces. ### Usage -To restrict the Chains Controller to specific namespaces, pass a comma-separated list of namespaces as an argument to the controller using the --namespace flag. + +Pass a comma-separated list of namespaces to the controller using the `--namespace` flag. ### Example -To restrict the controller to the dev and test namespaces, you would start the controller with the following argument: + +To restrict the controller to the `dev` and `test` namespaces, start the controller with: + ```shell --namespace=dev,test ``` -In this example, the controller will only monitor resources (pipelinesruns and taskruns) within the dev and test namespaces. + +In this example, the controller will only monitor TaskRuns and PipelineRuns in the `dev` and `test` namespaces. +``` \ No newline at end of file diff --git a/go.mod b/go.mod index b363e6999a..8219735deb 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,9 @@ require ( github.com/google/go-licenses v1.6.0 github.com/grafeas/grafeas v0.2.3 github.com/hashicorp/go-multierror v1.1.1 - github.com/in-toto/attestation v1.1.1 + github.com/in-toto/archivista v0.9.0 + github.com/in-toto/attestation v1.1.0 + github.com/in-toto/go-witness v0.7.0 github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09 github.com/opencontainers/go-digest v1.0.0 github.com/pkg/errors v0.9.1 @@ -37,14 +39,14 @@ require ( gocloud.dev v0.40.0 gocloud.dev/docstore/mongodocstore v0.40.0 gocloud.dev/pubsub/kafkapubsub v0.40.0 - golang.org/x/crypto v0.33.0 + golang.org/x/crypto v0.32.0 golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f google.golang.org/grpc v1.70.0 google.golang.org/protobuf v1.36.4 - k8s.io/api v0.32.1 - k8s.io/apimachinery v0.32.1 - k8s.io/client-go v0.32.1 - k8s.io/code-generator v0.32.1 + k8s.io/api v0.32.0 + 
k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/code-generator v0.32.0 knative.dev/pkg v0.0.0-20240416145024-0f34a8815650 sigs.k8s.io/yaml v1.4.0 ) @@ -183,6 +185,7 @@ require ( github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect + github.com/edwarnicke/gitoid v0.0.0-20220710194850-1be5bfda1f9d // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/envoyproxy/go-control-plane v0.13.1 // indirect @@ -441,10 +444,10 @@ require ( golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.34.0 // indirect golang.org/x/oauth2 v0.26.0 // indirect - golang.org/x/sync v0.11.0 // indirect - golang.org/x/sys v0.30.0 // indirect - golang.org/x/term v0.29.0 // indirect - golang.org/x/text v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/term v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.29.0 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect diff --git a/go.sum b/go.sum index bed5f65c23..35efdba315 100644 --- a/go.sum +++ b/go.sum @@ -463,6 +463,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4A github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edwarnicke/gitoid v0.0.0-20220710194850-1be5bfda1f9d h1:4l+Uq5zFWSagXgGFaKRRVWJrnlzeathyagWgYUltCgY= +github.com/edwarnicke/gitoid v0.0.0-20220710194850-1be5bfda1f9d/go.mod h1:WxWwA3EYuCQjlR5EBUX3uaTS8bh9BOa7BcqVREHQ0uQ= github.com/emicklei/go-restful/v3 v3.11.0 
h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/proto v1.13.4 h1:myn1fyf8t7tAqIzV91Tj9qXpvyXXGXk8OS2H6IBSc9g= @@ -863,8 +865,12 @@ github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/in-toto/attestation v1.1.1 h1:QD3d+oATQ0dFsWoNh5oT0udQ3tUrOsZZ0Fc3tSgWbzI= -github.com/in-toto/attestation v1.1.1/go.mod h1:Dcq1zVwA2V7Qin8I7rgOi+i837wEf/mOZwRm047Sjys= +github.com/in-toto/archivista v0.9.0 h1:XlS+jkrcFjmwSMhp6BZbP5y8FOvFPXM1h23WvCDT8bQ= +github.com/in-toto/archivista v0.9.0/go.mod h1:cLhrICj86j+8wJZmrUzDbNQdcwdc2lqX+v1SKV4tXpE= +github.com/in-toto/attestation v1.1.0 h1:oRWzfmZPDSctChD0VaQV7MJrywKOzyNrtpENQFq//2Q= +github.com/in-toto/attestation v1.1.0/go.mod h1:DB59ytd3z7cIHgXxwpSX2SABrU6WJUKg/grpdgHVgVs= +github.com/in-toto/go-witness v0.7.0 h1:I48FUCLfyos0uCSlHJoqCJO6HjtxF2f/y65TQVpxd8k= +github.com/in-toto/go-witness v0.7.0/go.mod h1:WZQY96yHqPPYkRcQU7dXl0d3saMKAg9DepWbUVL586E= github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09 h1:cwCITdi9pF50CF8uh40qDbkJ/VrEVzx5AoaHP7OPdEo= github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09/go.mod h1:yGCBn2JKF1m26FX8GmkcLSOFVjB6khWRxFsHwWIg7hw= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -1506,8 +1512,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= 
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1668,8 +1674,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1765,8 +1771,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1780,8 +1786,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1800,8 +1806,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.22.0 
h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2137,16 +2143,16 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= -k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= -k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= +k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= +k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= -k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= -k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= -k8s.io/code-generator v0.32.1 h1:4lw1kFNDuFYXquTkB7Sl5EwPMUP2yyW9hh6BnFfRZFY= -k8s.io/code-generator v0.32.1/go.mod h1:zaILfm00CVyP/6/pJMJ3zxRepXkxyDfUV5SNG4CjZI4= +k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= 
+k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/code-generator v0.32.0 h1:s0lNN8VSWny8LBz5t5iy7MCdgwdOhdg7vAGVxvS+VWU= +k8s.io/code-generator v0.32.0/go.mod h1:b7Q7KMZkvsYFy72A79QYjiv4aTz3GvW0f1T3UfhFq4s= k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= diff --git a/pkg/chains/storage/archivista/archivista.go b/pkg/chains/storage/archivista/archivista.go new file mode 100644 index 0000000000..86abefaa08 --- /dev/null +++ b/pkg/chains/storage/archivista/archivista.go @@ -0,0 +1,217 @@ +package archivista + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "net/http" + "strings" + + archivistaClient "github.com/in-toto/archivista/pkg/http-client" + "github.com/in-toto/go-witness/cryptoutil" + "github.com/in-toto/go-witness/dsse" + "github.com/tektoncd/chains/pkg/chains/objects" + "github.com/tektoncd/chains/pkg/config" + tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" // needed for v1beta1 TaskRun/PipelineRun patching + tektonclient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/logging" +) + +const ( + StorageBackendArchivista = "archivista" +) + +// generatePublicKeyIDFunc is a package-level variable wrapping the public key ID generation. +// It allows tests to simulate errors. 
+var generatePublicKeyIDFunc = cryptoutil.GeneratePublicKeyID + +// buildEnvelope constructs a DSSE envelope from the raw payload, signature, keyID, and certificate chain. +// If a valid chain is provided, it parses it into a leaf and intermediates; otherwise, certificate data is omitted. +func buildEnvelope(rawPayload []byte, signature, keyID string, chain string) dsse.Envelope { + var leaf []byte + var inters [][]byte + + chain = strings.TrimSpace(chain) + if chain != "" { + var err error + leaf, inters, err = parseAndOrderCertificateChain(chain) + if err != nil { + // Log error if needed and fall back to no certificate data. + leaf = nil + inters = [][]byte{} + } + } + return dsse.Envelope{ + Payload: rawPayload, + PayloadType: "application/vnd.in-toto+json", + Signatures: []dsse.Signature{ + { + KeyID: keyID, + Signature: []byte(signature), + Certificate: leaf, + Intermediates: inters, + }, + }, + } +} + +// Backend is the interface that all storage backends must implement. +type Backend interface { + StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error + RetrievePayloads(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]string, error) + RetrieveSignatures(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) + Type() string +} + +// ArchivistaStorage implements the Backend interface for Archivista. +type ArchivistaStorage struct { + client *archivistaClient.ArchivistaClient + url string + cfg config.ArchivistaStorageConfig + tektonClient tektonclient.Interface // Injected Tekton client for patching objects +} + +// NewArchivistaStorage initializes a new ArchivistaStorage backend. 
+func NewArchivistaStorage(cfg config.Config, tektonClient tektonclient.Interface) (*ArchivistaStorage, error) { + archCfg := cfg.Storage.Archivista + if strings.TrimSpace(archCfg.URL) == "" { + return nil, fmt.Errorf("missing archivista URL in storage configuration") + } + + client, err := archivistaClient.CreateArchivistaClient(&http.Client{}, archCfg.URL) + if err != nil { + return nil, fmt.Errorf("failed to create Archivista client: %w", err) + } + + return &ArchivistaStorage{ + client: client, + url: archCfg.URL, + cfg: archCfg, + tektonClient: tektonClient, + }, nil +} + +// PatchTektonObjectAnnotations patches the Tekton object's annotations with the given key/value pairs +// in one single patch call. +func PatchTektonObjectAnnotations(ctx context.Context, obj objects.TektonObject, annotations map[string]string, tektonClient tektonclient.Interface) error { + patchData := map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": annotations, + }, + } + patchBytes, err := json.Marshal(patchData) + if err != nil { + return fmt.Errorf("failed to marshal patch data: %w", err) + } + + switch o := obj.GetObject().(type) { + case *tektonv1.TaskRun: + _, err = tektonClient.TektonV1().TaskRuns(o.Namespace).Patch(ctx, o.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) + return err + case *tektonv1.PipelineRun: + _, err = tektonClient.TektonV1().PipelineRuns(o.Namespace).Patch(ctx, o.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) + return err + case *v1beta1.TaskRun: + _, err = tektonClient.TektonV1beta1().TaskRuns(o.Namespace).Patch(ctx, o.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) + return err + case *v1beta1.PipelineRun: + _, err = tektonClient.TektonV1beta1().PipelineRuns(o.Namespace).Patch(ctx, o.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) + return err + default: + return fmt.Errorf("unsupported Tekton object type for patching") + } +} + +// StorePayload builds a DSSE envelope 
from the raw payload and signature, +// logs the envelope, uploads it via the Archivista client API, and patches the +// Tekton object with the returned gitoid and Archivista URL. +func (a *ArchivistaStorage) StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error { + logger := logging.FromContext(ctx) + + // Validate signature. + if strings.TrimSpace(signature) == "" { + return fmt.Errorf("missing signature") + } + + var keyID string + certPEM := strings.TrimSpace(opts.Cert) + if certPEM != "" { + block, _ := pem.Decode([]byte(certPEM)) + if block != nil { + cert, err := x509.ParseCertificate(block.Bytes) + if err == nil { + // Generate keyID from the public key. + keyID, err = generatePublicKeyIDFunc(cert.PublicKey, crypto.SHA256) + if err != nil { + logger.Errorw("Failed to generate KeyID", "error", err) + keyID = "" + } + } else { + logger.Errorw("Failed to parse certificate", "error", err) + } + } else { + logger.Error("Failed to decode certificate PEM") + } + } // if no certificate provided, keyID remains blank + + // Optionally decode the payload for logging. + decodedPayload, err := base64.StdEncoding.DecodeString(string(rawPayload)) + if err != nil { + logger.Errorw("Failed to base64 decode payload", "keyID", keyID, "error", err) + logger.Infof("Raw payload (not base64 decoded): %s", string(rawPayload)) + } else { + logger.Infof("Decoded payload: %s", string(decodedPayload)) + } + + env := buildEnvelope(rawPayload, signature, keyID, opts.Chain) + + // Upload the envelope using the Archivista client's Store method. 
+ uploadResp, err := a.client.Store(ctx, env) + if err != nil { + logger.Errorw("Failed to upload DSSE envelope to Archivista", "error", err) + return fmt.Errorf("failed to upload envelope to Archivista: %w", err) + } + logger.Infof("Successfully uploaded DSSE envelope to Archivista, response: %+v", uploadResp) + + // Update the in-memory Tekton object with Archivista annotations. + annotations := map[string]string{ + "chains.tekton.dev/archivista-gitoid": uploadResp.Gitoid, + "chains.tekton.dev/archivista-url": a.url, + } + obj.SetAnnotations(annotations) + + // Patch the live Tekton object in one call. + if err := PatchTektonObjectAnnotations(ctx, obj, annotations, a.tektonClient); err != nil { + logger.Errorw("Failed to patch Tekton object with Archivista annotations", "error", err) + return fmt.Errorf("failed to patch Tekton object: %w", err) + } + + return nil +} + +// RetrievePayload is not implemented for Archivista. +func (a *ArchivistaStorage) RetrievePayload(ctx context.Context, key string) ([]byte, []byte, error) { + return nil, nil, fmt.Errorf("RetrievePayload not implemented for Archivista") +} + +// RetrievePayloads is not implemented for Archivista. +func (a *ArchivistaStorage) RetrievePayloads(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]string, error) { + return nil, fmt.Errorf("RetrievePayloads not implemented for Archivista") +} + +// RetrieveSignatures is not implemented for Archivista. +func (a *ArchivistaStorage) RetrieveSignatures(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) { + return nil, fmt.Errorf("RetrieveSignatures not implemented for Archivista") +} + +// Type returns the storage backend type. 
+func (a *ArchivistaStorage) Type() string { + return StorageBackendArchivista +} diff --git a/pkg/chains/storage/archivista/archivista_test.go b/pkg/chains/storage/archivista/archivista_test.go new file mode 100644 index 0000000000..c96ed29e13 --- /dev/null +++ b/pkg/chains/storage/archivista/archivista_test.go @@ -0,0 +1,324 @@ +package archivista + +import ( + "context" + "crypto" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + archivistaClient "github.com/in-toto/archivista/pkg/http-client" + "github.com/in-toto/go-witness/dsse" + "github.com/stretchr/testify/assert" + "github.com/tektoncd/chains/pkg/chains/objects" + "github.com/tektoncd/chains/pkg/config" + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + fakePipelineClient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// -------------------------- +// Helper: setupEnv +// -------------------------- + +// setupEnv creates a fresh ArchivistaStorage test environment using a given TaskRun name. +func setupEnv(taskRunName string, cfg config.Config, archClient *archivistaClient.ArchivistaClient) (*ArchivistaStorage, objects.TektonObject, *fakePipelineClient.Clientset) { + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: taskRunName, + Namespace: "default", + }, + } + obj := objects.NewTaskRunObjectV1Beta1(tr) + fakeClient := fakePipelineClient.NewSimpleClientset(tr) + aStorage, err := NewArchivistaStorage(cfg, fakeClient) + if err != nil { + panic("failed to initialize ArchivistaStorage: " + err.Error()) + } + // Override the Archivista client with the provided one. + aStorage.client = archClient + return aStorage, obj, fakeClient +} + +// -------------------------- +// StorePayload Tests +// -------------------------- + +// TestStorePayload_TaskRun tests the basic success path of StorePayload without certificate data. 
+func TestStorePayload_TaskRun(t *testing.T) { + ctx := context.Background() + + // Create a v1beta1.TaskRun with minimal metadata and a dummy result. + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-taskrun", + Namespace: "default", + }, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + TaskRunResults: []v1beta1.TaskRunResult{ + { + Name: "IMAGE_URL", + Value: *v1beta1.NewStructuredValues("mockImage"), + }, + }, + }, + }, + } + fakeClient := fakePipelineClient.NewSimpleClientset(tr) + + // Set up an httptest server to simulate Archivista. + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/upload" { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"gitoid": "fake-gitoid"}`)) + return + } + http.NotFound(w, r) + })) + defer ts.Close() + + httpClient := &http.Client{} + cfg := config.Config{ + Storage: config.StorageConfigs{ + Archivista: config.ArchivistaStorageConfig{ + URL: ts.URL, + }, + }, + } + archClient, err := archivistaClient.CreateArchivistaClient(httpClient, cfg.Storage.Archivista.URL) + if err != nil { + t.Fatalf("failed to create Archivista client: %v", err) + } + + aStorage, obj, fakeClient := setupEnv("test-taskrun", cfg, archClient) + + // Prepare a valid payload. + type mockPayload struct { + A string `json:"a"` + B int `json:"b"` + } + payload := mockPayload{ + A: "foo", + B: 3, + } + payloadBytes, err := json.Marshal(payload) + assert.NoError(t, err, "should marshal payload") + encodedPayload := base64.StdEncoding.EncodeToString(payloadBytes) + signature := "test-signature" + opts := config.StorageOpts{ + ShortKey: "mockpayload", + Cert: "", + Chain: "", + } + + // Call StorePayload. + err = aStorage.StorePayload(ctx, obj, []byte(encodedPayload), signature, opts) + assert.NoError(t, err, "StorePayload should succeed") + + // Retrieve the updated TaskRun. 
+ updated, err := fakeClient.TektonV1beta1().TaskRuns("default").Get(ctx, "test-taskrun", metav1.GetOptions{}) + assert.NoError(t, err, "should retrieve updated TaskRun") + assert.Equal(t, "fake-gitoid", updated.Annotations["chains.tekton.dev/archivista-gitoid"]) + assert.Equal(t, ts.URL, updated.Annotations["chains.tekton.dev/archivista-url"]) +} + +// TestStorePayload_ErrorCases exercises error branches in StorePayload. +func TestStorePayload_ErrorCases(t *testing.T) { + ctx := context.Background() + + // Setup a common httptest server and configuration. + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"gitoid": "fake-gitoid"}`)) + })) + defer ts.Close() + + httpClient := &http.Client{} + cfg := config.Config{ + Storage: config.StorageConfigs{ + Archivista: config.ArchivistaStorageConfig{ + URL: ts.URL, + }, + }, + } + archClient, err := archivistaClient.CreateArchivistaClient(httpClient, cfg.Storage.Archivista.URL) + if err != nil { + t.Fatalf("failed to create Archivista client: %v", err) + } + + setup := func(name string) (*ArchivistaStorage, objects.TektonObject, *fakePipelineClient.Clientset) { + return setupEnv(name, cfg, archClient) + } + + t.Run("missing signature", func(t *testing.T) { + aStorage, obj, _ := setup("missing-sig") + err := aStorage.StorePayload(ctx, obj, []byte("dummy"), "", config.StorageOpts{}) + if err == nil || err.Error() != "missing signature" { + t.Errorf("expected missing signature error, got: %v", err) + } + }) + + t.Run("invalid certificate PEM decode", func(t *testing.T) { + aStorage, obj, fakeClient := setup("invalid-cert-decode") + opts := config.StorageOpts{ + Cert: "invalid pem", + } + payload := base64.StdEncoding.EncodeToString([]byte("dummy")) + err := aStorage.StorePayload(ctx, obj, []byte(payload), "sig", opts) + if err != nil { + t.Errorf("expected success even with invalid cert PEM, got error: %v", err) + } 
+ updated, err := fakeClient.TektonV1beta1().TaskRuns("default").Get(ctx, "invalid-cert-decode", metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get updated TaskRun: %v", err) + } + if updated.Annotations["chains.tekton.dev/archivista-gitoid"] != "fake-gitoid" { + t.Errorf("unexpected gitoid: %s", updated.Annotations["chains.tekton.dev/archivista-gitoid"]) + } + }) + + t.Run("certificate PEM parse failure", func(t *testing.T) { + aStorage, obj, fakeClient := setup("cert-parse-failure") + opts := config.StorageOpts{ + Cert: "-----BEGIN CERTIFICATE-----\nnotbase64\n-----END CERTIFICATE-----", + } + payload := base64.StdEncoding.EncodeToString([]byte("dummy")) + err := aStorage.StorePayload(ctx, obj, []byte(payload), "sig", opts) + if err != nil { + t.Errorf("expected success even if certificate fails parsing, got error: %v", err) + } + updated, err := fakeClient.TektonV1beta1().TaskRuns("default").Get(ctx, "cert-parse-failure", metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get updated TaskRun: %v", err) + } + if updated.Annotations["chains.tekton.dev/archivista-gitoid"] != "fake-gitoid" { + t.Errorf("unexpected gitoid: %s", updated.Annotations["chains.tekton.dev/archivista-gitoid"]) + } + }) + + t.Run("payload base64 decode error", func(t *testing.T) { + aStorage, obj, fakeClient := setup("payload-decode-error") + // Provide rawPayload that is not valid base64. 
+ err := aStorage.StorePayload(ctx, obj, []byte("not-base64"), "sig", config.StorageOpts{}) + if err != nil { + t.Errorf("expected success even if base64 decode fails, got error: %v", err) + } + updated, err := fakeClient.TektonV1beta1().TaskRuns("default").Get(ctx, "payload-decode-error", metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get updated TaskRun: %v", err) + } + if updated.Annotations["chains.tekton.dev/archivista-gitoid"] != "fake-gitoid" { + t.Errorf("unexpected gitoid: %s", updated.Annotations["chains.tekton.dev/archivista-gitoid"]) + } + }) +} + +// TestStorePayload_CertificateSuccess_WithRecordingServer tests the certificate branch. +// It uses an httptest.Server to record the outgoing DSSE envelope. +func TestStorePayload_CertificateSuccess_WithRecordingServer(t *testing.T) { + ctx := context.Background() + + // Generate a valid certificate. + validCertPEM, _, _ := createCertificate(t, "dummy", "dummy", 123, time.Now(), nil, nil) + + // Override keyID generation to return "test-key-id". + origFunc := generatePublicKeyIDFunc + generatePublicKeyIDFunc = func(pub interface{}, hash crypto.Hash) (string, error) { + return "test-key-id", nil + } + defer func() { generatePublicKeyIDFunc = origFunc }() + + // Create an httptest.Server that records the DSSE envelope. + var recordedBody []byte + recServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "failed to read body", http.StatusInternalServerError) + return + } + recordedBody = body + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"gitoid": "fake-gitoid"}`)) + })) + defer recServer.Close() + + archClient, err := archivistaClient.CreateArchivistaClient(&http.Client{}, recServer.URL) + if err != nil { + t.Fatalf("failed to create Archivista client: %v", err) + } + + // Create a minimal TaskRun and wrap it. 
+ tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cert-success", + Namespace: "default", + }, + } + tektonObj := objects.NewTaskRunObjectV1Beta1(tr) + + // Build configuration using the recording server's URL. + cfg := config.Config{ + Storage: config.StorageConfigs{ + Archivista: config.ArchivistaStorageConfig{ + URL: recServer.URL, + }, + }, + } + + aStorage, _, _ := setupEnv("cert-success", cfg, archClient) + + // Prepare a valid payload. + payload := []byte("dummy payload") + encodedPayload := base64.StdEncoding.EncodeToString(payload) + signature := "dummy-signature" + opts := config.StorageOpts{ + ShortKey: "dummy", + Cert: validCertPEM, + Chain: "", + } + + // Call StorePayload. + err = aStorage.StorePayload(ctx, tektonObj, []byte(encodedPayload), signature, opts) + if err != nil { + t.Fatalf("StorePayload failed: %v", err) + } + + // Unmarshal the recorded DSSE envelope. + var env dsse.Envelope + if err := json.Unmarshal(recordedBody, &env); err != nil { + t.Fatalf("failed to unmarshal recorded envelope: %v", err) + } + + // Verify the signature's KeyID. + if len(env.Signatures) == 0 { + t.Fatal("expected at least one signature in envelope") + } + if env.Signatures[0].KeyID != "test-key-id" { + t.Errorf("expected keyID 'test-key-id', got %q", env.Signatures[0].KeyID) + } +} + +func TestBuildEnvelope_FallbackOnInvalidChain(t *testing.T) { + // Prepare inputs. + rawPayload := []byte("dummy") + signature := "dummy-sig" + keyID := "dummy-key" + // Provide a non-empty chain that cannot be parsed as valid certificates. + invalidChain := "invalid chain" + + // Call buildEnvelope. + env := buildEnvelope(rawPayload, signature, keyID, invalidChain) + + // Expect that no certificate data was included. 
+ if env.Signatures[0].Certificate != nil { + t.Errorf("expected certificate to be nil, got %v", env.Signatures[0].Certificate) + } + if len(env.Signatures[0].Intermediates) != 0 { + t.Errorf("expected intermediates to be empty, got %v", env.Signatures[0].Intermediates) + } +} diff --git a/pkg/chains/storage/archivista/cert.go b/pkg/chains/storage/archivista/cert.go new file mode 100644 index 0000000000..a5969d1a56 --- /dev/null +++ b/pkg/chains/storage/archivista/cert.go @@ -0,0 +1,120 @@ +package archivista + +import ( + "crypto/x509" + "encoding/pem" + "fmt" +) + +// parseAndOrderCertificateChain parses a PEM-encoded certificate chain string, +// validates each "CERTIFICATE" PEM block, and orders them so that the leaf certificate is first, +// followed by intermediates. If no intermediates are present, it simply returns the single certificate. +func parseAndOrderCertificateChain(chain string) (leafCert []byte, intermediates [][]byte, err error) { + if chain == "" { + return nil, nil, fmt.Errorf("empty certificate chain") + } + + data := []byte(chain) + type parsedCert struct { + cert *x509.Certificate + pem []byte + } + var certs []parsedCert + + // Parse all PEM blocks of type "CERTIFICATE" + for { + var block *pem.Block + block, data = pem.Decode(data) + if block == nil { + break + } + if block.Type != "CERTIFICATE" { + continue + } + parsed, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse certificate: %w", err) + } + certs = append(certs, parsedCert{ + cert: parsed, + pem: pem.EncodeToMemory(block), + }) + } + + if len(certs) == 0 { + return nil, nil, fmt.Errorf("no valid certificates found in chain") + } + + // If only one certificate is found, return it as the leaf. + if len(certs) == 1 { + return certs[0].pem, nil, nil + } + + // Identify leaf candidates: + // Prefer certificates that are not self-signed and whose Subject isn't used as an Issuer in any other cert. 
+ var leafCandidates []parsedCert + for i, pc := range certs { + selfSigned := pc.cert.Subject.String() == pc.cert.Issuer.String() + if !selfSigned { + used := false + for j, other := range certs { + if i == j { + continue + } + if other.cert.Issuer.String() == pc.cert.Subject.String() { + used = true + break + } + } + if !used { + leafCandidates = append(leafCandidates, pc) + } + } + } + // If no non-self-signed candidate, fall back to self-signed ones. + if len(leafCandidates) == 0 { + leafCandidates = certs + } + + // Choose the best leaf candidate: if multiple, select the one with the most recent NotBefore date. + leaf := leafCandidates[0] + for _, candidate := range leafCandidates[1:] { + if candidate.cert.NotBefore.After(leaf.cert.NotBefore) { + leaf = candidate + } + } + + // Build a map for quick lookup (subject => parsedCert). + subjectMap := make(map[string]parsedCert) + for _, pc := range certs { + subjectMap[pc.cert.Subject.String()] = pc + } + + // Order the chain starting from the leaf. + ordered := []parsedCert{leaf} + used := map[string]bool{leaf.cert.SerialNumber.String(): true} + current := leaf + for { + next, found := subjectMap[current.cert.Issuer.String()] + if !found || used[next.cert.SerialNumber.String()] { + break + } + ordered = append(ordered, next) + used[next.cert.SerialNumber.String()] = true + current = next + } + + leafCert = ordered[0].pem + // The intermediates are any ordered certs after the leaf. + for i := 1; i < len(ordered); i++ { + intermediates = append(intermediates, ordered[i].pem) + } + // Append any extra certificates not included in the ordering. 
+ for _, pc := range certs { + if !used[pc.cert.SerialNumber.String()] { + intermediates = append(intermediates, pc.pem) + } + } + + return leafCert, intermediates, nil +} diff --git a/pkg/chains/storage/archivista/cert_test.go b/pkg/chains/storage/archivista/cert_test.go new file mode 100644 index 0000000000..744cbecf81 --- /dev/null +++ b/pkg/chains/storage/archivista/cert_test.go @@ -0,0 +1,191 @@ +package archivista + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "strings" + "testing" + "time" +) + +// createCertificate is a helper that generates a PEM-encoded certificate, its parsed form, and a private key. +// If parent and parentKey are nil, the certificate is self-signed. +func createCertificate(t *testing.T, subject, issuer string, serial int64, notBefore time.Time, parent *x509.Certificate, parentKey *rsa.PrivateKey) (string, *x509.Certificate, *rsa.PrivateKey) { + t.Helper() + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("failed to generate key: %v", err) + } + template := &x509.Certificate{ + SerialNumber: big.NewInt(serial), + Subject: pkix.Name{ + CommonName: subject, + }, + Issuer: pkix.Name{ + CommonName: issuer, + }, + NotBefore: notBefore, + NotAfter: notBefore.Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageDigitalSignature, + } + // Self-sign if no parent provided. + if parent == nil { + parent = template + parentKey = key + } + certDER, err := x509.CreateCertificate(rand.Reader, template, parent, &key.PublicKey, parentKey) + if err != nil { + t.Fatalf("failed to create certificate: %v", err) + } + certPEM := string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certDER, + })) + parsedCert, err := x509.ParseCertificate(certDER) + if err != nil { + t.Fatalf("failed to parse generated certificate: %v", err) + } + return certPEM, parsedCert, key +} + +func TestParseAndOrderCertificateChain(t *testing.T) { + // Test case 1: empty chain. 
+ t.Run("empty chain", func(t *testing.T) { + _, _, err := parseAndOrderCertificateChain("") + if err == nil || !strings.Contains(err.Error(), "empty certificate chain") { + t.Errorf("expected error for empty chain, got: %v", err) + } + }) + + // Test case 2: no valid certificates found (wrong PEM type). + t.Run("no valid certificates", func(t *testing.T) { + invalid := "-----BEGIN NOT CERTIFICATE-----\nabc\n-----END NOT CERTIFICATE-----" + _, _, err := parseAndOrderCertificateChain(invalid) + if err == nil || !strings.Contains(err.Error(), "no valid certificates found") { + t.Errorf("expected error for no valid certificates, got: %v", err) + } + }) + + // Test case 3: single certificate (self-signed). + t.Run("single certificate", func(t *testing.T) { + notBefore := time.Now().Add(-1 * time.Hour) + certPEM, _, _ := createCertificate(t, "single", "single", 100, notBefore, nil, nil) + leaf, intermediates, err := parseAndOrderCertificateChain(certPEM) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Leaf should equal the original cert, with no intermediates. + if string(leaf) != certPEM { + t.Errorf("expected leaf to be the certificate, got different value") + } + if len(intermediates) != 0 { + t.Errorf("expected no intermediates, got %d", len(intermediates)) + } + }) + + // Test case 4: valid chain with leaf, intermediate, and root in random order. + t.Run("valid chain ordering", func(t *testing.T) { + now := time.Now() + // Create root certificate (self-signed). + rootPEM, rootCert, rootKey := createCertificate(t, "root", "root", 1, now.Add(-10*time.Hour), nil, nil) + // Create intermediate certificate signed by root. + intermediatePEM, intermediateCert, intermediateKey := createCertificate(t, "intermediate", "root", 2, now.Add(-5*time.Hour), rootCert, rootKey) + // Create leaf certificate signed by intermediate. 
+ leafPEM, _, _ := createCertificate(t, "leaf", "intermediate", 3, now.Add(-1*time.Hour), intermediateCert, intermediateKey) + // Combine in random order: intermediate, leaf, root. + chain := intermediatePEM + leafPEM + rootPEM + leafOut, intermediates, err := parseAndOrderCertificateChain(chain) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Expected leaf is the leaf certificate. + if string(leafOut) != leafPEM { + t.Errorf("expected leaf to be leaf certificate") + } + // Expected intermediates: first should be the intermediate, second the root. + if len(intermediates) < 2 { + t.Errorf("expected at least 2 intermediates, got %d", len(intermediates)) + } else { + if string(intermediates[0]) != intermediatePEM { + t.Errorf("expected first intermediate to be intermediate certificate") + } + if string(intermediates[1]) != rootPEM { + t.Errorf("expected second intermediate to be root certificate") + } + } + }) + + // Test case 5: valid chain with an extra certificate not connected to the chain. + t.Run("chain with extra certificate", func(t *testing.T) { + now := time.Now() + // Create a proper chain: root, intermediate, leaf. + rootPEM, rootCert, rootKey := createCertificate(t, "root", "root", 1, now.Add(-10*time.Hour), nil, nil) + intermediatePEM, intermediateCert, intermediateKey := createCertificate(t, "intermediate", "root", 2, now.Add(-5*time.Hour), rootCert, rootKey) + leafPEM, _, _ := createCertificate(t, "leaf", "intermediate", 3, now.Add(-1*time.Hour), intermediateCert, intermediateKey) + // Create an extra self-signed certificate that doesn't chain. + extraPEM, _, _ := createCertificate(t, "extra", "extra", 4, now.Add(-2*time.Hour), nil, nil) + // Combine in random order: extra, root, leaf, intermediate. + chain := extraPEM + rootPEM + leafPEM + intermediatePEM + leafOut, intermediates, err := parseAndOrderCertificateChain(chain) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Expected leaf remains the leaf certificate. 
+ if string(leafOut) != leafPEM { + t.Errorf("expected leaf to be leaf certificate") + } + // Intermediates should include the intermediate and root in order, with the extra appended. + foundIntermediate, foundRoot, foundExtra := false, false, false + for _, interm := range intermediates { + if string(interm) == intermediatePEM { + foundIntermediate = true + } + if string(interm) == rootPEM { + foundRoot = true + } + if string(interm) == extraPEM { + foundExtra = true + } + } + if !foundIntermediate || !foundRoot || !foundExtra { + t.Errorf("expected intermediates to contain intermediate, root, and extra certificates") + } + }) + + // Test case 6: chain with an invalid certificate block. + t.Run("invalid certificate block", func(t *testing.T) { + invalidCert := "-----BEGIN CERTIFICATE-----\ninvalid\n-----END CERTIFICATE-----" + chain := invalidCert + _, _, err := parseAndOrderCertificateChain(chain) + // Since no valid certificate is found, we expect the error to indicate that. + if err == nil || !strings.Contains(err.Error(), "no valid certificates found") { + t.Errorf("expected error for invalid certificate block, got: %v", err) + } + }) + + // Test case 7: multiple self-signed certificates; select the one with the most recent NotBefore. + t.Run("multiple self-signed certificates", func(t *testing.T) { + now := time.Now() + // Create two self-signed certificates with different NotBefore times. + certAPEM, _, _ := createCertificate(t, "A", "A", 10, now.Add(-2*time.Hour), nil, nil) + certBPEM, _, _ := createCertificate(t, "B", "B", 11, now.Add(-1*time.Hour), nil, nil) + // Combine in any order. + chain := certAPEM + certBPEM + leafOut, intermediates, err := parseAndOrderCertificateChain(chain) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Expect leaf to be certificate B since its NotBefore is later. + if string(leafOut) != certBPEM { + t.Errorf("expected leaf to be certificate B") + } + // The other certificate should appear as an intermediate. 
+ if len(intermediates) != 1 || string(intermediates[0]) != certAPEM { + t.Errorf("expected certificate A to be intermediate") + } + }) +} diff --git a/pkg/chains/storage/archivista/patch_test.go b/pkg/chains/storage/archivista/patch_test.go new file mode 100644 index 0000000000..badaca4c9c --- /dev/null +++ b/pkg/chains/storage/archivista/patch_test.go @@ -0,0 +1,82 @@ +package archivista + +import ( + "context" + "testing" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + fakePipelineClient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/tektoncd/chains/pkg/chains/objects" +) + +// --- Supported Branch Tests --- + +// TestPatchTektonObjectAnnotations_TaskRunV1Beta1 exercises the v1beta1.TaskRun branch. +func TestPatchTektonObjectAnnotations_TaskRunV1Beta1(t *testing.T) { + ctx := context.Background() + // Seed the fake client with a v1beta1.TaskRun. + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "taskrun-v1beta1", + Namespace: "default", + }, + } + client := fakePipelineClient.NewSimpleClientset(tr) + // Wrap the TaskRun using the helper. + obj := objects.NewTaskRunObjectV1Beta1(tr) + annotations := map[string]string{"foo": "bar"} + if err := PatchTektonObjectAnnotations(ctx, obj, annotations, client); err != nil { + t.Fatalf("expected no error, got %v", err) + } + if len(client.Actions()) == 0 { + t.Fatalf("expected a patch action, got none") + } +} + +// TestPatchTektonObjectAnnotations_PipelineRunV1Beta1 exercises the v1beta1.PipelineRun branch. +func TestPatchTektonObjectAnnotations_PipelineRunV1Beta1(t *testing.T) { + ctx := context.Background() + // Seed the fake client with a v1beta1.PipelineRun. + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pipelinerun-v1beta1", + Namespace: "default", + }, + } + client := fakePipelineClient.NewSimpleClientset(pr) + // Wrap the PipelineRun. 
+ obj := objects.NewPipelineRunObjectV1Beta1(pr) + annotations := map[string]string{"foo": "bar"} + if err := PatchTektonObjectAnnotations(ctx, obj, annotations, client); err != nil { + t.Fatalf("expected no error, got %v", err) + } + if len(client.Actions()) == 0 { + t.Fatalf("expected a patch action, got none") + } +} + +// --- Unsupported Branch Test --- + +// unsupportedTaskRun is a type that embeds a v1beta1.TaskRun but is distinct. +type unsupportedTaskRun struct { + v1beta1.TaskRun +} + +// unsupportedTektonObject wraps a real TektonObject (from a v1beta1.TaskRun) but +// overrides GetObject() so that it returns an unsupported type. +type unsupportedTektonObject struct { + inner objects.TektonObject +} + +// GetObject returns an unsupported type by wrapping the inner object's underlying TaskRun. +func (u *unsupportedTektonObject) GetObject() interface{} { + // Retrieve the inner object (which should be a *v1beta1.TaskRun). + tr, ok := u.inner.GetObject().(*v1beta1.TaskRun) + if !ok { + return u.inner.GetObject() + } + // Wrap it in an unsupportedTaskRun so the type switch in PatchTektonObjectAnnotations doesn't match. 
+	return &unsupportedTaskRun{*tr}
+}
diff --git a/pkg/chains/storage/storage.go b/pkg/chains/storage/storage.go
index dbb07a37b2..fdb42e7ee5 100644
--- a/pkg/chains/storage/storage.go
+++ b/pkg/chains/storage/storage.go
@@ -18,6 +18,7 @@ import (
 	"errors"
 
 	"github.com/tektoncd/chains/pkg/chains/objects"
+	"github.com/tektoncd/chains/pkg/chains/storage/archivista"
 	"github.com/tektoncd/chains/pkg/chains/storage/docdb"
 	"github.com/tektoncd/chains/pkg/chains/storage/gcs"
 	"github.com/tektoncd/chains/pkg/chains/storage/grafeas"
@@ -93,10 +94,16 @@ func InitializeBackends(ctx context.Context, ps versioned.Interface, kc kubernet
 				return nil, err
 			}
 			backends[backendType] = pubsubBackend
+		case archivista.StorageBackendArchivista:
+			archivistaBackend, err := archivista.NewArchivistaStorage(cfg, ps)
+			if err != nil {
+				return nil, err
+			}
+			backends[backendType] = archivistaBackend
 		}
 	}
 	logger.Infof("successfully initialized backends: %v", maps.Keys(backends))
 	return backends, nil
 }
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 4e9cef101a..8a69ade629 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -27,6 +27,7 @@ import (
 	cm "knative.dev/pkg/configmap"
 )
 
+// Config is the overall configuration for Chains.
 type Config struct {
 	Artifacts ArtifactConfigs
 	Storage   StorageConfigs
@@ -36,14 +37,14 @@ type Config struct {
 	BuildDefinition BuildDefinitionConfig
 }
 
-// ArtifactConfigs contains the configuration for how to sign/store/format the signatures for each artifact type
+// ArtifactConfigs contains the configuration for how to sign/store/format the signatures for each artifact type.
 type ArtifactConfigs struct {
 	OCI          Artifact
 	PipelineRuns Artifact
 	TaskRuns     Artifact
 }
 
-// Artifact contains the configuration for how to sign/store/format the signatures for a single artifact
+// Artifact contains the configuration for how to sign/store/format the signatures for a single artifact.
type Artifact struct { Format string StorageBackend sets.Set[string] @@ -51,17 +52,18 @@ type Artifact struct { DeepInspectionEnabled bool } -// StorageConfigs contains the configuration to instantiate different storage providers +// StorageConfigs contains the configuration to instantiate different storage providers. type StorageConfigs struct { - GCS GCSStorageConfig - OCI OCIStorageConfig - Tekton TektonStorageConfig - DocDB DocDBStorageConfig - Grafeas GrafeasConfig - PubSub PubSubStorageConfig + GCS GCSStorageConfig + OCI OCIStorageConfig + Tekton TektonStorageConfig + DocDB DocDBStorageConfig + Grafeas GrafeasConfig + PubSub PubSubStorageConfig + Archivista ArchivistaStorageConfig } -// SignerConfigs contains the configuration to instantiate different signers +// SignerConfigs contains the configuration to instantiate different signers. type SignerConfigs struct { X509 X509Signer KMS KMSSigner @@ -89,7 +91,7 @@ type KMSSigner struct { Auth KMSAuth } -// KMSAuth configures authentication to the KMS server +// KMSAuth configures authentication to the KMS server. type KMSAuth struct { Address string Token string @@ -98,13 +100,13 @@ type KMSAuth struct { Spire KMSAuthSpire } -// KMSAuthOIDC configures settings to authenticate with OIDC +// KMSAuthOIDC configures settings to authenticate with OIDC. type KMSAuthOIDC struct { Path string Role string } -// KMSAuthSpire configures settings to get an auth token from spire +// KMSAuthSpire configures settings to get an auth token from spire. type KMSAuthSpire struct { Sock string Audience string @@ -120,6 +122,7 @@ type OCIStorageConfig struct { } type TektonStorageConfig struct { + // Currently no fields. } type DocDBStorageConfig struct { @@ -130,12 +133,12 @@ type DocDBStorageConfig struct { } type GrafeasConfig struct { - // project id that is used to store notes and occurences + // Project id that is used to store notes and occurrences. 
ProjectID string - // note id used to create a note that an occurrence will be attached to + // Note id used to create a note that an occurrence will be attached to. NoteID string - // NoteHint is used to set the attestation note + // NoteHint is used to set the attestation note. NoteHint string } @@ -155,6 +158,16 @@ type TransparencyConfig struct { URL string } +// ----------------------- New Archivista configuration ----------------------- + +// ArchivistaStorageConfig holds configuration for the Archivista storage backend. +type ArchivistaStorageConfig struct { + // URL is the endpoint for the Archivista service. + URL string `json:"url,omitempty"` +} + +// ----------------------- Constants ----------------------- + const ( taskrunFormatKey = "artifacts.taskrun.format" taskrunStorageKey = "artifacts.taskrun.storage" @@ -177,6 +190,9 @@ const ( docDBMongoServerURLDirKey = "storage.docdb.mongo-server-url-dir" docDBMongoServerURLPathKey = "storage.docdb.mongo-server-url-path" + // New Archivista constant: + archivistaURLKey = "storage.archivista.url" + grafeasProjectIDKey = "storage.grafeas.projectid" grafeasNoteIDKey = "storage.grafeas.noteid" grafeasNoteHint = "storage.grafeas.notehint" @@ -185,8 +201,6 @@ const ( pubsubProvider = "storage.pubsub.provider" pubsubTopic = "storage.pubsub.topic" - // No config for PubSub - In-Memory - // PubSub - Kafka pubsubKafkaBootstrapServer = "storage.pubsub.kafka.bootstrap.servers" @@ -194,8 +208,8 @@ const ( kmsSignerKMSRef = "signers.kms.kmsref" kmsAuthAddress = "signers.kms.auth.address" kmsAuthToken = "signers.kms.auth.token" - kmsAuthOIDCPath = "signers.kms.auth.oidc.path" kmsAuthTokenPath = "signers.kms.auth.token-path" // #nosec G101 + kmsAuthOIDCPath = "signers.kms.auth.oidc.path" kmsAuthOIDCRole = "signers.kms.auth.oidc.role" kmsAuthSpireSock = "signers.kms.auth.spire.sock" kmsAuthSpireAudience = "signers.kms.auth.spire.audience" @@ -220,6 +234,8 @@ const ( ChainsConfig = "chains-config" ) +// ----------------------- 
Helper functions for parsing ----------------------- + func (artifact *Artifact) Enabled() bool { return !(artifact.StorageBackend.Len() == 1 && artifact.StorageBackend.Has("")) } @@ -268,7 +284,7 @@ func defaultConfig() *Config { } } -// NewConfigFromMap creates a Config from the supplied map +// NewConfigFromMap creates a Config from the supplied map. func NewConfigFromMap(data map[string]string) (*Config, error) { cfg := defaultConfig() @@ -276,18 +292,18 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { // Artifact-specific configs // TaskRuns asString(taskrunFormatKey, &cfg.Artifacts.TaskRuns.Format, "in-toto", "slsa/v1", "slsa/v2alpha3", "slsa/v2alpha4"), - asStringSet(taskrunStorageKey, &cfg.Artifacts.TaskRuns.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas", "kafka")), + asStringSet(taskrunStorageKey, &cfg.Artifacts.TaskRuns.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas", "kafka", "archivista")), asString(taskrunSignerKey, &cfg.Artifacts.TaskRuns.Signer, "x509", "kms"), // PipelineRuns asString(pipelinerunFormatKey, &cfg.Artifacts.PipelineRuns.Format, "in-toto", "slsa/v1", "slsa/v2alpha3", "slsa/v2alpha4"), - asStringSet(pipelinerunStorageKey, &cfg.Artifacts.PipelineRuns.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas")), + asStringSet(pipelinerunStorageKey, &cfg.Artifacts.PipelineRuns.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas", "archivista")), asString(pipelinerunSignerKey, &cfg.Artifacts.PipelineRuns.Signer, "x509", "kms"), asBool(pipelinerunEnableDeepInspectionKey, &cfg.Artifacts.PipelineRuns.DeepInspectionEnabled), // OCI asString(ociFormatKey, &cfg.Artifacts.OCI.Format, "simplesigning"), - asStringSet(ociStorageKey, &cfg.Artifacts.OCI.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas", "kafka")), + asStringSet(ociStorageKey, &cfg.Artifacts.OCI.StorageBackend, sets.New[string]("tekton", "oci", 
"gcs", "docdb", "grafeas", "kafka", "archivista")), asString(ociSignerKey, &cfg.Artifacts.OCI.Signer, "x509", "kms"), // PubSub - General @@ -305,6 +321,9 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { asString(docDBMongoServerURLKey, &cfg.Storage.DocDB.MongoServerURL), asString(docDBMongoServerURLDirKey, &cfg.Storage.DocDB.MongoServerURLDir), asString(docDBMongoServerURLPathKey, &cfg.Storage.DocDB.MongoServerURLPath), + + asString(archivistaURLKey, &cfg.Storage.Archivista.URL), + asString(grafeasProjectIDKey, &cfg.Storage.Grafeas.ProjectID), asString(grafeasNoteIDKey, &cfg.Storage.Grafeas.NoteID), asString(grafeasNoteHint, &cfg.Storage.Grafeas.NoteHint), @@ -342,12 +361,12 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { return cfg, nil } -// NewConfigFromConfigMap creates a Config from the supplied ConfigMap +// NewConfigFromConfigMap creates a Config from the supplied ConfigMap. func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) { return NewConfigFromMap(configMap.Data) } -// oneOf sets target to true if it maches any of the values +// oneOf sets target to true if it matches any of the values. func oneOf(key string, target *bool, values ...string) cm.ParseFunc { return func(data map[string]string) error { raw, ok := data[key] @@ -366,8 +385,7 @@ func oneOf(key string, target *bool, values ...string) cm.ParseFunc { } } -// allow additional supported values for a "true" decision -// in additional to the usual ones provided by strconv.ParseBool +// asBool passes the value at key through into the target. func asBool(key string, target *bool) cm.ParseFunc { return func(data map[string]string) error { raw, ok := data[key] @@ -383,8 +401,7 @@ func asBool(key string, target *bool) cm.ParseFunc { } } -// asString passes the value at key through into the target, if it exists. -// TODO(mattmoor): This might be a nice variation on cm.AsString to upstream. 
+// asString passes the value at key into the target, if it exists. func asString(key string, target *string, values ...string) cm.ParseFunc { return func(data map[string]string) error { raw, ok := data[key] @@ -402,7 +419,7 @@ func asString(key string, target *string, values ...string) cm.ParseFunc { } } -// asStringSet parses the value at key as a sets.Set[string] (split by ',') into the target, if it exists. +// asStringSet parses the value at key as a set (split by ',') into the target, if it exists. func asStringSet(key string, target *sets.Set[string], allowed sets.Set[string]) cm.ParseFunc { return func(data map[string]string) error { if raw, ok := data[key]; ok { diff --git a/vendor/github.com/edwarnicke/gitoid/.gitignore b/vendor/github.com/edwarnicke/gitoid/.gitignore new file mode 100644 index 0000000000..f3a1246a32 --- /dev/null +++ b/vendor/github.com/edwarnicke/gitoid/.gitignore @@ -0,0 +1,24 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# Goland files +.idea/ diff --git a/vendor/github.com/edwarnicke/gitoid/LICENSE b/vendor/github.com/edwarnicke/gitoid/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/edwarnicke/gitoid/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/edwarnicke/gitoid/README.md b/vendor/github.com/edwarnicke/gitoid/README.md new file mode 100644 index 0000000000..a4e8dae360 --- /dev/null +++ b/vendor/github.com/edwarnicke/gitoid/README.md @@ -0,0 +1,111 @@ +gitoid provides a simple library to compute gitoids (git object ids) + +## Creating GitOIDs + +### Default Usage +By default it produces gitoids for git object type blob using sha1: + +```go +var reader os.Reader +gitoidHash, err := gitoid.New(reader) +fmt.Println(gitoidHash) +// Output: 261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 +fmt.Println(gitoidHash.URI()) +// Output: gitoid:blob:sha1:261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 +``` + +### GitOid from string or []byte + +It's simple to compute the gitoid from a string or []byte by using bytes.NewBuffer: + +```go +input := []byte("example") +gitoidHash, _ := gitoid.New(bytes.NewBuffer(input)) +fmt.Println(gitoidHash) +// Output: 96236f8158b12701d5e75c14fb876c4a0f31b963 +fmt.Println(gitoidHash.URI()) +// Output: gitoid:blob:sha1:96236f8158b12701d5e75c14fb876c4a0f31b963 +``` + +### GitOID from URIs + +GitOIDs can be represented as a [gitoid uri](https://www.iana.org/assignments/uri-schemes/prov/gitoid). + +```go +gitoidHash, _ := gitoid.FromURI("gitoid:blob:sha1:96236f8158b12701d5e75c14fb876c4a0f31b96") +fmt.Println(gitoidHash) +// Output: 96236f8158b12701d5e75c14fb876c4a0f31b963 +fmt.Println(gitoidHash.URI()) +// Output: gitoid:blob:sha1:96236f8158b12701d5e75c14fb876c4a0f31b963 +``` + +## Variations on GitOIDs + +### SHA256 gitoids + +Git defaults to computing gitoids with sha1. Git also supports sha256 gitoids. 
Sha256 gitoids are supported using +an Option: + +```go +var reader os.Reader +gitoidHash, err := gitoid.New(reader, gitoid.WithSha256()) +fmt.Println(gitoidHash) +// Output: ed43975fbdc3084195eb94723b5f6df44eeeed1cdda7db0c7121edf5d84569ab +fmt.Println(gitoidHash.URI()) +// Output: gitoid:blob:sha256:ed43975fbdc3084195eb94723b5f6df44eeeed1cdda7db0c7121edf5d84569ab +``` + +### Other git object types + +git has four object types: blob, tree, commit, tag. By default gitoid using object type blob. +You may optionally specify another object type using an Option: + +```go +var reader os.Reader +gitoidHash, err := gitoid.New(reader, gitoid.WithGitObjectType(gitoid.COMMIT)) +``` + +### Assert ContentLength + +git object ids consist of hash over a header followed by the file contents. The header contains the length of the file +contents. By default, gitoid simply copies the reader into a buffer to establish its contentLength to compute the header. + +If you wish to assert the contentLength yourself, you may do so with an Option: + +```go +var reader os.Reader +var contentLength int64 +gitoidHash, _ := gitoid.New(reader, gitoid.WithContentLength(contentLength)) +fmt.Println(gitoidHash) +// Output: 261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 +``` + +gitoid will read the first contentLength bytes from the provided reader. 
If the reader is unable to provide +contentLength bytes a wrapper error around io.ErrUnexpectedEOF will be returned from gitoid.New + +## Using GitOIDs + +### Match contents to a GitOID + +```go +var reader io.Reader +var gitoidHash *gitoid.GitOID +if gitoidHash.Match(reader) { + fmt.Println("matched") +} +``` + +### Find files that match GitOID + +```go +var path1 fs.FS = os.DirFS("./relative/path") +var path2 fs.FS = os.DirFS("/absolute/path") +var gitoidHash *gitoid.GitOID + +// Find a file in path1 and path2 that matches gitoidHash +file,_ := gitoidHash.Find(path1, path2) + +// Find all files in path1 and path2 that matches gitoidHash +files, := gitoidHash.FindAll(path1, path2) +``` + diff --git a/vendor/github.com/edwarnicke/gitoid/gitoid.go b/vendor/github.com/edwarnicke/gitoid/gitoid.go new file mode 100644 index 0000000000..12d5168b46 --- /dev/null +++ b/vendor/github.com/edwarnicke/gitoid/gitoid.go @@ -0,0 +1,215 @@ +// Copyright (c) 2022 Cisco and/or its affiliates. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gitoid + +import ( + "bytes" + "crypto/sha1" // #nosec G505 + "encoding/hex" + "errors" + "fmt" + "io" + "io/fs" + "strings" +) + +// GitObjectType type of git object - current values are "blob", "commit", "tag", "tree". 
+type GitObjectType string + +const ( + BLOB GitObjectType = "blob" + COMMIT GitObjectType = "commit" + TAG GitObjectType = "tag" + TREE GitObjectType = "tree" +) + +var ErrMayNotBeNil = errors.New("may not be nil") +var ErrInvalidGitOIDURI = errors.New("invalid uri in gitoid.FromURI") + +type GitOID struct { + gitObjectType GitObjectType + hashName string + hashValue []byte +} + +// New - create a new GitOID +// by default git object type is "blob" and hash is sha1 +func New(reader io.Reader, opts ...Option) (*GitOID, error) { + if reader == nil { + return nil, fmt.Errorf("reader in gitoid.New: %w", ErrMayNotBeNil) + } + + o := &option{ + gitObjectType: BLOB, + /* #nosec G401 */ + h: sha1.New(), + hashName: "sha1", + contentLength: 0, + } + + for _, opt := range opts { + opt(o) + } + + // If there is no declared o.contentLength, copy the entire reader into a buffer so we can compute + // the contentLength + if o.contentLength == 0 { + buf := bytes.NewBuffer(nil) + + contentLength, err := io.Copy(buf, reader) + if err != nil { + return nil, fmt.Errorf("error copying reader to buffer in gitoid.New: %w", err) + } + + reader = buf + o.contentLength = contentLength + } + + // Write the git object header + o.h.Write(Header(o.gitObjectType, o.contentLength)) + + // Copy the reader to the hash + n, err := io.Copy(o.h, io.LimitReader(reader, o.contentLength)) + if err != nil { + return nil, fmt.Errorf("error copying reader to hash.Hash.Writer in gitoid.New: %w", err) + } + + if n < o.contentLength { + return nil, fmt.Errorf("expected contentLength (%d) is less than actual contentLength (%d) in gitoid.New: %w", o.contentLength, n, io.ErrUnexpectedEOF) + } + + return &GitOID{ + gitObjectType: o.gitObjectType, + hashName: o.hashName, + hashValue: o.h.Sum(nil), + }, nil +} + +// Header - returns the git object header from the gitObjectType and contentLength. 
+func Header(gitObjectType GitObjectType, contentLength int64) []byte { + return []byte(fmt.Sprintf("%s %d\000", gitObjectType, contentLength)) +} + +// String - returns the gitoid in lowercase hex. +func (g *GitOID) String() string { + return fmt.Sprintf("%x", g.hashValue) +} + +// URI - returns the gitoid as a URI (https://www.iana.org/assignments/uri-schemes/prov/gitoid) +func (g *GitOID) URI() string { + return fmt.Sprintf("gitoid:%s:%s:%s", g.gitObjectType, g.hashName, g) +} + +func (g *GitOID) Bytes() []byte { + if g == nil { + return nil + } + + return g.hashValue +} + +// Equal - returns true of g == x. +func (g *GitOID) Equal(x *GitOID) bool { + if g == x { + return true + } + + if g == nil || x == nil || g.hashName != x.hashName { + return false + } + + if len(g.Bytes()) != len(x.Bytes()) { + return false + } + + for i, v := range g.Bytes() { + if x.Bytes()[i] != v { + return false + } + } + return true +} + +// FromURI - returns a *GitOID from a gitoid uri string - see https://www.iana.org/assignments/uri-schemes/prov/gitoid +func FromURI(uri string) (*GitOID, error) { + parts := strings.Split(uri, ":") + if len(parts) != 4 || parts[0] != "gitoid" { + return nil, fmt.Errorf("%w: %q in gitoid.FromURI", ErrInvalidGitOIDURI, uri) + } + hashValue, err := hex.DecodeString(parts[3]) + if err != nil { + return nil, fmt.Errorf("error decoding hash value (%s) in gitoid.FromURI: %w", parts[3], err) + } + return &GitOID{ + gitObjectType: GitObjectType(parts[1]), + hashName: parts[2], + hashValue: hashValue, + }, nil +} + +// Match - returns true if contents of reader generates a GitOID equal to g. +func (g *GitOID) Match(reader io.Reader) bool { + g2, err := New(reader, WithGitObjectType(g.gitObjectType)) + if err != nil { + return false + } + return g.Equal(g2) +} + +// Find - return the first fs.File in paths that Matches the *GitOID g. +func (g *GitOID) Find(paths ...fs.FS) fs.File { + foundFiles := g.findN(1, paths...) 
+ if len(foundFiles) != 1 { + return nil + } + return foundFiles[0] +} + +// FindAll - return all fs.Files in paths that Matches the *GitOID g. +func (g *GitOID) FindAll(paths ...fs.FS) []fs.File { + return g.findN(0, paths...) +} + +func (g *GitOID) findN(n int, paths ...fs.FS) []fs.File { + var foundFiles []fs.File + for _, fsys := range paths { + _ = fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error { + if d == nil || d.IsDir() || err != nil { + //lint:ignore nilerr - returning non-nil error will stop the walk + return nil + } + file, err := fsys.Open(path) + defer func() { _ = file.Close() }() + if err != nil { + //lint:ignore nilerr - returning non-nil error will stop the walk + return nil + } + if !g.Match(file) { + return nil + } + foundFile, err := fsys.Open(path) + if err == nil { + foundFiles = append(foundFiles, foundFile) + } + if n > 0 && len(foundFiles) == n { + return io.EOF + } + return nil + }) + } + return foundFiles +} diff --git a/vendor/github.com/edwarnicke/gitoid/options.go b/vendor/github.com/edwarnicke/gitoid/options.go new file mode 100644 index 0000000000..f104e198b5 --- /dev/null +++ b/vendor/github.com/edwarnicke/gitoid/options.go @@ -0,0 +1,56 @@ +// Copyright (c) 2022 Cisco and/or its affiliates. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gitoid + +import ( + "crypto/sha256" + "hash" +) + +type option struct { + gitObjectType GitObjectType + h hash.Hash + hashName string + contentLength int64 +} + +// Option - option for GitOID creation. +type Option func(o *option) + +// WithSha256 - use sha256 for computing gitoids instead of the default sha1. +func WithSha256() Option { + return func(o *option) { + o.hashName = "sha256" + o.h = sha256.New() + } +} + +// WithGitObjectType - set the GitOobjectType to a value different than the default gitoid.BLOB type. +func WithGitObjectType(gitObjectType GitObjectType) Option { + return func(o *option) { + o.gitObjectType = gitObjectType + } +} + +// WithContentLength - allows the assertion of a contentLength to be read from the provided reader +// only the first contentLength of data will be read from the reader +// if contentLength bytes are unavailable from the reader, an error will be returned. +func WithContentLength(contentLength int64) Option { + return func(o *option) { + o.contentLength = contentLength + } +} diff --git a/vendor/github.com/in-toto/archivista/LICENSE b/vendor/github.com/in-toto/archivista/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/in-toto/archivista/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/in-toto/archivista/pkg/api/download.go b/vendor/github.com/in-toto/archivista/pkg/api/download.go new file mode 100644 index 0000000000..df76ae6918 --- /dev/null +++ b/vendor/github.com/in-toto/archivista/pkg/api/download.go @@ -0,0 +1,109 @@ +// Copyright 2023-2024 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + + "github.com/in-toto/go-witness/dsse" +) + +func DownloadReadCloser(ctx context.Context, baseURL string, gitoid string) (io.ReadCloser, error) { + return DownloadReadCloserWithHTTPClient(ctx, &http.Client{}, baseURL, gitoid) +} + +func DownloadReadCloserWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, gitoid string) (io.ReadCloser, error) { + downloadURL, err := url.JoinPath(baseURL, "download", gitoid) + if err != nil { + return nil, err + } + req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + resp, err := client.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + // NOTE: attempt to read body on error and + // only close if an error occurs + defer resp.Body.Close() + errMsg, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return nil, errors.New(string(errMsg)) + } + return resp.Body, nil +} + +func Download(ctx context.Context, baseURL string, gitoid string) (dsse.Envelope, error) { + buf := &bytes.Buffer{} + if err := DownloadWithWriter(ctx, baseURL, gitoid, buf); err != nil { + return dsse.Envelope{}, err + } + + env := dsse.Envelope{} + dec := json.NewDecoder(buf) + if err := dec.Decode(&env); err != nil { + return env, err + } + + return env, nil +} + +func DownloadWithWriter(ctx context.Context, baseURL string, gitoid string, dst io.Writer) error { + return DownloadWithWriterWithHTTPClient(ctx, &http.Client{}, baseURL, gitoid, dst) +} + +func DownloadWithWriterWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, gitoid string, dst io.Writer) error { + downloadUrl, err := url.JoinPath(baseURL, "download", gitoid) + if err != nil { + return err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadUrl, 
nil) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + hc := &http.Client{} + resp, err := hc.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + errMsg, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return errors.New(string(errMsg)) + } + + _, err = io.Copy(dst, resp.Body) + return err +} diff --git a/vendor/github.com/in-toto/archivista/pkg/api/graphql.go b/vendor/github.com/in-toto/archivista/pkg/api/graphql.go new file mode 100644 index 0000000000..a7f29d237c --- /dev/null +++ b/vendor/github.com/in-toto/archivista/pkg/api/graphql.go @@ -0,0 +1,137 @@ +// Copyright 2023-2024 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" +) + +const RetrieveSubjectsQuery = `query($gitoid: String!) { + subjects( + where: { + hasStatementWith:{ + hasDsseWith:{ + gitoidSha256: $gitoid + } + } + } + ) { + edges { + node{ + name + subjectDigests{ + algorithm + value + } + } + } + } +}` + +const SearchQuery = `query($algo: String!, $digest: String!) 
{ + dsses( + where: { + hasStatementWith: { + hasSubjectsWith: { + hasSubjectDigestsWith: { + value: $digest, + algorithm: $algo + } + } + } + } + ) { + edges { + node { + gitoidSha256 + statement { + attestationCollections { + name + attestations { + type + } + } + } + } + } + } +}` + +func GraphQlQuery[TRes any, TVars any](ctx context.Context, baseUrl, query string, vars TVars) (TRes, error) { + return GraphQlQueryWithHeaders[TRes, TVars](ctx, baseUrl, query, vars, nil) +} + +func GraphQlQueryWithHeaders[TRes any, TVars any](ctx context.Context, baseUrl, query string, vars TVars, headers map[string]string) (TRes, error) { + var response TRes + queryUrl, err := url.JoinPath(baseUrl, "query") + if err != nil { + return response, err + } + + requestBody := GraphQLRequestBodyGeneric[TVars]{ + Query: query, + Variables: vars, + } + + reqBody, err := json.Marshal(requestBody) + if err != nil { + return response, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, queryUrl, bytes.NewReader(reqBody)) + if err != nil { + return response, err + } + + for k, v := range headers { + req.Header.Set(k, v) + } + + req.Header.Set("Content-Type", "application/json") + hc := &http.Client{} + res, err := hc.Do(req) + if err != nil { + return response, err + } + + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + errMsg, err := io.ReadAll(res.Body) + if err != nil { + return response, err + } + + return response, errors.New(string(errMsg)) + } + + dec := json.NewDecoder(res.Body) + gqlRes := GraphQLResponseGeneric[TRes]{} + if err := dec.Decode(&gqlRes); err != nil { + return response, err + } + + if len(gqlRes.Errors) > 0 { + return response, fmt.Errorf("graph ql query failed: %v", gqlRes.Errors) + } + + return gqlRes.Data, nil +} diff --git a/vendor/github.com/in-toto/archivista/pkg/api/structs.go b/vendor/github.com/in-toto/archivista/pkg/api/structs.go new file mode 100644 index 0000000000..fcf295a40f --- /dev/null +++ 
b/vendor/github.com/in-toto/archivista/pkg/api/structs.go @@ -0,0 +1,90 @@ +// Copyright 2024 The Archivista Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +type GraphQLError struct { + Message string `json:"message"` +} + +type GraphQLResponseGeneric[T any] struct { + Data T `json:"data,omitempty"` + Errors []GraphQLError `json:"errors,omitempty"` +} + +type GraphQLRequestBodyGeneric[TVars any] struct { + Query string `json:"query"` + Variables TVars `json:"variables,omitempty"` +} + +type RetrieveSubjectVars struct { + Gitoid string `json:"gitoid"` +} + +type SearchVars struct { + Algorithm string `json:"algo"` + Digest string `json:"digest"` +} + +type RetrieveSubjectResults struct { + Subjects Subjects `json:"subjects"` +} + +type Subjects struct { + Edges []SubjectEdge `json:"edges"` +} + +type SubjectEdge struct { + Node SubjectNode `json:"node"` +} + +type SubjectNode struct { + Name string `json:"name"` + SubjectDigests []SubjectDigest `json:"subjectDigests"` +} + +type SubjectDigest struct { + Algorithm string `json:"algorithm"` + Value string `json:"value"` +} + +type SearchResults struct { + Dsses DSSES `json:"dsses"` +} + +type DSSES struct { + Edges []SearchEdge `json:"edges"` +} + +type SearchEdge struct { + Node SearchNode `json:"node"` +} + +type SearchNode struct { + GitoidSha256 string `json:"gitoidSha256"` + Statement Statement `json:"statement"` +} + +type Statement struct { + AttestationCollection 
AttestationCollection `json:"attestationCollections"` +} + +type AttestationCollection struct { + Name string `json:"name"` + Attestations []Attestation `json:"attestations"` +} + +type Attestation struct { + Type string `json:"type"` +} diff --git a/vendor/github.com/in-toto/archivista/pkg/api/upload.go b/vendor/github.com/in-toto/archivista/pkg/api/upload.go new file mode 100644 index 0000000000..3bf9934420 --- /dev/null +++ b/vendor/github.com/in-toto/archivista/pkg/api/upload.go @@ -0,0 +1,89 @@ +// Copyright 2023-2024 The Archivista Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + + "github.com/in-toto/go-witness/dsse" +) + +type UploadResponse struct { + Gitoid string `json:"gitoid"` +} + +// Deprecated: Use UploadResponse instead. It will be removed in version >= v0.6.0 +type StoreResponse = UploadResponse + +// Deprecated: Use Store instead. 
It will be removed in version >= v0.6.0 +func Upload(ctx context.Context, baseURL string, envelope dsse.Envelope) (UploadResponse, error) { + return Store(ctx, baseURL, envelope) +} + +func Store(ctx context.Context, baseURL string, envelope dsse.Envelope) (StoreResponse, error) { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + if err := enc.Encode(envelope); err != nil { + return StoreResponse{}, err + } + + return StoreWithReader(ctx, baseURL, buf) +} + +func StoreWithReader(ctx context.Context, baseURL string, r io.Reader) (StoreResponse, error) { + return StoreWithReaderWithHTTPClient(ctx, &http.Client{}, baseURL, r) +} + +func StoreWithReaderWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, r io.Reader) (StoreResponse, error) { + uploadPath, err := url.JoinPath(baseURL, "upload") + if err != nil { + return UploadResponse{}, err + } + + req, err := http.NewRequestWithContext(ctx, "POST", uploadPath, r) + if err != nil { + return UploadResponse{}, err + } + + req.Header.Set("Content-Type", "application/json") + hc := &http.Client{} + resp, err := hc.Do(req) + if err != nil { + return UploadResponse{}, err + } + + defer resp.Body.Close() + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return UploadResponse{}, err + } + + if resp.StatusCode != http.StatusOK { + return UploadResponse{}, errors.New(string(bodyBytes)) + } + + uploadResp := UploadResponse{} + if err := json.Unmarshal(bodyBytes, &uploadResp); err != nil { + return UploadResponse{}, err + } + + return uploadResp, nil +} diff --git a/vendor/github.com/in-toto/archivista/pkg/http-client/client.go b/vendor/github.com/in-toto/archivista/pkg/http-client/client.go new file mode 100644 index 0000000000..22b87c1a9a --- /dev/null +++ b/vendor/github.com/in-toto/archivista/pkg/http-client/client.go @@ -0,0 +1,216 @@ +// Copyright 2024 The Archivista Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httpclient + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/in-toto/archivista/pkg/api" + "github.com/in-toto/go-witness/dsse" +) + +type ArchivistaClient struct { + BaseURL string + GraphQLURL string + *http.Client +} + +type HttpClienter interface { + DownloadDSSE(ctx context.Context, gitoid string) (dsse.Envelope, error) + DownloadReadCloser(ctx context.Context, gitoid string) (io.ReadCloser, error) + DownloadWithWriter(ctx context.Context, gitoid string, dst io.Writer) error + Store(ctx context.Context, envelope dsse.Envelope) (api.UploadResponse, error) + StoreWithReader(ctx context.Context, r io.Reader) (api.UploadResponse, error) + GraphQLRetrieveSubjectResults(ctx context.Context, gitoid string) (api.RetrieveSubjectResults, error) + GraphQLRetrieveSearchResults(ctx context.Context, algo string, digest string) (api.SearchResults, error) + GraphQLQueryIface(ctx context.Context, query string, variables interface{}) (*GraphQLResponseInterface, error) + GraphQLQueryToDst(ctx context.Context, query string, variables interface{}, dst interface{}) error + GraphQLQueryReadCloser(ctx context.Context, query string, variables interface{}) (io.ReadCloser, error) +} + +func CreateArchivistaClient(httpClient *http.Client, baseURL string) (*ArchivistaClient, error) { + client := ArchivistaClient{ + BaseURL: baseURL, + Client: http.DefaultClient, + } + if httpClient != nil { + client.Client = httpClient + } + var err error + 
client.GraphQLURL, err = url.JoinPath(client.BaseURL, "query") + if err != nil { + return nil, err + } + return &client, nil +} + +func (ac *ArchivistaClient) DownloadDSSE(ctx context.Context, gitoid string) (dsse.Envelope, error) { + reader, err := api.DownloadReadCloserWithHTTPClient(ctx, ac.Client, ac.BaseURL, gitoid) + if err != nil { + return dsse.Envelope{}, err + } + env := dsse.Envelope{} + if err := json.NewDecoder(reader).Decode(&env); err != nil { + return dsse.Envelope{}, err + } + return env, nil +} + +func (ac *ArchivistaClient) DownloadReadCloser(ctx context.Context, gitoid string) (io.ReadCloser, error) { + return api.DownloadReadCloserWithHTTPClient(ctx, ac.Client, ac.BaseURL, gitoid) +} + +func (ac *ArchivistaClient) DownloadWithWriter(ctx context.Context, gitoid string, dst io.Writer) error { + return api.DownloadWithWriterWithHTTPClient(ctx, ac.Client, ac.BaseURL, gitoid, dst) +} + +func (ac *ArchivistaClient) Store(ctx context.Context, envelope dsse.Envelope) (api.UploadResponse, error) { + return api.Store(ctx, ac.BaseURL, envelope) +} + +func (ac *ArchivistaClient) StoreWithReader(ctx context.Context, r io.Reader) (api.UploadResponse, error) { + return api.StoreWithReader(ctx, ac.BaseURL, r) +} + +type GraphQLRequestBodyInterface struct { + Query string `json:"query"` + Variables interface{} `json:"variables,omitempty"` +} + +type GraphQLResponseInterface struct { + Data interface{} + Errors []api.GraphQLError `json:"errors,omitempty"` +} + +// GraphQLRetrieveSubjectResults retrieves the subjects for a given gitoid. +func (ac *ArchivistaClient) GraphQLRetrieveSubjectResults( + ctx context.Context, + gitoid string, +) (api.RetrieveSubjectResults, error) { + return api.GraphQlQuery[api.RetrieveSubjectResults]( + ctx, + ac.BaseURL, + api.RetrieveSubjectsQuery, + api.RetrieveSubjectVars{Gitoid: gitoid}, + ) +} + +// GraphQLRetrieveSearchResults retrieves the search results for a given algorithm and digest. 
+func (ac *ArchivistaClient) GraphQLRetrieveSearchResults( + ctx context.Context, + algo string, + digest string, +) (api.SearchResults, error) { + return api.GraphQlQuery[api.SearchResults]( + ctx, + ac.BaseURL, + api.SearchQuery, + api.SearchVars{Algorithm: algo, Digest: digest}, + ) +} + +// GraphQLQueryIface executes a GraphQL query against the Archivista API and returns the response as an interface. +// +// Parameters: +// - ctx: The context to control the query's lifecycle, such as cancellations or deadlines. +// - query: A string representing the GraphQL query to be executed. +// - variables: A map or struct containing variables to parameterize the query. +// +// Returns: +// - A pointer to a GraphQLResponseInterface containing the query's result or errors. +// - An error if the query execution or response parsing fails. +// +// Example: +// +// response, err := client.GraphQLQueryIface(ctx, query, variables) +// if err != nil { +// log.Fatalf("GraphQL query failed: %v", err) +// } +// fmt.Printf("Response data: %+v\n", response.Data) +func (ac *ArchivistaClient) GraphQLQueryIface( + ctx context.Context, + query string, + variables interface{}, +) (*GraphQLResponseInterface, error) { + reader, err := ac.GraphQLQueryReadCloser(ctx, query, variables) + if err != nil { + return nil, err + } + defer reader.Close() + gqlRes := GraphQLResponseInterface{} + dec := json.NewDecoder(reader) + if err := dec.Decode(&gqlRes); err != nil { + return nil, err + } + if len(gqlRes.Errors) > 0 { + return nil, fmt.Errorf("graph ql query failed: %v", gqlRes.Errors) + } + return &gqlRes, nil +} + +// GraphQLQueryToDst executes a GraphQL query against the Archivista API and unmarshals the response into a destination object. 
+func (ac *ArchivistaClient) GraphQLQueryToDst(ctx context.Context, query string, variables interface{}, dst interface{}) error { + reader, err := ac.GraphQLQueryReadCloser(ctx, query, variables) + if err != nil { + return err + } + defer reader.Close() + dec := json.NewDecoder(reader) + if err := dec.Decode(&dst); err != nil { + return err + } + return nil +} + +// GraphQLQueryReadCloser executes a GraphQL query against the Archivista API and returns the response as an io.ReadCloser. +func (ac *ArchivistaClient) GraphQLQueryReadCloser( + ctx context.Context, + query string, + variables interface{}, +) (io.ReadCloser, error) { + requestBodyMap := GraphQLRequestBodyInterface{ + Query: query, + Variables: variables, + } + requestBodyJSON, err := json.Marshal(requestBodyMap) + if err != nil { + return nil, err + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, ac.GraphQLURL, bytes.NewReader(requestBodyJSON)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + res, err := ac.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusOK { + defer res.Body.Close() + errMsg, err := io.ReadAll(res.Body) + if err != nil { + return nil, err + } + return nil, errors.New(string(errMsg)) + } + return res.Body, nil +} diff --git a/vendor/github.com/in-toto/attestation/go/predicates/provenance/v1/provenance.pb.go b/vendor/github.com/in-toto/attestation/go/predicates/provenance/v1/provenance.pb.go index 9903990df2..84eab8182e 100644 --- a/vendor/github.com/in-toto/attestation/go/predicates/provenance/v1/provenance.pb.go +++ b/vendor/github.com/in-toto/attestation/go/predicates/provenance/v1/provenance.pb.go @@ -1,8 +1,8 @@ -// Keep in sync with schema at https://github.com/slsa-framework/slsa/blob/main/docs/spec/v1.0/schema/provenance.proto +// Keep in sync with schema at https://github.com/slsa-framework/slsa/blob/main/docs/provenance/schema/v1/provenance.proto // Code generated by protoc-gen-go. 
DO NOT EDIT. // versions: -// protoc-gen-go v1.36.3 +// protoc-gen-go v1.34.1 // protoc v4.24.4 // source: in_toto_attestation/predicates/provenance/v1/provenance.proto @@ -28,18 +28,21 @@ const ( // Proto representation of predicate type https://slsa.dev/provenance/v1 // Validation of all fields is left to the users of this proto. type Provenance struct { - state protoimpl.MessageState `protogen:"open.v1"` - BuildDefinition *BuildDefinition `protobuf:"bytes,1,opt,name=build_definition,json=buildDefinition,proto3" json:"build_definition,omitempty"` - RunDetails *RunDetails `protobuf:"bytes,2,opt,name=run_details,json=runDetails,proto3" json:"run_details,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BuildDefinition *BuildDefinition `protobuf:"bytes,1,opt,name=build_definition,json=buildDefinition,proto3" json:"build_definition,omitempty"` + RunDetails *RunDetails `protobuf:"bytes,2,opt,name=run_details,json=runDetails,proto3" json:"run_details,omitempty"` } func (x *Provenance) Reset() { *x = Provenance{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Provenance) String() string { @@ -50,7 +53,7 @@ func (*Provenance) ProtoMessage() {} func (x *Provenance) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -80,20 +83,23 @@ func (x *Provenance) 
GetRunDetails() *RunDetails { } type BuildDefinition struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + BuildType string `protobuf:"bytes,1,opt,name=build_type,json=buildType,proto3" json:"build_type,omitempty"` ExternalParameters *structpb.Struct `protobuf:"bytes,2,opt,name=external_parameters,json=externalParameters,proto3" json:"external_parameters,omitempty"` InternalParameters *structpb.Struct `protobuf:"bytes,3,opt,name=internal_parameters,json=internalParameters,proto3" json:"internal_parameters,omitempty"` ResolvedDependencies []*v1.ResourceDescriptor `protobuf:"bytes,4,rep,name=resolved_dependencies,json=resolvedDependencies,proto3" json:"resolved_dependencies,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *BuildDefinition) Reset() { *x = BuildDefinition{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *BuildDefinition) String() string { @@ -104,7 +110,7 @@ func (*BuildDefinition) ProtoMessage() {} func (x *BuildDefinition) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -148,19 +154,22 @@ func (x *BuildDefinition) GetResolvedDependencies() []*v1.ResourceDescriptor { } type RunDetails struct { - state protoimpl.MessageState `protogen:"open.v1"` - Builder *Builder `protobuf:"bytes,1,opt,name=builder,proto3" 
json:"builder,omitempty"` - Metadata *BuildMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` - Byproducts []*v1.ResourceDescriptor `protobuf:"bytes,3,rep,name=byproducts,proto3" json:"byproducts,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Builder *Builder `protobuf:"bytes,1,opt,name=builder,proto3" json:"builder,omitempty"` + Metadata *BuildMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + Byproducts []*v1.ResourceDescriptor `protobuf:"bytes,3,rep,name=byproducts,proto3" json:"byproducts,omitempty"` } func (x *RunDetails) Reset() { *x = RunDetails{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RunDetails) String() string { @@ -171,7 +180,7 @@ func (*RunDetails) ProtoMessage() {} func (x *RunDetails) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -208,19 +217,22 @@ func (x *RunDetails) GetByproducts() []*v1.ResourceDescriptor { } type Builder struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Version map[string]string `protobuf:"bytes,2,rep,name=version,proto3" json:"version,omitempty" protobuf_key:"bytes,1,opt,name=key" 
protobuf_val:"bytes,2,opt,name=value"` + Version map[string]string `protobuf:"bytes,2,rep,name=version,proto3" json:"version,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` BuilderDependencies []*v1.ResourceDescriptor `protobuf:"bytes,3,rep,name=builder_dependencies,json=builderDependencies,proto3" json:"builder_dependencies,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *Builder) Reset() { *x = Builder{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Builder) String() string { @@ -231,7 +243,7 @@ func (*Builder) ProtoMessage() {} func (x *Builder) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -268,19 +280,22 @@ func (x *Builder) GetBuilderDependencies() []*v1.ResourceDescriptor { } type BuildMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` - InvocationId string `protobuf:"bytes,1,opt,name=invocation_id,json=invocationId,proto3" json:"invocation_id,omitempty"` - StartedOn *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_on,json=startedOn,proto3" json:"started_on,omitempty"` - FinishedOn *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finished_on,json=finishedOn,proto3" json:"finished_on,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InvocationId 
string `protobuf:"bytes,1,opt,name=invocation_id,json=invocationId,proto3" json:"invocation_id,omitempty"` + StartedOn *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_on,json=startedOn,proto3" json:"started_on,omitempty"` + FinishedOn *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finished_on,json=finishedOn,proto3" json:"finished_on,omitempty"` } func (x *BuildMetadata) Reset() { *x = BuildMetadata{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *BuildMetadata) String() string { @@ -291,7 +306,7 @@ func (*BuildMetadata) ProtoMessage() {} func (x *BuildMetadata) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -443,7 +458,7 @@ func file_in_toto_attestation_predicates_provenance_v1_provenance_proto_rawDescG } var file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_in_toto_attestation_predicates_provenance_v1_provenance_proto_goTypes = []any{ +var file_in_toto_attestation_predicates_provenance_v1_provenance_proto_goTypes = []interface{}{ (*Provenance)(nil), // 0: in_toto_attestation.predicates.provenance.v1.Provenance (*BuildDefinition)(nil), // 1: in_toto_attestation.predicates.provenance.v1.BuildDefinition (*RunDetails)(nil), // 2: in_toto_attestation.predicates.provenance.v1.RunDetails @@ -479,6 +494,68 @@ func file_in_toto_attestation_predicates_provenance_v1_provenance_proto_init() { if 
File_in_toto_attestation_predicates_provenance_v1_provenance_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Provenance); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BuildDefinition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Builder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BuildMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go index ae912f0d1e..51654e954f 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go +++ 
b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go @@ -16,83 +16,16 @@ var ( ErrRDRequiredField = errors.New("at least one of name, URI, or digest are required") ) -type HashAlgorithm string - -const ( - AlgorithmMD5 HashAlgorithm = "md5" - AlgorithmSHA1 HashAlgorithm = "sha1" - AlgorithmSHA224 HashAlgorithm = "sha224" - AlgorithmSHA512_224 HashAlgorithm = "sha512_224" - AlgorithmSHA256 HashAlgorithm = "sha256" - AlgorithmSHA512_256 HashAlgorithm = "sha512_256" - AlgorithmSHA384 HashAlgorithm = "sha384" - AlgorithmSHA512 HashAlgorithm = "sha512" - AlgorithmSHA3_224 HashAlgorithm = "sha3_224" - AlgorithmSHA3_256 HashAlgorithm = "sha3_256" - AlgorithmSHA3_384 HashAlgorithm = "sha3_384" - AlgorithmSHA3_512 HashAlgorithm = "sha3_512" - AlgorithmGitBlob HashAlgorithm = "gitBlob" - AlgorithmGitCommit HashAlgorithm = "gitCommit" - AlgorithmGitTag HashAlgorithm = "gitTag" - AlgorithmGitTree HashAlgorithm = "gitTree" - AlgorithmDirHash HashAlgorithm = "dirHash" -) - -// HashAlgorithms indexes the known algorithms in a dictionary -// by their string value -var HashAlgorithms = map[string]HashAlgorithm{ - "md5": AlgorithmMD5, - "sha1": AlgorithmSHA1, - "sha224": AlgorithmSHA224, - "sha512_224": AlgorithmSHA512_224, - "sha256": AlgorithmSHA256, - "sha512_256": AlgorithmSHA512_256, - "sha384": AlgorithmSHA384, - "sha512": AlgorithmSHA512, - "sha3_224": AlgorithmSHA3_224, - "sha3_256": AlgorithmSHA3_256, - "sha3_384": AlgorithmSHA3_384, - "sha3_512": AlgorithmSHA3_512, - "gitBlob": AlgorithmGitBlob, - "gitCommit": AlgorithmGitCommit, - "gitTag": AlgorithmGitTag, - "gitTree": AlgorithmGitTree, - "dirHash": AlgorithmDirHash, -} - -// HexLength returns the expected length of an algorithm's hash when hexencoded -func (algo HashAlgorithm) HexLength() int { - switch algo { - case AlgorithmMD5: - return 16 - case AlgorithmSHA1, AlgorithmGitBlob, AlgorithmGitCommit, AlgorithmGitTag, AlgorithmGitTree: - return 20 - case AlgorithmSHA224, AlgorithmSHA512_224, 
AlgorithmSHA3_224: - return 28 - case AlgorithmSHA256, AlgorithmSHA512_256, AlgorithmSHA3_256, AlgorithmDirHash: - return 32 - case AlgorithmSHA384, AlgorithmSHA3_384: - return 48 - case AlgorithmSHA512, AlgorithmSHA3_512: - return 64 - default: - return 0 - } -} - -// String returns the hash algorithm name as a string -func (algo HashAlgorithm) String() string { - return string(algo) -} - // Indicates if a given fixed-size hash algorithm is supported by default and returns the algorithm's // digest size in bytes, if supported. We assume gitCommit and dirHash are aliases for sha1 and sha256, respectively. // // SHA digest sizes from https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf // MD5 digest size from https://www.rfc-editor.org/rfc/rfc1321.html#section-1 -func isSupportedFixedSizeAlgorithm(algString string) (bool, int) { - algo := HashAlgorithm(algString) - return algo.HexLength() > 0, algo.HexLength() +func isSupportedFixedSizeAlgorithm(alg string) (bool, int) { + algos := map[string]int{"md5": 16, "sha1": 20, "sha224": 28, "sha512_224": 28, "sha256": 32, "sha512_256": 32, "sha384": 48, "sha512": 64, "sha3_224": 28, "sha3_256": 32, "sha3_384": 48, "sha3_512": 64, "gitCommit": 20, "dirHash": 32} + + size, ok := algos[alg] + return ok, size } func (d *ResourceDescriptor) Validate() error { diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go index 44dca29b5c..3e59869b10 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.3 +// protoc-gen-go v1.34.1 // protoc v4.24.4 // source: in_toto_attestation/v1/resource_descriptor.proto @@ -25,29 +25,32 @@ const ( // https://github.com/in-toto/attestation/blob/main/spec/v1/resource_descriptor.md // Validation of all fields is left to the users of this proto. type ResourceDescriptor struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` - Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` - DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"` - MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` + Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` + DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"` + MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` // Per the Struct protobuf spec, this type corresponds to // a JSON Object, which is truly a map under the hood. 
// So, the Struct a) is still consistent with our specification for // the `annotations` field, and b) has native support in some language // bindings making their use easier in implementations. // See: https://pkg.go.dev/google.golang.org/protobuf/types/known/structpb#Struct - Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"` } func (x *ResourceDescriptor) Reset() { *x = ResourceDescriptor{} - mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ResourceDescriptor) String() string { @@ -58,7 +61,7 @@ func (*ResourceDescriptor) ProtoMessage() {} func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -175,7 +178,7 @@ func file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP() []byte } var file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []any{ +var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []interface{}{ (*ResourceDescriptor)(nil), // 0: in_toto_attestation.v1.ResourceDescriptor nil, // 1: in_toto_attestation.v1.ResourceDescriptor.DigestEntry (*structpb.Struct)(nil), // 2: google.protobuf.Struct @@ -195,6 +198,20 @@ func 
file_in_toto_attestation_v1_resource_descriptor_proto_init() { if File_in_toto_attestation_v1_resource_descriptor_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go index bb5c4567d9..a2bd2c2d7d 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.3 +// protoc-gen-go v1.34.1 // protoc v4.24.4 // source: in_toto_attestation/v1/statement.proto @@ -25,21 +25,24 @@ const ( // https://github.com/in-toto/attestation/tree/main/spec/v1 // Validation of all fields is left to the users of this proto. 
type Statement struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Expected to always be "https://in-toto.io/Statement/v1" Type string `protobuf:"bytes,1,opt,name=type,json=_type,proto3" json:"type,omitempty"` Subject []*ResourceDescriptor `protobuf:"bytes,2,rep,name=subject,proto3" json:"subject,omitempty"` PredicateType string `protobuf:"bytes,3,opt,name=predicate_type,json=predicateType,proto3" json:"predicate_type,omitempty"` Predicate *structpb.Struct `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *Statement) Reset() { *x = Statement{} - mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Statement) String() string { @@ -50,7 +53,7 @@ func (*Statement) ProtoMessage() {} func (x *Statement) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -138,7 +141,7 @@ func file_in_toto_attestation_v1_statement_proto_rawDescGZIP() []byte { } var file_in_toto_attestation_v1_statement_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_in_toto_attestation_v1_statement_proto_goTypes = []any{ +var file_in_toto_attestation_v1_statement_proto_goTypes = []interface{}{ (*Statement)(nil), // 0: in_toto_attestation.v1.Statement (*ResourceDescriptor)(nil), // 1: in_toto_attestation.v1.ResourceDescriptor (*structpb.Struct)(nil), // 2: 
google.protobuf.Struct @@ -159,6 +162,20 @@ func file_in_toto_attestation_v1_statement_proto_init() { return } file_in_toto_attestation_v1_resource_descriptor_proto_init() + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_statement_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Statement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/in-toto/go-witness/LICENSE b/vendor/github.com/in-toto/go-witness/LICENSE new file mode 100644 index 0000000000..c54e7d1566 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 TestifySec, LLC. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/digestset.go b/vendor/github.com/in-toto/go-witness/cryptoutil/digestset.go new file mode 100644 index 0000000000..3b91a8a08e --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/digestset.go @@ -0,0 +1,292 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cryptoutil + +import ( + "bytes" + "crypto" + "encoding/json" + "fmt" + "hash" + "io" + "os" + + "golang.org/x/mod/sumdb/dirhash" +) + +var ( + hashNames = map[DigestValue]string{ + { + Hash: crypto.SHA256, + GitOID: false, + DirHash: false, + }: "sha256", + { + Hash: crypto.SHA1, + GitOID: false, + DirHash: false, + }: "sha1", + { + Hash: crypto.SHA256, + GitOID: true, + DirHash: false, + }: "gitoid:sha256", + { + Hash: crypto.SHA1, + GitOID: true, + DirHash: false, + }: "gitoid:sha1", + { + Hash: crypto.SHA256, + GitOID: false, + DirHash: true, + }: "dirHash", + } + + hashesByName = map[string]DigestValue{ + "sha256": { + crypto.SHA256, + false, + false, + }, + "sha1": { + crypto.SHA1, + false, + false, + }, + "gitoid:sha256": { + crypto.SHA256, + true, + false, + }, + "gitoid:sha1": { + crypto.SHA1, + true, + false, + }, + "dirHash": { + crypto.SHA256, + false, + true, + }, + } +) + +type ErrUnsupportedHash string + +func (e ErrUnsupportedHash) Error() string { + return fmt.Sprintf("unsupported hash function: %v", string(e)) +} + +type DigestValue struct { + crypto.Hash + GitOID bool + DirHash bool +} + +func (dv DigestValue) New() hash.Hash { + if dv.GitOID { + return &gitoidHasher{hash: dv.Hash, buf: &bytes.Buffer{}} + } + + return dv.Hash.New() +} + +type DigestSet map[DigestValue]string + +func HashToString(h crypto.Hash) (string, error) { + if name, ok := hashNames[DigestValue{Hash: h}]; ok { + return name, nil + } + + return "", ErrUnsupportedHash(h.String()) +} + +func HashFromString(name string) (crypto.Hash, error) { + if hash, ok := hashesByName[name]; ok { + return hash.Hash, nil + } + + return crypto.Hash(0), ErrUnsupportedHash(name) +} + +// Equal returns true if every digest for hash functions both artifacts have in common are equal. +// If the two artifacts don't have any digests from common hash functions, equal will return false. +// If any digest from common hash functions differ between the two artifacts, equal will return false. 
+func (ds *DigestSet) Equal(second DigestSet) bool { + hasMatchingDigest := false + for hash, digest := range *ds { + otherDigest, ok := second[hash] + if !ok { + continue + } + + if digest == otherDigest { + hasMatchingDigest = true + } else { + return false + } + } + + return hasMatchingDigest +} + +func (ds *DigestSet) ToNameMap() (map[string]string, error) { + nameMap := make(map[string]string) + for hash, digest := range *ds { + name, ok := hashNames[hash] + if !ok { + return nameMap, ErrUnsupportedHash(hash.String()) + } + + nameMap[name] = digest + } + + return nameMap, nil +} + +func NewDigestSet(digestsByName map[string]string) (DigestSet, error) { + ds := make(DigestSet) + for hashName, digest := range digestsByName { + hash, ok := hashesByName[hashName] + if !ok { + return ds, ErrUnsupportedHash(hashName) + } + + ds[hash] = digest + } + + return ds, nil +} + +func CalculateDigestSet(r io.Reader, digestValues []DigestValue) (DigestSet, error) { + digestSet := make(DigestSet) + writers := []io.Writer{} + hashfuncs := map[DigestValue]hash.Hash{} + for _, digestValue := range digestValues { + hashfunc := digestValue.New() + hashfuncs[digestValue] = hashfunc + writers = append(writers, hashfunc) + } + + multiwriter := io.MultiWriter(writers...) + if _, err := io.Copy(multiwriter, r); err != nil { + return digestSet, err + } + + for digestValue, hashfunc := range hashfuncs { + // gitoids are somewhat special... we're using a custom implementation of hash.Hash + // to wrap the gitoid library. Sum will return a gitoid URI, so we don't want to hex + // encode it as it's already a string with a hex encoded hash. 
+ if digestValue.GitOID { + digestSet[digestValue] = string(hashfunc.Sum(nil)) + continue + } + + digestSet[digestValue] = string(HexEncode(hashfunc.Sum(nil))) + } + + return digestSet, nil +} + +func CalculateDigestSetFromBytes(data []byte, hashes []DigestValue) (DigestSet, error) { + return CalculateDigestSet(bytes.NewReader(data), hashes) +} + +func CalculateDigestSetFromFile(path string, hashes []DigestValue) (DigestSet, error) { + file, err := os.Open(path) + if err != nil { + return DigestSet{}, err + } + + hashable, err := isHashableFile(file) + if err != nil { + return DigestSet{}, err + } + + if !hashable { + return DigestSet{}, fmt.Errorf("%s is not a hashable file", path) + } + + defer file.Close() + return CalculateDigestSet(file, hashes) +} + +func CalculateDigestSetFromDir(dir string, hashes []DigestValue) (DigestSet, error) { + + dirHash, err := dirhash.HashDir(dir, "", DirhHashSha256) + if err != nil { + return nil, err + } + + digestSetByName := make(map[string]string) + digestSetByName["dirHash"] = dirHash + + return NewDigestSet(digestSetByName) +} + +func (ds DigestSet) MarshalJSON() ([]byte, error) { + nameMap, err := ds.ToNameMap() + if err != nil { + return nil, err + } + + return json.Marshal(nameMap) +} + +func (ds *DigestSet) UnmarshalJSON(data []byte) error { + nameMap := make(map[string]string) + err := json.Unmarshal(data, &nameMap) + if err != nil { + return err + } + + newDs, err := NewDigestSet(nameMap) + if err != nil { + return err + } + + *ds = newDs + return nil +} + +func isHashableFile(f *os.File) (bool, error) { + stat, err := f.Stat() + if err != nil { + return false, err + } + + mode := stat.Mode() + + isSpecial := stat.Mode()&os.ModeCharDevice != 0 + + if isSpecial { + return false, nil + } + + if mode.IsRegular() { + return true, nil + } + + if mode.Perm().IsDir() { + return true, nil + } + + if mode&os.ModeSymlink == 1 { + return true, nil + } + + return false, nil +} diff --git 
a/vendor/github.com/in-toto/go-witness/cryptoutil/dirhash.go b/vendor/github.com/in-toto/go-witness/cryptoutil/dirhash.go new file mode 100644 index 0000000000..044a2b1519 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/dirhash.go @@ -0,0 +1,61 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutil + +import ( + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io" + "sort" + "strings" +) + +// DirHashSha256 is the "h1:" directory hash function, using SHA-256. +// +// DirHashSha256 returns a SHA-256 hash of a summary +// prepared as if by the Unix command: +// +// sha256sum $(find . -type f | sort) | sha256sum +// +// More precisely, the hashed summary contains a single line for each file in the list, +// ordered by sort.Strings applied to the file names, where each line consists of +// the hexadecimal SHA-256 hash of the file content, +// two spaces (U+0020), the file name, and a newline (U+000A). +// +// File names with newlines (U+000A) are disallowed. +func DirhHashSha256(files []string, open func(string) (io.ReadCloser, error)) (string, error) { + h := sha256.New() + files = append([]string(nil), files...) 
+ sort.Strings(files) + for _, file := range files { + if strings.Contains(file, "\n") { + return "", errors.New("dirhash: filenames with newlines are not supported") + } + r, err := open(file) + if err != nil { + return "", err + } + hf := sha256.New() + _, err = io.Copy(hf, r) + r.Close() + if err != nil { + return "", err + } + fmt.Fprintf(h, "%x %s\n", hf.Sum(nil), file) + } + return hex.EncodeToString(h.Sum(nil)), nil +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/ecdsa.go b/vendor/github.com/in-toto/go-witness/cryptoutil/ecdsa.go new file mode 100644 index 0000000000..172ad97831 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/ecdsa.go @@ -0,0 +1,85 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cryptoutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "io" +) + +type ErrVerifyFailed struct{} + +func (e ErrVerifyFailed) Error() string { + return "verification failed" +} + +type ECDSASigner struct { + priv *ecdsa.PrivateKey + hash crypto.Hash +} + +func NewECDSASigner(priv *ecdsa.PrivateKey, hash crypto.Hash) *ECDSASigner { + return &ECDSASigner{priv, hash} +} + +func (s *ECDSASigner) KeyID() (string, error) { + return GeneratePublicKeyID(&s.priv.PublicKey, s.hash) +} + +func (s *ECDSASigner) Sign(r io.Reader) ([]byte, error) { + digest, err := Digest(r, s.hash) + if err != nil { + return nil, err + } + + return ecdsa.SignASN1(rand.Reader, s.priv, digest) +} + +func (s *ECDSASigner) Verifier() (Verifier, error) { + return NewECDSAVerifier(&s.priv.PublicKey, s.hash), nil +} + +type ECDSAVerifier struct { + pub *ecdsa.PublicKey + hash crypto.Hash +} + +func NewECDSAVerifier(pub *ecdsa.PublicKey, hash crypto.Hash) *ECDSAVerifier { + return &ECDSAVerifier{pub, hash} +} + +func (v *ECDSAVerifier) KeyID() (string, error) { + return GeneratePublicKeyID(v.pub, v.hash) +} + +func (v *ECDSAVerifier) Verify(data io.Reader, sig []byte) error { + digest, err := Digest(data, v.hash) + if err != nil { + return err + } + + verified := ecdsa.VerifyASN1(v.pub, digest, sig) + if !verified { + return ErrVerifyFailed{} + } + + return nil +} + +func (v *ECDSAVerifier) Bytes() ([]byte, error) { + return PublicPemBytes(v.pub) +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/ed25519.go b/vendor/github.com/in-toto/go-witness/cryptoutil/ed25519.go new file mode 100644 index 0000000000..35f3741300 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/ed25519.go @@ -0,0 +1,83 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutil + +import ( + "crypto" + "crypto/ed25519" + "fmt" + "io" +) + +type ED25519Signer struct { + priv ed25519.PrivateKey +} + +func NewED25519Signer(priv ed25519.PrivateKey) *ED25519Signer { + return &ED25519Signer{priv} +} + +func (s *ED25519Signer) KeyID() (string, error) { + return GeneratePublicKeyID(s.priv.Public(), crypto.SHA256) +} + +func (s *ED25519Signer) Sign(r io.Reader) ([]byte, error) { + msg, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + return ed25519.Sign(s.priv, msg), nil +} + +func (s *ED25519Signer) Verifier() (Verifier, error) { + pubKey := s.priv.Public() + edPubKey, ok := pubKey.(ed25519.PublicKey) + if !ok { + return nil, ErrUnsupportedKeyType{t: fmt.Sprintf("%T", edPubKey)} + } + + return NewED25519Verifier(edPubKey), nil +} + +type ED25519Verifier struct { + pub ed25519.PublicKey +} + +func NewED25519Verifier(pub ed25519.PublicKey) *ED25519Verifier { + return &ED25519Verifier{pub} +} + +func (v *ED25519Verifier) KeyID() (string, error) { + return GeneratePublicKeyID(v.pub, crypto.SHA256) +} + +func (v *ED25519Verifier) Verify(r io.Reader, sig []byte) error { + msg, err := io.ReadAll(r) + if err != nil { + return err + } + + verified := ed25519.Verify(v.pub, msg, sig) + if !verified { + return ErrVerifyFailed{} + } + + return nil +} + +func (v *ED25519Verifier) Bytes() ([]byte, error) { + return PublicPemBytes(v.pub) +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/gitoid.go b/vendor/github.com/in-toto/go-witness/cryptoutil/gitoid.go new file mode 100644 index 
0000000000..f3fe365ca1 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/gitoid.go @@ -0,0 +1,85 @@ +// Copyright 2023 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutil + +import ( + "bytes" + "crypto" + "encoding/hex" + "fmt" + + "github.com/edwarnicke/gitoid" +) + +// gitoidHasher implements io.Writer so we can generate gitoids with our CalculateDigestSet function. +// CalculateDigestSet takes in an io.Reader pointing to some data we want to hash, and writes it to a +// MultiWriter that forwards it to writers for each hash we wish to calculate. +// This is a bit hacky -- it maintains an internal buffer and then when asked for the Sum, it calculates +// the gitoid. We may be able to contribute to the gitoid library to make this smoother +type gitoidHasher struct { + buf *bytes.Buffer + hash crypto.Hash +} + +// Write implments the io.Writer interface, and writes to the internal buffer +func (gh *gitoidHasher) Write(p []byte) (n int, err error) { + return gh.buf.Write(p) +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (gh *gitoidHasher) Sum(b []byte) []byte { + opts := []gitoid.Option{} + if gh.hash == crypto.SHA256 { + opts = append(opts, gitoid.WithSha256()) + } + + g, err := gitoid.New(gh.buf, opts...) + if err != nil { + return []byte{} + } + + return append(b, []byte(g.URI())...) 
+} + +// Reset resets the Hash to its initial state. +func (gh *gitoidHasher) Reset() { + gh.buf = &bytes.Buffer{} +} + +// Size returns the number of bytes Sum will return. +func (gh *gitoidHasher) Size() int { + hashName, err := HashToString(gh.hash) + if err != nil { + return 0 + } + + // this is somewhat fragile and knows too much about the internals of the gitoid code... + // we're assuming that the default gitoid content type will remain BLOB, and that our + // string representations of hash functions will remain consistent with their... + // and that the URI format will remain consistent. + // this should probably be changed, and this entire thing could maybe be upstreamed to the + // gitoid library. + return len(fmt.Sprintf("gitoid:%s:%s:", gitoid.BLOB, hashName)) + hex.EncodedLen(gh.hash.Size()) +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (gh *gitoidHasher) BlockSize() int { + hf := gh.hash.New() + return hf.BlockSize() +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/rsa.go b/vendor/github.com/in-toto/go-witness/cryptoutil/rsa.go new file mode 100644 index 0000000000..a3d617fcd4 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/rsa.go @@ -0,0 +1,89 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutil + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "io" +) + +type RSASigner struct { + priv *rsa.PrivateKey + hash crypto.Hash +} + +func NewRSASigner(priv *rsa.PrivateKey, hash crypto.Hash) *RSASigner { + return &RSASigner{priv, hash} +} + +func (s *RSASigner) KeyID() (string, error) { + return GeneratePublicKeyID(&s.priv.PublicKey, s.hash) +} + +func (s *RSASigner) Sign(r io.Reader) ([]byte, error) { + digest, err := Digest(r, s.hash) + if err != nil { + return nil, err + } + + opts := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: s.hash, + } + + return rsa.SignPSS(rand.Reader, s.priv, s.hash, digest, opts) +} + +func (s *RSASigner) Verifier() (Verifier, error) { + return NewRSAVerifier(&s.priv.PublicKey, s.hash), nil +} + +type RSAVerifier struct { + pub *rsa.PublicKey + hash crypto.Hash +} + +func NewRSAVerifier(pub *rsa.PublicKey, hash crypto.Hash) *RSAVerifier { + return &RSAVerifier{pub, hash} +} + +func (v *RSAVerifier) KeyID() (string, error) { + return GeneratePublicKeyID(v.pub, v.hash) +} + +func (v *RSAVerifier) Verify(data io.Reader, sig []byte) error { + digest, err := Digest(data, v.hash) + if err != nil { + return err + } + + pssOpts := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: v.hash, + } + + // AWS KMS introduces the chance that attestations get signed by PKCS1v15 instead of PSS + if err := rsa.VerifyPSS(v.pub, v.hash, digest, sig, pssOpts); err != nil { + return rsa.VerifyPKCS1v15(v.pub, v.hash, digest, sig) + } + + return nil +} + +func (v *RSAVerifier) Bytes() ([]byte, error) { + return PublicPemBytes(v.pub) +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/signer.go b/vendor/github.com/in-toto/go-witness/cryptoutil/signer.go new file mode 100644 index 0000000000..7c9dfbcaa2 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/signer.go @@ -0,0 
+1,121 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "fmt" + "io" +) + +type ErrUnsupportedKeyType struct { + t string +} + +func (e ErrUnsupportedKeyType) Error() string { + return fmt.Sprintf("unsupported signer key type: %v", e.t) +} + +type Signer interface { + KeyIdentifier + Sign(r io.Reader) ([]byte, error) + Verifier() (Verifier, error) +} + +type KeyIdentifier interface { + KeyID() (string, error) +} + +type TrustBundler interface { + Certificate() *x509.Certificate + Intermediates() []*x509.Certificate + Roots() []*x509.Certificate +} + +type SignerOption func(*signerOptions) + +type signerOptions struct { + cert *x509.Certificate + intermediates []*x509.Certificate + roots []*x509.Certificate + hash crypto.Hash +} + +func SignWithCertificate(cert *x509.Certificate) SignerOption { + return func(so *signerOptions) { + so.cert = cert + } +} + +func SignWithIntermediates(intermediates []*x509.Certificate) SignerOption { + return func(so *signerOptions) { + so.intermediates = intermediates + } +} + +func SignWithRoots(roots []*x509.Certificate) SignerOption { + return func(so *signerOptions) { + so.roots = roots + } +} + +func SignWithHash(h crypto.Hash) SignerOption { + return func(so *signerOptions) { + so.hash = h + } +} + +func NewSigner(priv interface{}, opts ...SignerOption) (Signer, 
error) { + options := &signerOptions{ + hash: crypto.SHA256, + } + + for _, opt := range opts { + opt(options) + } + + var signer Signer + switch key := priv.(type) { + case *rsa.PrivateKey: + signer = NewRSASigner(key, options.hash) + case *ecdsa.PrivateKey: + signer = NewECDSASigner(key, options.hash) + case ed25519.PrivateKey: + signer = NewED25519Signer(key) + default: + return nil, ErrUnsupportedKeyType{ + t: fmt.Sprintf("%T", priv), + } + } + + if options.cert != nil { + return NewX509Signer(signer, options.cert, options.intermediates, options.roots) + } + + return signer, nil +} + +func NewSignerFromReader(r io.Reader, opts ...SignerOption) (Signer, error) { + key, err := TryParseKeyFromReader(r) + if err != nil { + return nil, err + } + + return NewSigner(key, opts...) +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/util.go b/vendor/github.com/in-toto/go-witness/cryptoutil/util.go new file mode 100644 index 0000000000..2b96280be9 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/util.go @@ -0,0 +1,201 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cryptoutil + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" +) + +// PEMType is a specific type for string constants used during PEM encoding and decoding +type PEMType string + +const ( + // PublicKeyPEMType is the string "PUBLIC KEY" to be used during PEM encoding and decoding + PublicKeyPEMType PEMType = "PUBLIC KEY" + // PKCS1PublicKeyPEMType is the string "RSA PUBLIC KEY" used to parse PKCS#1-encoded public keys + PKCS1PublicKeyPEMType PEMType = "RSA PUBLIC KEY" +) + +type ErrUnsupportedPEM struct { + t string +} + +func (e ErrUnsupportedPEM) Error() string { + return fmt.Sprintf("unsupported pem type: %v", e.t) +} + +type ErrInvalidPemBlock struct{} + +func (e ErrInvalidPemBlock) Error() string { + return "invalid pem block" +} + +func DigestBytes(data []byte, hash crypto.Hash) ([]byte, error) { + return Digest(bytes.NewReader(data), hash) +} + +func Digest(r io.Reader, hash crypto.Hash) ([]byte, error) { + hashFunc := hash.New() + if _, err := io.Copy(hashFunc, r); err != nil { + return nil, err + } + + return hashFunc.Sum(nil), nil +} + +func HexEncode(src []byte) []byte { + dst := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(dst, src) + return dst +} + +func GeneratePublicKeyID(pub interface{}, hash crypto.Hash) (string, error) { + pemBytes, err := PublicPemBytes(pub) + if err != nil { + return "", err + } + + digest, err := DigestBytes(pemBytes, hash) + if err != nil { + return "", err + } + + return string(HexEncode(digest)), nil +} + +func PublicPemBytes(pub interface{}) ([]byte, error) { + keyBytes, err := x509.MarshalPKIXPublicKey(pub) + if err != nil { + return nil, err + } + + pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: keyBytes}) + if err != nil { + return nil, err + } + + return pemBytes, err +} + +// UnmarshalPEMToPublicKey converts a PEM-encoded byte slice into a crypto.PublicKey +func UnmarshalPEMToPublicKey(pemBytes []byte) (crypto.PublicKey, 
error) { + derBytes, _ := pem.Decode(pemBytes) + if derBytes == nil { + return nil, errors.New("PEM decoding failed") + } + switch derBytes.Type { + case string(PublicKeyPEMType): + return x509.ParsePKIXPublicKey(derBytes.Bytes) + case string(PKCS1PublicKeyPEMType): + return x509.ParsePKCS1PublicKey(derBytes.Bytes) + default: + return nil, fmt.Errorf("unknown Public key PEM file type: %v. Are you passing the correct public key?", + derBytes.Type) + } +} + +func TryParsePEMBlock(block *pem.Block) (interface{}, error) { + if block == nil { + return nil, ErrInvalidPemBlock{} + } + + key, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err == nil { + return key, err + } + + key, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err == nil { + return key, err + } + + key, err = x509.ParseECPrivateKey(block.Bytes) + if err == nil { + return key, err + } + + key, err = x509.ParsePKIXPublicKey(block.Bytes) + if err == nil { + return key, err + } + + key, err = x509.ParsePKCS1PublicKey(block.Bytes) + if err == nil { + return key, err + } + + key, err = x509.ParseCertificate(block.Bytes) + if err == nil { + return key, err + } + + return nil, ErrUnsupportedPEM{block.Type} +} + +func TryParseKeyFromReader(r io.Reader) (interface{}, error) { + bytes, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + // we may want to handle files with multiple pem blocks in them, but for now... 
+ pemBlock, _ := pem.Decode(bytes) + return TryParsePEMBlock(pemBlock) +} + +func TryParseCertificate(data []byte) (*x509.Certificate, error) { + possibleCert, err := TryParseKeyFromReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + + cert, ok := possibleCert.(*x509.Certificate) + if !ok { + return nil, fmt.Errorf("data was a valid verifier but not a certificate") + } + + return cert, nil +} + +// ComputeDigest calculates the digest value for the specified message using the supplied hash function +func ComputeDigest(rawMessage io.Reader, hashFunc crypto.Hash, supportedHashFuncs []crypto.Hash) ([]byte, crypto.Hash, error) { + var cryptoSignerOpts crypto.SignerOpts = hashFunc + hashedWith := cryptoSignerOpts.HashFunc() + if !isSupportedAlg(hashedWith, supportedHashFuncs) { + return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs) + } + + digest, err := Digest(rawMessage, hashedWith) + return digest, hashedWith, err +} + +func isSupportedAlg(alg crypto.Hash, supportedAlgs []crypto.Hash) bool { + if supportedAlgs == nil { + return true + } + for _, supportedAlg := range supportedAlgs { + if alg == supportedAlg { + return true + } + } + return false +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/verifier.go b/vendor/github.com/in-toto/go-witness/cryptoutil/verifier.go new file mode 100644 index 0000000000..b243e286ec --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/verifier.go @@ -0,0 +1,99 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "fmt" + "io" + "time" +) + +type Verifier interface { + KeyIdentifier + Verify(body io.Reader, sig []byte) error + Bytes() ([]byte, error) +} + +type VerifierOption func(*verifierOptions) + +type verifierOptions struct { + roots []*x509.Certificate + intermediates []*x509.Certificate + hash crypto.Hash + trustedTime time.Time +} + +func VerifyWithRoots(roots []*x509.Certificate) VerifierOption { + return func(vo *verifierOptions) { + vo.roots = roots + } +} + +func VerifyWithIntermediates(intermediates []*x509.Certificate) VerifierOption { + return func(vo *verifierOptions) { + vo.intermediates = intermediates + } +} + +func VerifyWithHash(h crypto.Hash) VerifierOption { + return func(vo *verifierOptions) { + vo.hash = h + } +} + +func VerifyWithTrustedTime(t time.Time) VerifierOption { + return func(vo *verifierOptions) { + vo.trustedTime = t + } +} + +func NewVerifier(pub interface{}, opts ...VerifierOption) (Verifier, error) { + options := &verifierOptions{ + hash: crypto.SHA256, + } + + for _, opt := range opts { + opt(options) + } + + switch key := pub.(type) { + case *rsa.PublicKey: + return NewRSAVerifier(key, options.hash), nil + case *ecdsa.PublicKey: + return NewECDSAVerifier(key, options.hash), nil + case ed25519.PublicKey: + return NewED25519Verifier(key), nil + case *x509.Certificate: + return NewX509Verifier(key, options.intermediates, options.roots, options.trustedTime) + default: + return nil, ErrUnsupportedKeyType{ + t: 
fmt.Sprintf("%T", pub), + } + } +} + +func NewVerifierFromReader(r io.Reader, opts ...VerifierOption) (Verifier, error) { + key, err := TryParseKeyFromReader(r) + if err != nil { + return nil, err + } + + return NewVerifier(key, opts...) +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/x509.go b/vendor/github.com/in-toto/go-witness/cryptoutil/x509.go new file mode 100644 index 0000000000..4bf2217c59 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/x509.go @@ -0,0 +1,173 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cryptoutil + +import ( + "crypto/x509" + "encoding/pem" + "io" + "time" +) + +type X509Verifier struct { + cert *x509.Certificate + roots []*x509.Certificate + intermediates []*x509.Certificate + verifier Verifier + trustedTime time.Time +} + +func NewX509Verifier(cert *x509.Certificate, intermediates, roots []*x509.Certificate, trustedTime time.Time) (*X509Verifier, error) { + verifier, err := NewVerifier(cert.PublicKey) + if err != nil { + return nil, err + } + + return &X509Verifier{ + cert: cert, + roots: roots, + intermediates: intermediates, + verifier: verifier, + trustedTime: trustedTime, + }, nil +} + +func (v *X509Verifier) KeyID() (string, error) { + return v.verifier.KeyID() +} + +func (v *X509Verifier) Verify(body io.Reader, sig []byte) error { + rootPool := certificatesToPool(v.roots) + intermediatePool := certificatesToPool(v.intermediates) + if _, err := v.cert.Verify(x509.VerifyOptions{ + CurrentTime: v.trustedTime, + Roots: rootPool, + Intermediates: intermediatePool, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + }); err != nil { + return err + } + + return v.verifier.Verify(body, sig) +} + +func (v *X509Verifier) BelongsToRoot(root *x509.Certificate) error { + rootPool := certificatesToPool([]*x509.Certificate{root}) + intermediatePool := certificatesToPool(v.intermediates) + _, err := v.cert.Verify(x509.VerifyOptions{ + Roots: rootPool, + Intermediates: intermediatePool, + CurrentTime: v.trustedTime, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + }) + + return err +} + +func (v *X509Verifier) Bytes() ([]byte, error) { + pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: v.cert.Raw}) + return pemBytes, nil +} + +func (v *X509Verifier) Certificate() *x509.Certificate { + return v.cert +} + +func (v *X509Verifier) Intermediates() []*x509.Certificate { + return v.intermediates +} + +func (v *X509Verifier) Roots() []*x509.Certificate { + return v.roots +} + +type X509Signer struct { + cert 
*x509.Certificate + roots []*x509.Certificate + intermediates []*x509.Certificate + signer Signer +} + +type ErrInvalidSigner struct{} + +func (e ErrInvalidSigner) Error() string { + return "signer must not be nil" +} + +type ErrInvalidCertificate struct{} + +func (e ErrInvalidCertificate) Error() string { + return "certificate must not be nil" +} + +func NewX509Signer(signer Signer, cert *x509.Certificate, intermediates, roots []*x509.Certificate) (*X509Signer, error) { + if signer == nil { + return nil, ErrInvalidSigner{} + } + + if cert == nil { + return nil, ErrInvalidCertificate{} + } + + return &X509Signer{ + signer: signer, + cert: cert, + roots: roots, + intermediates: intermediates, + }, nil +} + +func (s *X509Signer) KeyID() (string, error) { + return s.signer.KeyID() +} + +func (s *X509Signer) Sign(r io.Reader) ([]byte, error) { + return s.signer.Sign(r) +} + +func (s *X509Signer) Verifier() (Verifier, error) { + verifier, err := s.signer.Verifier() + if err != nil { + return nil, err + } + + return &X509Verifier{ + verifier: verifier, + cert: s.cert, + roots: s.roots, + intermediates: s.intermediates, + }, nil +} + +func (s *X509Signer) Certificate() *x509.Certificate { + return s.cert +} + +func (s *X509Signer) Intermediates() []*x509.Certificate { + return s.intermediates +} + +func (s *X509Signer) Roots() []*x509.Certificate { + return s.roots +} + +func certificatesToPool(certs []*x509.Certificate) *x509.CertPool { + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + + return pool +} diff --git a/vendor/github.com/in-toto/go-witness/dsse/dsse.go b/vendor/github.com/in-toto/go-witness/dsse/dsse.go new file mode 100644 index 0000000000..81d13e2249 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/dsse/dsse.go @@ -0,0 +1,96 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dsse + +import ( + "fmt" + + "github.com/in-toto/go-witness/log" +) + +type ErrNoSignatures struct{} + +func (e ErrNoSignatures) Error() string { + return "no signatures in dsse envelope" +} + +type ErrNoMatchingSigs struct { + Verifiers []CheckedVerifier +} + +func (e ErrNoMatchingSigs) Error() string { + mess := "no valid signatures for the provided verifiers found for keyids:\n" + for _, v := range e.Verifiers { + if v.Error != nil { + kid, err := v.Verifier.KeyID() + if err != nil { + log.Warnf("failed to get key id from verifier: %w", err) + } + + s := fmt.Sprintf(" %s: %v\n", kid, v.Error) + mess += s + } + } + + return mess +} + +type ErrThresholdNotMet struct { + Theshold int + Actual int +} + +func (e ErrThresholdNotMet) Error() string { + return fmt.Sprintf("envelope did not meet verifier threshold. expected %v valid verifiers but got %v", e.Theshold, e.Actual) +} + +type ErrInvalidThreshold int + +func (e ErrInvalidThreshold) Error() string { + return fmt.Sprintf("invalid threshold (%v). 
thresholds must be greater than 0", int(e)) +} + +const PemTypeCertificate = "CERTIFICATE" + +type Envelope struct { + Payload []byte `json:"payload"` + PayloadType string `json:"payloadType"` + Signatures []Signature `json:"signatures"` +} + +type Signature struct { + KeyID string `json:"keyid"` + Signature []byte `json:"sig"` + Certificate []byte `json:"certificate,omitempty"` + Intermediates [][]byte `json:"intermediates,omitempty"` + Timestamps []SignatureTimestamp `json:"timestamps,omitempty"` +} + +type SignatureTimestampType string + +const TimestampRFC3161 SignatureTimestampType = "tsp" + +type SignatureTimestamp struct { + Type SignatureTimestampType `json:"type"` + Data []byte `json:"data"` +} + +// preauthEncode wraps the data to be signed or verified and it's type in the DSSE protocol's +// pre-authentication encoding as detailed at https://github.com/secure-systems-lab/dsse/blob/master/protocol.md +// PAE(type, body) = "DSSEv1" + SP + LEN(type) + SP + type + SP + LEN(body) + SP + body +func preauthEncode(bodyType string, body []byte) []byte { + const dsseVersion = "DSSEv1" + return []byte(fmt.Sprintf("%s %d %s %d %s", dsseVersion, len(bodyType), bodyType, len(body), body)) +} diff --git a/vendor/github.com/in-toto/go-witness/dsse/sign.go b/vendor/github.com/in-toto/go-witness/dsse/sign.go new file mode 100644 index 0000000000..267ec079b3 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/dsse/sign.go @@ -0,0 +1,115 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package dsse + +import ( + "bytes" + "context" + "encoding/pem" + "fmt" + "io" + + "github.com/in-toto/go-witness/cryptoutil" + "github.com/in-toto/go-witness/timestamp" +) + +type signOptions struct { + signers []cryptoutil.Signer + timestampers []timestamp.Timestamper +} + +type SignOption func(*signOptions) + +func SignWithSigners(signers ...cryptoutil.Signer) SignOption { + return func(so *signOptions) { + so.signers = signers + } +} + +func SignWithTimestampers(timestampers ...timestamp.Timestamper) SignOption { + return func(so *signOptions) { + so.timestampers = timestampers + } +} + +func Sign(bodyType string, body io.Reader, opts ...SignOption) (Envelope, error) { + so := &signOptions{} + env := Envelope{} + for _, opt := range opts { + opt(so) + } + + if len(so.signers) == 0 { + return env, fmt.Errorf("must have at least one signer, have %v", len(so.signers)) + } + + bodyBytes, err := io.ReadAll(body) + if err != nil { + return env, err + } + + env.PayloadType = bodyType + env.Payload = bodyBytes + env.Signatures = make([]Signature, 0) + pae := preauthEncode(bodyType, bodyBytes) + for _, signer := range so.signers { + if signer == nil { + continue + } + + sig, err := signer.Sign(bytes.NewReader(pae)) + if err != nil { + return env, err + } + + keyID, err := signer.KeyID() + if err != nil { + return env, err + } + + dsseSig := Signature{ + KeyID: keyID, + Signature: sig, + } + + for _, timestamper := range so.timestampers { + timestamp, err := timestamper.Timestamp(context.TODO(), bytes.NewReader(sig)) + if err != nil { + return env, err + } + + dsseSig.Timestamps = append(dsseSig.Timestamps, SignatureTimestamp{ + Type: TimestampRFC3161, + Data: timestamp, + }) + } + + if trustBundler, ok := signer.(cryptoutil.TrustBundler); ok { + leaf := trustBundler.Certificate() + intermediates := trustBundler.Intermediates() + if leaf != nil { + 
dsseSig.Certificate = pem.EncodeToMemory(&pem.Block{Type: PemTypeCertificate, Bytes: leaf.Raw}) + } + + for _, intermediate := range intermediates { + dsseSig.Intermediates = append(dsseSig.Intermediates, pem.EncodeToMemory(&pem.Block{Type: PemTypeCertificate, Bytes: intermediate.Raw})) + } + } + + env.Signatures = append(env.Signatures, dsseSig) + } + + return env, nil +} diff --git a/vendor/github.com/in-toto/go-witness/dsse/verify.go b/vendor/github.com/in-toto/go-witness/dsse/verify.go new file mode 100644 index 0000000000..9c94a7446c --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/dsse/verify.go @@ -0,0 +1,201 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dsse + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "time" + + "github.com/in-toto/go-witness/cryptoutil" + "github.com/in-toto/go-witness/log" + "github.com/in-toto/go-witness/timestamp" +) + +type verificationOptions struct { + roots []*x509.Certificate + intermediates []*x509.Certificate + verifiers []cryptoutil.Verifier + threshold int + timestampVerifiers []timestamp.TimestampVerifier +} + +type VerificationOption func(*verificationOptions) + +func VerifyWithRoots(roots ...*x509.Certificate) VerificationOption { + return func(vo *verificationOptions) { + vo.roots = roots + } +} + +func VerifyWithIntermediates(intermediates ...*x509.Certificate) VerificationOption { + return func(vo *verificationOptions) { + vo.intermediates = intermediates + } +} + +func VerifyWithVerifiers(verifiers ...cryptoutil.Verifier) VerificationOption { + return func(vo *verificationOptions) { + vo.verifiers = verifiers + } +} + +func VerifyWithThreshold(threshold int) VerificationOption { + return func(vo *verificationOptions) { + vo.threshold = threshold + } +} + +func VerifyWithTimestampVerifiers(verifiers ...timestamp.TimestampVerifier) VerificationOption { + return func(vo *verificationOptions) { + vo.timestampVerifiers = verifiers + } +} + +type CheckedVerifier struct { + Verifier cryptoutil.Verifier + TimestampVerifiers []timestamp.TimestampVerifier + Error error +} + +func (e Envelope) Verify(opts ...VerificationOption) ([]CheckedVerifier, error) { + options := &verificationOptions{ + threshold: 1, + } + + for _, opt := range opts { + opt(options) + } + + if options.threshold <= 0 { + return nil, ErrInvalidThreshold(options.threshold) + } + + pae := preauthEncode(e.PayloadType, e.Payload) + if len(e.Signatures) == 0 { + return nil, ErrNoSignatures{} + } + + checkedVerifiers := make([]CheckedVerifier, 0) + verified := 0 + for _, sig := range e.Signatures { + if len(sig.Certificate) > 0 { + cert, err := cryptoutil.TryParseCertificate(sig.Certificate) + if 
err != nil { + continue + } + + sigIntermediates := make([]*x509.Certificate, 0) + for _, int := range sig.Intermediates { + intCert, err := cryptoutil.TryParseCertificate(int) + if err != nil { + continue + } + + sigIntermediates = append(sigIntermediates, intCert) + } + + sigIntermediates = append(sigIntermediates, options.intermediates...) + if len(options.timestampVerifiers) == 0 { + if verifier, err := verifyX509Time(cert, sigIntermediates, options.roots, pae, sig.Signature, time.Now()); err == nil { + checkedVerifiers = append(checkedVerifiers, CheckedVerifier{Verifier: verifier}) + verified += 1 + } else { + checkedVerifiers = append(checkedVerifiers, CheckedVerifier{Verifier: verifier, Error: err}) + log.Debugf("failed to verify with timestamp verifier: %w", err) + } + } else { + var passedVerifier cryptoutil.Verifier + failed := []cryptoutil.Verifier{} + passedTimestampVerifiers := []timestamp.TimestampVerifier{} + failedTimestampVerifiers := []timestamp.TimestampVerifier{} + + for _, timestampVerifier := range options.timestampVerifiers { + for _, sigTimestamp := range sig.Timestamps { + timestamp, err := timestampVerifier.Verify(context.TODO(), bytes.NewReader(sigTimestamp.Data), bytes.NewReader(sig.Signature)) + if err != nil { + continue + } + + if verifier, err := verifyX509Time(cert, sigIntermediates, options.roots, pae, sig.Signature, timestamp); err == nil { + // NOTE: do we not want to save all the passed verifiers? 
+ passedVerifier = verifier + passedTimestampVerifiers = append(passedTimestampVerifiers, timestampVerifier) + } else { + failed = append(failed, verifier) + failedTimestampVerifiers = append(failedTimestampVerifiers, timestampVerifier) + log.Debugf("failed to verify with timestamp verifier: %w", err) + } + + } + } + + if len(passedTimestampVerifiers) > 0 { + verified += 1 + checkedVerifiers = append(checkedVerifiers, CheckedVerifier{ + Verifier: passedVerifier, + TimestampVerifiers: passedTimestampVerifiers, + }) + } else { + for _, v := range failed { + checkedVerifiers = append(checkedVerifiers, CheckedVerifier{ + Verifier: v, + TimestampVerifiers: failedTimestampVerifiers, + Error: fmt.Errorf("no valid timestamps found"), + }) + } + } + } + } + + for _, verifier := range options.verifiers { + if verifier != nil { + kid, err := verifier.KeyID() + if err != nil { + log.Warn("failed to get key id from verifier: %v", err) + } + log.Debug("verifying with verifier with KeyID ", kid) + + if err := verifier.Verify(bytes.NewReader(pae), sig.Signature); err == nil { + verified += 1 + checkedVerifiers = append(checkedVerifiers, CheckedVerifier{Verifier: verifier}) + } else { + checkedVerifiers = append(checkedVerifiers, CheckedVerifier{Verifier: verifier, Error: err}) + } + } + } + } + + if verified == 0 { + return nil, ErrNoMatchingSigs{Verifiers: checkedVerifiers} + } else if verified < options.threshold { + return checkedVerifiers, ErrThresholdNotMet{Theshold: options.threshold, Actual: verified} + } + + return checkedVerifiers, nil +} + +func verifyX509Time(cert *x509.Certificate, sigIntermediates, roots []*x509.Certificate, pae, sig []byte, trustedTime time.Time) (cryptoutil.Verifier, error) { + verifier, err := cryptoutil.NewX509Verifier(cert, sigIntermediates, roots, trustedTime) + if err != nil { + return nil, err + } + + err = verifier.Verify(bytes.NewReader(pae), sig) + + return verifier, err +} diff --git a/vendor/github.com/in-toto/go-witness/log/log.go 
b/vendor/github.com/in-toto/go-witness/log/log.go new file mode 100644 index 0000000000..31396dc17a --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/log/log.go @@ -0,0 +1,94 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "fmt" +) + +var log Logger = SilentLogger{} + +// Logger is used by witness library code to print out relevant information at runtime. +type Logger interface { + Errorf(format string, args ...interface{}) + Error(args ...interface{}) + Warnf(format string, args ...interface{}) + Warn(args ...interface{}) + Debugf(format string, args ...interface{}) + Debug(args ...interface{}) + Infof(format string, args ...interface{}) + Info(args ...interface{}) +} + +// SetLogger will set the Logger instance that all Witness library code will use as logging output. +// The default is a SilentLogger that will output nothing. +func SetLogger(l Logger) { + log = l +} + +// GetLogger returns the Logger instance currently being used by Witness library code. +func GetLogger() Logger { + return log +} + +func Errorf(format string, args ...interface{}) { + err := fmt.Errorf(format, args...) + log.Error(err) +} + +func Error(args ...interface{}) { + log.Error(args...) +} + +func Warnf(format string, args ...interface{}) { + // We want to wrap the error if there is one. + for _, a := range args { + if _, ok := a.(error); ok { + err := fmt.Errorf(format, args...) 
+ log.Warn(err) + return + } + } + + log.Warnf(format, args...) +} + +func Warn(args ...interface{}) { + log.Warn(args...) +} + +func Debugf(format string, args ...interface{}) { + for _, a := range args { + if _, ok := a.(error); ok { + err := fmt.Errorf(format, args...) + log.Debug(err) + return + } + } + + log.Debugf(format, args...) +} + +func Debug(args ...interface{}) { + log.Debug(args...) +} + +func Infof(format string, args ...interface{}) { + log.Infof(format, args...) +} + +func Info(args ...interface{}) { + log.Info(args...) +} diff --git a/vendor/github.com/in-toto/go-witness/log/silent.go b/vendor/github.com/in-toto/go-witness/log/silent.go new file mode 100644 index 0000000000..000236c064 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/log/silent.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +// SilentLogger is an implementation of the Logger interface that suppresses +// all logging output. This is the default logger when using Witness as a +// library, so that we don't interfere with the caller's stdout/stderr. Callers +// should supply their own Logger to capture Witness logging if desired. 
+type SilentLogger struct{} + +func (l SilentLogger) Errorf(format string, args ...interface{}) {} +func (l SilentLogger) Error(args ...interface{}) {} +func (l SilentLogger) Warnf(format string, args ...interface{}) {} +func (l SilentLogger) Warn(args ...interface{}) {} +func (l SilentLogger) Debugf(format string, args ...interface{}) {} +func (l SilentLogger) Debug(args ...interface{}) {} +func (l SilentLogger) Infof(format string, args ...interface{}) {} +func (l SilentLogger) Info(args ...interface{}) {} diff --git a/vendor/github.com/in-toto/go-witness/timestamp/fake.go b/vendor/github.com/in-toto/go-witness/timestamp/fake.go new file mode 100644 index 0000000000..1d50954683 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/timestamp/fake.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package timestamp + +import ( + "context" + "fmt" + "io" + "time" +) + +type FakeTimestamper struct { + T time.Time +} + +func (ft FakeTimestamper) Timestamp(context.Context, io.Reader) ([]byte, error) { + return []byte(ft.T.Format(time.RFC3339)), nil +} + +func (ft FakeTimestamper) Verify(ctx context.Context, ts io.Reader, sig io.Reader) (time.Time, error) { + b, err := io.ReadAll(ts) + if err != nil { + return time.Time{}, err + } + + if string(b) != ft.T.Format(time.RFC3339) { + return time.Time{}, fmt.Errorf("mismatched time") + } + + return ft.T, nil +} diff --git a/vendor/github.com/in-toto/go-witness/timestamp/timestamp.go b/vendor/github.com/in-toto/go-witness/timestamp/timestamp.go new file mode 100644 index 0000000000..6408190056 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/timestamp/timestamp.go @@ -0,0 +1,29 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package timestamp + +import ( + "context" + "io" + "time" +) + +type TimestampVerifier interface { + Verify(context.Context, io.Reader, io.Reader) (time.Time, error) +} + +type Timestamper interface { + Timestamp(context.Context, io.Reader) ([]byte, error) +} diff --git a/vendor/github.com/in-toto/go-witness/timestamp/tsp.go b/vendor/github.com/in-toto/go-witness/timestamp/tsp.go new file mode 100644 index 0000000000..e8a1e596d4 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/timestamp/tsp.go @@ -0,0 +1,176 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package timestamp + +import ( + "bytes" + "context" + "crypto" + "crypto/x509" + "fmt" + "io" + "net/http" + "time" + + "github.com/digitorus/pkcs7" + "github.com/digitorus/timestamp" + "github.com/in-toto/go-witness/cryptoutil" +) + +type TSPTimestamper struct { + url string + hash crypto.Hash + requestCertificate bool +} + +type TSPTimestamperOption func(*TSPTimestamper) + +func TimestampWithUrl(url string) TSPTimestamperOption { + return func(t *TSPTimestamper) { + t.url = url + } +} + +func TimestampWithHash(h crypto.Hash) TSPTimestamperOption { + return func(t *TSPTimestamper) { + t.hash = h + } +} + +func TimestampWithRequestCertificate(requestCertificate bool) TSPTimestamperOption { + return func(t *TSPTimestamper) { + t.requestCertificate = requestCertificate + } +} + +func NewTimestamper(opts ...TSPTimestamperOption) TSPTimestamper { + t := TSPTimestamper{ + hash: crypto.SHA256, + requestCertificate: true, + } + + for _, opt := range opts { + opt(&t) + } + + return t +} + +func (t TSPTimestamper) Timestamp(ctx context.Context, r io.Reader) ([]byte, error) { + tsq, err := timestamp.CreateRequest(r, ×tamp.RequestOptions{ + Hash: t.hash, + Certificates: t.requestCertificate, + }) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, "POST", t.url, bytes.NewReader(tsq)) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", "application/timestamp-query") + client := http.Client{} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusAccepted: + default: + return nil, fmt.Errorf("request to timestamp authority failed: %v", resp.Status) + } + + defer resp.Body.Close() + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + timestamp, err := timestamp.ParseResponse(bodyBytes) + if err != nil { + return nil, err + } + + return timestamp.RawToken, nil +} + +type TSPVerifier struct { 
+ certChain *x509.CertPool + hash crypto.Hash +} + +type TSPVerifierOption func(*TSPVerifier) + +func VerifyWithCerts(certs []*x509.Certificate) TSPVerifierOption { + return func(t *TSPVerifier) { + t.certChain = x509.NewCertPool() + for _, cert := range certs { + t.certChain.AddCert(cert) + } + } +} + +func VerifyWithHash(h crypto.Hash) TSPVerifierOption { + return func(t *TSPVerifier) { + t.hash = h + } +} + +func NewVerifier(opts ...TSPVerifierOption) TSPVerifier { + v := TSPVerifier{ + hash: crypto.SHA256, + } + + for _, opt := range opts { + opt(&v) + } + + return v +} + +func (v TSPVerifier) Verify(ctx context.Context, tsrData, signedData io.Reader) (time.Time, error) { + tsrBytes, err := io.ReadAll(tsrData) + if err != nil { + return time.Time{}, err + } + + ts, err := timestamp.Parse(tsrBytes) + if err != nil { + return time.Time{}, err + } + + hashedData, err := cryptoutil.Digest(signedData, v.hash) + if err != nil { + return time.Time{}, err + } + + if !bytes.Equal(ts.HashedMessage, hashedData) { + return time.Time{}, fmt.Errorf("signed payload does not match timestamped payload") + } + + p7, err := pkcs7.Parse(tsrBytes) + if err != nil { + return time.Time{}, err + } + + if err := p7.VerifyWithChain(v.certChain); err != nil { + return time.Time{}, err + } + + return ts.Time, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index fef687db0e..56cdc7c21c 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -80,7 +80,6 @@ type handshakeTransport struct { pendingPackets [][]byte // Used when a key exchange is in progress. 
writePacketsLeft uint32 writeBytesLeft int64 - userAuthComplete bool // whether the user authentication phase is complete // If the read loop wants to schedule a kex, it pings this // channel, and the write loop will send out a kex @@ -553,25 +552,16 @@ func (t *handshakeTransport) sendKexInit() error { return nil } -var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase") - func (t *handshakeTransport) writePacket(p []byte) error { - t.mu.Lock() - defer t.mu.Unlock() - switch p[0] { case msgKexInit: return errors.New("ssh: only handshakeTransport can send kexInit") case msgNewKeys: return errors.New("ssh: only handshakeTransport can send newKeys") - case msgUserAuthBanner: - if t.userAuthComplete { - return errSendBannerPhase - } - case msgUserAuthSuccess: - t.userAuthComplete = true } + t.mu.Lock() + defer t.mu.Unlock() if t.writeError != nil { return t.writeError } diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 1839ddc6a4..5b5ccd96f4 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -59,27 +59,6 @@ type GSSAPIWithMICConfig struct { Server GSSAPIServer } -// SendAuthBanner implements [ServerPreAuthConn]. -func (s *connection) SendAuthBanner(msg string) error { - return s.transport.writePacket(Marshal(&userAuthBannerMsg{ - Message: msg, - })) -} - -func (*connection) unexportedMethodForFutureProofing() {} - -// ServerPreAuthConn is the interface available on an incoming server -// connection before authentication has completed. -type ServerPreAuthConn interface { - unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, ala testing.TB - - ConnMetadata - - // SendAuthBanner sends a banner message to the client. - // It returns an error once the authentication phase has ended. - SendAuthBanner(string) error -} - // ServerConfig holds server specific configuration data. 
type ServerConfig struct { // Config contains configuration shared between client and server. @@ -139,12 +118,6 @@ type ServerConfig struct { // attempts. AuthLogCallback func(conn ConnMetadata, method string, err error) - // PreAuthConnCallback, if non-nil, is called upon receiving a new connection - // before any authentication has started. The provided ServerPreAuthConn - // can be used at any time before authentication is complete, including - // after this callback has returned. - PreAuthConnCallback func(ServerPreAuthConn) - // ServerVersion is the version identification string to announce in // the public handshake. // If empty, a reasonable default is used. @@ -515,10 +488,6 @@ func (b *BannerError) Error() string { } func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - if config.PreAuthConnCallback != nil { - config.PreAuthConnCallback(s) - } - sessionID := s.transport.getSessionID() var cache pubKeyCache var perms *Permissions @@ -526,7 +495,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err authFailures := 0 noneAuthCount := 0 var authErrs []error - var calledBannerCallback bool + var displayedBanner bool partialSuccessReturned := false // Set the initial authentication callbacks from the config. They can be // changed if a PartialSuccessError is returned. 
@@ -573,10 +542,14 @@ userAuthLoop: s.user = userAuthReq.User - if !calledBannerCallback && config.BannerCallback != nil { - calledBannerCallback = true - if msg := config.BannerCallback(s); msg != "" { - if err := s.SendAuthBanner(msg); err != nil { + if !displayedBanner && config.BannerCallback != nil { + displayedBanner = true + msg := config.BannerCallback(s) + if msg != "" { + bannerMsg := &userAuthBannerMsg{ + Message: msg, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { return nil, err } } @@ -789,7 +762,10 @@ userAuthLoop: var bannerErr *BannerError if errors.As(authErr, &bannerErr) { if bannerErr.Message != "" { - if err := s.SendAuthBanner(bannerErr.Message); err != nil { + bannerMsg := &userAuthBannerMsg{ + Message: bannerErr.Message, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { return nil, err } } diff --git a/vendor/golang.org/x/mod/sumdb/dirhash/hash.go b/vendor/golang.org/x/mod/sumdb/dirhash/hash.go new file mode 100644 index 0000000000..51ec4db873 --- /dev/null +++ b/vendor/golang.org/x/mod/sumdb/dirhash/hash.go @@ -0,0 +1,135 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dirhash defines hashes over directory trees. +// These hashes are recorded in go.sum files and in the Go checksum database, +// to allow verifying that a newly-downloaded module has the expected content. +package dirhash + +import ( + "archive/zip" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" +) + +// DefaultHash is the default hash function used in new go.sum entries. +var DefaultHash Hash = Hash1 + +// A Hash is a directory hash function. +// It accepts a list of files along with a function that opens the content of each file. +// It opens, reads, hashes, and closes each file and returns the overall directory hash. 
+type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error) + +// Hash1 is the "h1:" directory hash function, using SHA-256. +// +// Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary +// prepared as if by the Unix command: +// +// sha256sum $(find . -type f | sort) | sha256sum +// +// More precisely, the hashed summary contains a single line for each file in the list, +// ordered by sort.Strings applied to the file names, where each line consists of +// the hexadecimal SHA-256 hash of the file content, +// two spaces (U+0020), the file name, and a newline (U+000A). +// +// File names with newlines (U+000A) are disallowed. +func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) { + h := sha256.New() + files = append([]string(nil), files...) + sort.Strings(files) + for _, file := range files { + if strings.Contains(file, "\n") { + return "", errors.New("dirhash: filenames with newlines are not supported") + } + r, err := open(file) + if err != nil { + return "", err + } + hf := sha256.New() + _, err = io.Copy(hf, r) + r.Close() + if err != nil { + return "", err + } + fmt.Fprintf(h, "%x %s\n", hf.Sum(nil), file) + } + return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil +} + +// HashDir returns the hash of the local file system directory dir, +// replacing the directory name itself with prefix in the file names +// used in the hash function. +func HashDir(dir, prefix string, hash Hash) (string, error) { + files, err := DirFiles(dir, prefix) + if err != nil { + return "", err + } + osOpen := func(name string) (io.ReadCloser, error) { + return os.Open(filepath.Join(dir, strings.TrimPrefix(name, prefix))) + } + return hash(files, osOpen) +} + +// DirFiles returns the list of files in the tree rooted at dir, +// replacing the directory name dir with prefix in each name. +// The resulting names always use forward slashes. 
+func DirFiles(dir, prefix string) ([]string, error) { + var files []string + dir = filepath.Clean(dir) + err := filepath.Walk(dir, func(file string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } else if file == dir { + return fmt.Errorf("%s is not a directory", dir) + } + + rel := file + if dir != "." { + rel = file[len(dir)+1:] + } + f := filepath.Join(prefix, rel) + files = append(files, filepath.ToSlash(f)) + return nil + }) + if err != nil { + return nil, err + } + return files, nil +} + +// HashZip returns the hash of the file content in the named zip file. +// Only the file names and their contents are included in the hash: +// the exact zip file format encoding, compression method, +// per-file modification times, and other metadata are ignored. +func HashZip(zipfile string, hash Hash) (string, error) { + z, err := zip.OpenReader(zipfile) + if err != nil { + return "", err + } + defer z.Close() + var files []string + zfiles := make(map[string]*zip.File) + for _, file := range z.File { + files = append(files, file.Name) + zfiles[file.Name] = file + } + zipOpen := func(name string) (io.ReadCloser, error) { + f := zfiles[name] + if f == nil { + return nil, fmt.Errorf("file %q not found in zip", name) // should never happen + } + return f.Open() + } + return hash(files, zipOpen) +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index b8322598ae..948a3ee63d 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -118,7 +118,6 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. -// A limit of zero will prevent any new goroutines from being added. 
// // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 9c105f23af..02609d5b21 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -72,9 +72,6 @@ var X86 struct { HasSSSE3 bool // Supplemental streaming SIMD extension 3 HasSSE41 bool // Streaming SIMD extension 4 and 4.1 HasSSE42 bool // Streaming SIMD extension 4 and 4.2 - HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add - HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions - HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 1e642f3304..600a680786 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -53,9 +53,6 @@ func initOptions() { {Name: "sse41", Feature: &X86.HasSSE41}, {Name: "sse42", Feature: &X86.HasSSE42}, {Name: "ssse3", Feature: &X86.HasSSSE3}, - {Name: "avxifma", Feature: &X86.HasAVXIFMA}, - {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, - {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, // These capabilities should always be enabled on amd64: {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, @@ -109,7 +106,7 @@ func archInit() { return } - eax7, ebx7, ecx7, edx7 := cpuid(7, 0) + _, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) @@ -137,24 +134,14 @@ func archInit() { X86.HasAVX512VAES = isSet(9, ecx7) X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512BITALG = isSet(12, ecx7) + + eax71, _, _, _ := cpuid(7, 1) + X86.HasAVX512BF16 = isSet(5, eax71) } X86.HasAMXTile = isSet(24, edx7) X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXBF16 = 
isSet(22, edx7) - - // These features depend on the second level of extended features. - if eax7 >= 1 { - eax71, _, _, edx71 := cpuid(7, 1) - if X86.HasAVX512 { - X86.HasAVX512BF16 = isSet(5, eax71) - } - if X86.HasAVX { - X86.HasAVXIFMA = isSet(23, eax71) - X86.HasAVXVNNI = isSet(4, eax71) - X86.HasAVXVNNIInt8 = isSet(4, edx71) - } - } } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go deleted file mode 100644 index 37a82528f5..0000000000 --- a/vendor/golang.org/x/sys/unix/auxv.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) - -package unix - -import ( - "syscall" - "unsafe" -) - -//go:linkname runtime_getAuxv runtime.getAuxv -func runtime_getAuxv() []uintptr - -// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. -// The returned slice is always a fresh copy, owned by the caller. -// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, -// which happens in some locked-down environments and build modes. -func Auxv() ([][2]uintptr, error) { - vec := runtime_getAuxv() - vecLen := len(vec) - - if vecLen == 0 { - return nil, syscall.ENOENT - } - - if vecLen%2 != 0 { - return nil, syscall.EINVAL - } - - result := make([]uintptr, vecLen) - copy(result, vec) - return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil -} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go deleted file mode 100644 index 1200487f2e..0000000000 --- a/vendor/golang.org/x/sys/unix/auxv_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) - -package unix - -import "syscall" - -func Auxv() ([][2]uintptr, error) { - return nil, syscall.ENOTSUP -} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index abc3955477..21974af064 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,90 +1102,3 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } - -// Ucred Helpers -// See ucred(3c) and getpeerucred(3c) - -//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) -//sys ucredFree(ucred uintptr) = ucred_free -//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get -//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid -//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid -//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid -//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid -//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid -//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid -//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid - -// Ucred is an opaque struct that holds user credentials. -type Ucred struct { - ucred uintptr -} - -// We need to ensure that ucredFree is called on the underlying ucred -// when the Ucred is garbage collected. 
-func ucredFinalizer(u *Ucred) { - ucredFree(u.ucred) -} - -func GetPeerUcred(fd uintptr) (*Ucred, error) { - var ucred uintptr - err := getpeerucred(fd, &ucred) - if err != nil { - return nil, err - } - result := &Ucred{ - ucred: ucred, - } - // set the finalizer on the result so that the ucred will be freed - runtime.SetFinalizer(result, ucredFinalizer) - return result, nil -} - -func UcredGet(pid int) (*Ucred, error) { - ucred, err := ucredGet(pid) - if err != nil { - return nil, err - } - result := &Ucred{ - ucred: ucred, - } - // set the finalizer on the result so that the ucred will be freed - runtime.SetFinalizer(result, ucredFinalizer) - return result, nil -} - -func (u *Ucred) Geteuid() int { - defer runtime.KeepAlive(u) - return ucredGeteuid(u.ucred) -} - -func (u *Ucred) Getruid() int { - defer runtime.KeepAlive(u) - return ucredGetruid(u.ucred) -} - -func (u *Ucred) Getsuid() int { - defer runtime.KeepAlive(u) - return ucredGetsuid(u.ucred) -} - -func (u *Ucred) Getegid() int { - defer runtime.KeepAlive(u) - return ucredGetegid(u.ucred) -} - -func (u *Ucred) Getrgid() int { - defer runtime.KeepAlive(u) - return ucredGetrgid(u.ucred) -} - -func (u *Ucred) Getsgid() int { - defer runtime.KeepAlive(u) - return ucredGetsgid(u.ucred) -} - -func (u *Ucred) Getpid() int { - defer runtime.KeepAlive(u) - return ucredGetpid(u.ucred) -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 4f432bfe8f..6ebc48b3fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,7 +1245,6 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 - FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1331,10 +1330,8 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 - F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 
F_DUPFD_CLOEXEC = 0x406 - F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1554,7 +1551,6 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 - IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1627,8 +1623,6 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1873,7 +1867,6 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 - MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1974,7 +1967,6 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 - MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2091,7 +2083,6 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 - NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2172,7 +2163,6 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 - NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2501,7 +2491,6 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b - PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2510,7 +2499,6 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 - PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2537,8 +2525,6 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c - PR_PMLEN_MASK = 0x7f000000 - PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2606,7 +2592,6 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 
0x16 PR_SET_SECUREBITS = 0x1c - PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2617,9 +2602,6 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 - PR_SHADOW_STACK_ENABLE = 0x1 - PR_SHADOW_STACK_PUSH = 0x4 - PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2929,6 +2911,7 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 + RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2937,7 +2920,6 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 - RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 75207613c7..c0d45e3205 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,8 +116,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -306,7 +304,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c68acda535..c731d24f02 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,8 +116,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -307,7 +305,6 @@ 
const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a8c607ab86..680018a4a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -312,7 +310,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 18563dd8d3..a63909f308 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,7 +109,6 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 - GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -120,8 +119,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +302,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 22912cdaa9..9b0a2573fe 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,8 +116,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -299,7 +297,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 29344eb37a..958e6e0645 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +303,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 20d51fb96a..50c7f25bd1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +303,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 321b60902a..ced21d66d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +303,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 9bacdf1e27..226c044190 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +303,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c224272615..3122737cd4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -360,7 +358,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 
0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6270c8ee13..eb5d3467ed 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -364,7 +362,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 9966c1941f..e921ebc60b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -364,7 +362,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 848e5fcc42..38ba81c55c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - 
IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -296,7 +294,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 669b2adb80..71f0400977 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -368,7 +366,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 4834e57514..c44a313322 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,8 +119,6 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -359,7 +357,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 - SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index c6545413c4..829b87feb8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go 
@@ -141,16 +141,6 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" -//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" -//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" -//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" -//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" -//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" -//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" -//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" -//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" -//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" -//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -290,16 +280,6 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom -//go:linkname procgetpeerucred libc_getpeerucred -//go:linkname procucred_get libc_ucred_get -//go:linkname procucred_geteuid libc_ucred_geteuid -//go:linkname procucred_getegid libc_ucred_getegid -//go:linkname procucred_getruid libc_ucred_getruid -//go:linkname procucred_getrgid libc_ucred_getrgid -//go:linkname procucred_getsuid libc_ucred_getsuid -//go:linkname procucred_getsgid libc_ucred_getsgid -//go:linkname procucred_getpid libc_ucred_getpid -//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -440,16 +420,6 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, - 
procgetpeerucred, - procucred_get, - procucred_geteuid, - procucred_getegid, - procucred_getruid, - procucred_getrgid, - procucred_getsuid, - procucred_getsgid, - procucred_getpid, - procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2059,90 +2029,6 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getpeerucred(fd uintptr, ucred *uintptr) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGet(pid int) (ucred uintptr, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) - ucred = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGeteuid(ucred uintptr) (uid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetegid(ucred uintptr) (gid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetruid(ucred uintptr) (uid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetrgid(ucred uintptr) (gid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT - -func ucredGetsuid(ucred uintptr) (uid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetsgid(ucred uintptr) (gid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetpid(ucred uintptr) (pid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredFree(ucred uintptr) { - sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff306..524b0820cb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,8 +458,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695e..f485dbf456 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,8 +381,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - 
SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e5029744..70b35bf3b0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,8 +422,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51b..1893e2fe88 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,8 +325,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a18..16a4017da0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,8 +321,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336b..7e567f1eff 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,8 +442,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 - SYS_SETXATTRAT = 4463 - SYS_GETXATTRAT = 4464 - SYS_LISTXATTRAT = 4465 - SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b9962278..38ae55e5ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,8 +372,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 - SYS_SETXATTRAT = 5463 - SYS_GETXATTRAT = 5464 - SYS_LISTXATTRAT = 5465 - SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e6..55e92e60a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,8 +372,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 - SYS_SETXATTRAT = 5463 - SYS_GETXATTRAT = 5464 - SYS_LISTXATTRAT = 5465 - SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc22..60658d6a02 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,8 +442,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 - SYS_SETXATTRAT = 4463 - SYS_GETXATTRAT = 4464 - SYS_LISTXATTRAT = 4465 - SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb1..e203e8a7ed 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,8 +449,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b446365025..5944b97d54 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,8 +421,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c188..c66d416dad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,8 +421,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 8405391698..a5459e766f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,8 +326,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d6..01d86825bb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,8 +387,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9d..7b703e77cd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,8 +400,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index a46abe6472..5537148dcb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4747,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14d + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5519,7 +5519,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x7 + NL80211_MNTR_FLAG_MAX = 0x6 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -6174,5 +6174,3 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } - -const RTM_NEWNVLAN = 0x70 diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto index e802a01439..13be7cbd8e 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -675,7 +675,7 @@ message ResourceClaimStatus { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 256 such reservations. This may get increased in + // There can be at most 32 such reservations. This may get increased in // the future, but not reduced. 
// // +optional diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go index fb4d7041db..e3d7fd8945 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -687,7 +687,7 @@ type ResourceClaimStatus struct { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 256 such reservations. This may get increased in + // There can be at most 32 such reservations. This may get increased in // the future, but not reduced. // // +optional @@ -715,9 +715,9 @@ type ResourceClaimStatus struct { Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` } -// ResourceClaimReservedForMaxSize is the maximum number of entries in +// ReservedForMaxSize is the maximum number of entries in // claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 256 +const ResourceClaimReservedForMaxSize = 32 // ResourceClaimConsumerReference contains enough information to let you // locate the consumer of a ResourceClaim. The user must be a resource in the same diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go index b41609d118..1a71d64c10 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -291,7 +291,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { var map_ResourceClaimStatus = map[string]string{ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", "allocation": "Allocation is set once the claim has been allocated successfully.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. 
A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. 
Entries are owned by their respective drivers.", } diff --git a/vendor/k8s.io/api/resource/v1beta1/generated.proto b/vendor/k8s.io/api/resource/v1beta1/generated.proto index 4ea13e0337..6d525d5b85 100644 --- a/vendor/k8s.io/api/resource/v1beta1/generated.proto +++ b/vendor/k8s.io/api/resource/v1beta1/generated.proto @@ -683,7 +683,7 @@ message ResourceClaimStatus { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 256 such reservations. This may get increased in + // There can be at most 32 such reservations. This may get increased in // the future, but not reduced. // // +optional diff --git a/vendor/k8s.io/api/resource/v1beta1/types.go b/vendor/k8s.io/api/resource/v1beta1/types.go index ca79c5a664..a7f1ee7b54 100644 --- a/vendor/k8s.io/api/resource/v1beta1/types.go +++ b/vendor/k8s.io/api/resource/v1beta1/types.go @@ -695,7 +695,7 @@ type ResourceClaimStatus struct { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 256 such reservations. This may get increased in + // There can be at most 32 such reservations. This may get increased in // the future, but not reduced. // // +optional @@ -723,9 +723,9 @@ type ResourceClaimStatus struct { Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` } -// ResourceClaimReservedForMaxSize is the maximum number of entries in +// ReservedForMaxSize is the maximum number of entries in // claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 256 +const ResourceClaimReservedForMaxSize = 32 // ResourceClaimConsumerReference contains enough information to let you // locate the consumer of a ResourceClaim. 
The user must be a resource in the same diff --git a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go index 4ecc35d08a..1d0176cbca 100644 --- a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go @@ -300,7 +300,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { var map_ResourceClaimStatus = map[string]string{ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", "allocation": "Allocation is set once the claim has been allocated successfully.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. 
A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.", } diff --git a/vendor/modules.txt b/vendor/modules.txt index ad09c51697..01e3567f55 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -705,6 +705,9 @@ github.com/eapache/go-xerial-snappy # github.com/eapache/queue v1.1.0 ## explicit github.com/eapache/queue +# github.com/edwarnicke/gitoid v0.0.0-20220710194850-1be5bfda1f9d +## explicit; go 1.18 +github.com/edwarnicke/gitoid # github.com/emicklei/go-restful/v3 v3.11.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -1358,10 +1361,20 @@ github.com/hashicorp/vault/api github.com/hexops/gotextdiff github.com/hexops/gotextdiff/myers github.com/hexops/gotextdiff/span -# github.com/in-toto/attestation v1.1.1 +# github.com/in-toto/archivista v0.9.0 +## explicit; go 1.23.0 +github.com/in-toto/archivista/pkg/api +github.com/in-toto/archivista/pkg/http-client +# github.com/in-toto/attestation v1.1.0 ## explicit; go 1.20 github.com/in-toto/attestation/go/predicates/provenance/v1 github.com/in-toto/attestation/go/v1 +# github.com/in-toto/go-witness 
v0.7.0 +## explicit; go 1.22.8 +github.com/in-toto/go-witness/cryptoutil +github.com/in-toto/go-witness/dsse +github.com/in-toto/go-witness/log +github.com/in-toto/go-witness/timestamp # github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09 ## explicit; go 1.20 github.com/in-toto/in-toto-golang/in_toto @@ -2423,7 +2436,7 @@ gocloud.dev/docstore/mongodocstore # gocloud.dev/pubsub/kafkapubsub v0.40.0 ## explicit; go 1.21.0 gocloud.dev/pubsub/kafkapubsub -# golang.org/x/crypto v0.33.0 +# golang.org/x/crypto v0.32.0 ## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b @@ -2476,6 +2489,7 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver +golang.org/x/mod/sumdb/dirhash golang.org/x/mod/sumdb/note # golang.org/x/net v0.34.0 ## explicit; go 1.18 @@ -2502,22 +2516,22 @@ golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.11.0 +# golang.org/x/sync v0.10.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.30.0 +# golang.org/x/sys v0.29.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.29.0 +# golang.org/x/term v0.28.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.22.0 +# golang.org/x/text v0.21.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/internal @@ -3109,7 +3123,7 @@ honnef.co/go/tools/stylecheck/st1021 honnef.co/go/tools/stylecheck/st1022 honnef.co/go/tools/stylecheck/st1023 honnef.co/go/tools/unused -# k8s.io/api v0.32.1 +# k8s.io/api v0.32.0 ## explicit; go 1.23.0 k8s.io/api/admission/v1 k8s.io/api/admissionregistration/v1 @@ -3173,7 +3187,7 @@ k8s.io/api/storagemigration/v1alpha1 ## explicit; go 1.21 
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.32.1 +# k8s.io/apimachinery v0.32.0 ## explicit; go 1.23.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -3230,7 +3244,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.32.1 +# k8s.io/client-go v0.32.0 ## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -3563,7 +3577,7 @@ k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.32.1 +# k8s.io/code-generator v0.32.0 ## explicit; go 1.23.0 k8s.io/code-generator/cmd/deepcopy-gen k8s.io/code-generator/cmd/deepcopy-gen/args