From 054e67f99d9024251ac1a046e1d63a3bbb0b2757 Mon Sep 17 00:00:00 2001
From: Stacky McStackface
Date: Sat, 31 May 2025 20:10:36 +0000
Subject: [PATCH 1/2] chore: Generated commit to update templated files since
 the last template run up to
 stackabletech/operator-templating@08477dea5814ce56a0e40fd2114d3b06370b2fed

Reference-to: stackabletech/operator-templating@08477de (nightly rust, clippy fix, various updates, linter)
---
 .github/pull_request_template.md              |  7 +++++-
 .github/workflows/build.yml                   |  6 ++---
 .github/workflows/integration-test.yml        |  2 +-
 .github/workflows/pr_pre-commit.yaml          |  4 ++--
 .hadolint.yaml                                | 11 ++++++++++
 .markdownlint.yaml                            |  3 +++
 .pre-commit-config.yaml                       |  8 +++----
 .readme/partials/borrowed/documentation.md.j2 |  4 ++--
 .readme/partials/borrowed/footer.md.j2        |  4 ++++
 .readme/partials/borrowed/links.md.j2         |  1 -
 .vscode/settings.json                         |  2 +-
 README.md                                     |  9 +++++---
 bors.toml                                     |  9 --------
 default.nix                                   |  6 ++++-
 docker/Dockerfile                             | 22 +++++++++++++++++--
 rust-toolchain.toml                           |  1 +
 16 files changed, 69 insertions(+), 30 deletions(-)
 create mode 100644 .hadolint.yaml
 delete mode 100644 bors.toml

diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index d1357fe3..acad6c09 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -15,6 +15,8 @@
 - [ ] Helm chart can be installed and deployed operator works
 - [ ] Integration tests passed (for non trivial changes)
 - [ ] Changes need to be "offline" compatible
+- [ ] Links to generated (nightly) docs added
+- [ ] Release note snippet added

 ### Reviewer

@@ -29,4 +31,7 @@
 - [ ] Feature Tracker has been updated
 - [ ] Proper release label has been added
-- [ ] [Roadmap](https://github.com/orgs/stackabletech/projects/25/views/1) has been updated
+- [ ] Links to generated (nightly) docs added
+- [ ] Release note snippet added
+- [ ] Add `type/deprecation` label & add to the [deprecation schedule](https://github.com/orgs/stackabletech/projects/44/views/1)
+- [ ] Add `type/experimental` label & add to the [experimental features tracker](https://github.com/orgs/stackabletech/projects/47)
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 9dfee91a..de633f7c 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -27,7 +27,7 @@ env:
   CARGO_INCREMENTAL: '0'
   CARGO_PROFILE_DEV_DEBUG: '0'
   RUST_TOOLCHAIN_VERSION: "1.85.0"
-  RUST_NIGHTLY_TOOLCHAIN_VERSION: "nightly-2025-01-15"
+  RUST_NIGHTLY_TOOLCHAIN_VERSION: "nightly-2025-05-26"
   PYTHON_VERSION: "3.12"
   RUSTFLAGS: "-D warnings"
   RUSTDOCFLAGS: "-D warnings"
@@ -340,7 +340,7 @@ jobs:
         with:
           persist-credentials: false
           submodules: recursive
-      - uses: cachix/install-nix-action@754537aaedb35f72ab11a60cc162c49ef3016495 # v31.2.0
+      - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31.4.0
      - uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN_VERSION }}
@@ -378,7 +378,7 @@ jobs:
       - name: Install cosign
         uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
       - name: Install syft
-        uses: anchore/sbom-action/download-syft@f325610c9f50a54015d37c8d16cb3b0e2c8f4de0 # v0.18.0
+        uses: anchore/sbom-action/download-syft@e11c554f704a0b820cbf8c51673f6945e0731532 # v0.20.0
       - name: Build Docker image and Helm chart
         run: |
           # Installing helm and yq on ubicloud-standard-8-arm only
diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml
index e61a93fa..321a0387 100644
--- a/.github/workflows/integration-test.yml
+++ b/.github/workflows/integration-test.yml
@@ -97,7 +97,7 @@ jobs:
         if: ${{ failure() }}
         env:
           SLACK_BOT_TOKEN: ${{ secrets.SLACK_INTEGRATION_TEST_TOKEN }}
-        uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 # v1.27.0
+        uses: slackapi/slack-github-action@fcfb566f8b0aab22203f066d80ca1d7e4b5d05b3 # v1.27.1
        with:
          channel-id: "C07UYJYSMSN" # notifications-integration-tests
          payload: |
diff --git a/.github/workflows/pr_pre-commit.yaml b/.github/workflows/pr_pre-commit.yaml
index 4e5b9914..35dc6ac7 100644
--- a/.github/workflows/pr_pre-commit.yaml
+++ b/.github/workflows/pr_pre-commit.yaml
@@ -8,7 +8,7 @@ on:
 env:
   CARGO_TERM_COLOR: always
   NIX_PKG_MANAGER_VERSION: "2.28.3"
-  RUST_TOOLCHAIN_VERSION: "nightly-2025-01-15"
+  RUST_TOOLCHAIN_VERSION: "nightly-2025-05-26"
   HADOLINT_VERSION: "v2.12.0"
   PYTHON_VERSION: "3.12"
@@ -26,7 +26,7 @@ jobs:
           persist-credentials: false
           submodules: recursive
           fetch-depth: 0
-      - uses: stackabletech/actions/run-pre-commit@4bfd3b65f22af597fe784599c077dc34bf5894a7 # v0.8.0
+      - uses: stackabletech/actions/run-pre-commit@9aae2d1c14239021bfa33c041010f6fb7adec815 # v0.8.2
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          rust: ${{ env.RUST_TOOLCHAIN_VERSION }}
diff --git a/.hadolint.yaml b/.hadolint.yaml
new file mode 100644
index 00000000..0e9084e7
--- /dev/null
+++ b/.hadolint.yaml
@@ -0,0 +1,11 @@
+---
+ignored:
+  # Warning: Use the -y switch to avoid manual input dnf install -y <package>
+  # https://github.com/hadolint/hadolint/wiki/DL3038
+  # Reason: We set `assumeyes=True` in dnf.conf in our base image
+  - DL3038
+
+  # Warning: Specify version with dnf install -y <package>-<version>
+  # https://github.com/hadolint/hadolint/wiki/DL3041
+  # Reason: It's good advice, but we're not set up to pin versions just yet
+  - DL3041
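The new .hadolint.yaml is picked up automatically: hadolint reads it from the working directory, so the two dnf rules above are skipped without extra flags. A minimal sketch of a local run, assuming the hadolint v2.12.0 binary pinned in pr_pre-commit.yaml is on PATH:

    # The config is found implicitly when run from the repository root,
    # so DL3038 and DL3041 are already suppressed here.
    hadolint docker/Dockerfile

    # Equivalent explicit form via the -c/--config flag.
    hadolint -c .hadolint.yaml docker/Dockerfile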
diff --git a/.markdownlint.yaml b/.markdownlint.yaml
index 5af20365..75212ec5 100644
--- a/.markdownlint.yaml
+++ b/.markdownlint.yaml
@@ -22,3 +22,6 @@ MD033:
 MD024:
   # Only check sibling headings
   siblings_only: true
+
+# MD041/first-line-heading/first-line-h1 First line in a file should be a top-level heading
+MD041: false # GitHub issues and PRs already have titles, and H1 is enormous in the description box.
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 906defdd..85bd438e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -15,12 +15,12 @@ repos:
       - id: detect-private-key

   - repo: https://github.com/adrienverge/yamllint
-    rev: 81e9f98ffd059efe8aa9c1b1a42e5cce61b640c6 # 1.35.1
+    rev: 79a6b2b1392eaf49cdd32ac4f14be1a809bbd8f7 # 1.37.0
     hooks:
       - id: yamllint

   - repo: https://github.com/igorshubovych/markdownlint-cli
-    rev: 586c3ea3f51230da42bab657c6a32e9e66c364f0 # 0.44.0
+    rev: 192ad822316c3a22fb3d3cc8aa6eafa0b8488360 # 0.45.0
     hooks:
       - id: markdownlint
         types: [text]
@@ -36,7 +36,7 @@ repos:
   # If you do not, you will need to delete the cached ruff binary shown in the
   # error message
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: 2c8dce6094fa2b4b668e74f694ca63ceffd38614 # 0.9.9
+    rev: d19233b89771be2d89273f163f5edc5a39bbc34a # 0.11.12
     hooks:
       # Run the linter.
       - id: ruff
@@ -82,7 +82,7 @@ repos:
       - id: cargo-rustfmt
         name: cargo-rustfmt
         language: system
-        entry: cargo +nightly-2025-01-15 fmt --all -- --check
+        entry: cargo +nightly-2025-05-26 fmt --all -- --check
        stages: [pre-commit, pre-merge-commit]
        pass_filenames: false
        files: \.rs$
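Since the cargo-rustfmt hook above (and the rust-analyzer settings below) pin a specific nightly, contributors need that toolchain installed locally. A sketch of the matching workflow, assuming rustup and pre-commit are available; the toolchain name is taken from the hook entry:

    # Install the pinned nightly together with its rustfmt component.
    rustup toolchain install nightly-2025-05-26 --component rustfmt

    # Run the same check the hook runs.
    cargo +nightly-2025-05-26 fmt --all -- --check

    # Or run the full hook suite, as CI does.
    pre-commit run --all-files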
diff --git a/.readme/partials/borrowed/documentation.md.j2 b/.readme/partials/borrowed/documentation.md.j2
index 88727cd5..e62e4d91 100644
--- a/.readme/partials/borrowed/documentation.md.j2
+++ b/.readme/partials/borrowed/documentation.md.j2
@@ -1,9 +1,9 @@
 ## Documentation

-The stable documentation for this operator can be found [here](https://docs.stackable.tech/home/stable/{{operator_docs_slug}}).
+The stable documentation for this operator can be found in our [Stackable Data Platform documentation](https://docs.stackable.tech/home/stable/{{operator_docs_slug}}).
 If you are interested in the most recent state of this repository, check out the [nightly docs](https://docs.stackable.tech/home/nightly/{{operator_docs_slug}}) instead.

 The documentation for all Stackable products can be found at [docs.stackable.tech](https://docs.stackable.tech).

-If you have a question about the Stackable Data Platform contact us via our [homepage](https://stackable.tech/) or ask a public questions in our [Discussions forum](https://github.com/orgs/stackabletech/discussions).
+If you have a question about the Stackable Data Platform, contact us via our [homepage](https://stackable.tech/) or ask a public question in our [Discussions forum](https://github.com/orgs/stackabletech/discussions).
diff --git a/.readme/partials/borrowed/footer.md.j2 b/.readme/partials/borrowed/footer.md.j2
index a494f3b0..3d279e4d 100644
--- a/.readme/partials/borrowed/footer.md.j2
+++ b/.readme/partials/borrowed/footer.md.j2
@@ -62,3 +62,7 @@ This is enforced automatically when you submit a Pull Request where a bot will g
 ## Support

 Get started with the community edition! If you want professional support, [we offer subscription plans and custom licensing](https://stackable.tech/en/plans/).
+
+## Sponsor
+
+If you want to support our work but don't need professional support, please consider [sponsoring](https://github.com/sponsors/stackabletech) our work.
diff --git a/.readme/partials/borrowed/links.md.j2 b/.readme/partials/borrowed/links.md.j2
index 39e4e614..420b89ea 100644
--- a/.readme/partials/borrowed/links.md.j2
+++ b/.readme/partials/borrowed/links.md.j2
@@ -1,5 +1,4 @@
-{% if no_jenkins_job_badge %}{% else %}![Build Actions Status](https://ci.stackable.tech/buildStatus/icon?job={{operator_name}}%2doperator%2dit%2dnightly&subject=Integration%20Tests){% endif %}
 [![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://GitHub.com/stackabletech/{{operator_name}}-operator/graphs/commit-activity)
 [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-green.svg)](https://docs.stackable.tech/home/stable/contributor/index.html)
 [![License OSL3.0](https://img.shields.io/badge/license-OSL3.0-green)](./LICENSE)
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 75be60a6..0818fc99 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,7 +1,7 @@
 {
   "rust-analyzer.rustfmt.overrideCommand": [
     "rustfmt",
-    "+nightly-2025-01-15",
+    "+nightly-2025-05-26",
     "--edition",
     "2024",
     "--"
diff --git a/README.md b/README.md
index 6d0a51d6..b354b07e 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,6 @@

Stackable Operator for Apache Hadoop

-![Build Actions Status](https://ci.stackable.tech/buildStatus/icon?job=hdfs%2doperator%2dit%2dnightly&subject=Integration%20Tests)
 [![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://GitHub.com/stackabletech/hdfs-operator/graphs/commit-activity)
 [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-green.svg)](https://docs.stackable.tech/home/stable/contributor/index.html)
 [![License OSL3.0](https://img.shields.io/badge/license-OSL3.0-green)](./LICENSE)
@@ -32,12 +31,12 @@ You can follow this [tutorial](https://docs.stackable.tech/home/stable/hdfs/gett
 ## Documentation

-The stable documentation for this operator can be found [here](https://docs.stackable.tech/home/stable/hdfs).
+The stable documentation for this operator can be found in our [Stackable Data Platform documentation](https://docs.stackable.tech/home/stable/hdfs).
 If you are interested in the most recent state of this repository, check out the [nightly docs](https://docs.stackable.tech/home/nightly/hdfs) instead.

 The documentation for all Stackable products can be found at [docs.stackable.tech](https://docs.stackable.tech).

-If you have a question about the Stackable Data Platform contact us via our [homepage](https://stackable.tech/) or ask a public questions in our [Discussions forum](https://github.com/orgs/stackabletech/discussions).
+If you have a question about the Stackable Data Platform, contact us via our [homepage](https://stackable.tech/) or ask a public question in our [Discussions forum](https://github.com/orgs/stackabletech/discussions).

 ## About The Stackable Data Platform
@@ -104,4 +103,8 @@ This is enforced automatically when you submit a Pull Request where a bot will g
 Get started with the community edition! If you want professional support, [we offer subscription plans and custom licensing](https://stackable.tech/en/plans/).
+
+## Sponsor
+
+If you want to support our work but don't need professional support, please consider [sponsoring](https://github.com/sponsors/stackabletech) our work.
diff --git a/bors.toml b/bors.toml
deleted file mode 100644
index 420d30c8..00000000
--- a/bors.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-status = [
-  'All tests passed'
-]
-delete_merged_branches = true
-use_squash_merge = true
-pr_status = [ 'license/cla' ]
-timeout_sec = 7200
-cut_body_after = ""
-required_approvals = 1
diff --git a/default.nix b/default.nix
index 263d8193..82f5928b 100644
--- a/default.nix
+++ b/default.nix
@@ -114,6 +114,10 @@ rec {
     # (see https://github.com/pre-commit/pre-commit-hooks?tab=readme-ov-file#trailing-whitespace).
     # So, remove the trailing newline already here to avoid that an
     # unnecessary change is shown in Git.
-    sed -i '$d' Cargo.nix
+    if [[ "$(uname)" == "Darwin" ]]; then
+      sed -i \"\" '$d' Cargo.nix
+    else
+      sed -i '$d' Cargo.nix
+    fi
  '';
 }
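The uname branch above exists because BSD sed on macOS requires an explicit suffix argument after -i, while GNU sed treats the suffix as optional; the \"\" escapes are only an artifact of the command living inside a Nix string. A standalone sketch of the same last-line deletion, using a hypothetical demo.nix file:

    # A file that, like the generated Cargo.nix, ends in an unwanted blank line.
    printf 'some line\n\n' > demo.nix

    if [[ "$(uname)" == "Darwin" ]]; then
      sed -i '' '$d' demo.nix   # BSD sed: the empty suffix means "no backup file"
    else
      sed -i '$d' demo.nix      # GNU sed: -i works without a suffix
    fi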
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 74c67ca3..616b1d38 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,5 +1,7 @@
-# syntax=docker/dockerfile:1.10.0@sha256:865e5dd094beca432e8c0a1d5e1c465db5f998dca4e439981029b3b81fb39ed5
+# syntax=docker/dockerfile:1.16.0@sha256:e2dd261f92e4b763d789984f6eab84be66ab4f5f08052316d8eb8f173593acf7
 # NOTE: The syntax directive needs to be the first line in a Dockerfile
+# Find the latest versions here: https://hub.docker.com/r/docker/dockerfile/tags
+# And the changelogs: https://docs.docker.com/build/buildkit/dockerfile-release-notes/ or https://github.com/moby/buildkit/releases

 # =============
 # This file is automatically generated from the templates in stackabletech/operator-templating
@@ -25,6 +27,13 @@
 ARG RELEASE="1"
 ARG STACKABLE_USER_GID="574654813"
 ARG STACKABLE_USER_UID="782252253"

+# Sets the default shell to Bash with strict error handling and robust pipeline processing.
+# "-e": Exits immediately if a command exits with a non-zero status.
+# "-u": Treats unset variables as an error, preventing unexpected behavior from undefined variables.
+# "-o pipefail": Causes a pipeline to return the exit status of the last command in the pipe that failed, ensuring errors in any part of a pipeline are not ignored.
+# "-c": Allows the execution of commands passed as a string.
+SHELL ["/bin/bash", "-euo", "pipefail", "-c"]
+
 # These labels have mostly been superseded by the OpenContainer spec annotations below but it doesn't hurt to include them
 # http://label-schema.org/rc1/
 LABEL name="Stackable Operator for Apache HDFS"
@@ -49,7 +58,7 @@
 LABEL com.redhat.license_terms=""
 LABEL io.buildah.version=""
 LABEL io.openshift.expose-services=""
-# https://github.com/opencontainers/image-spec/blob/036563a4a268d7c08b51a08f05a02a0fe74c7268/annotations.md#annotations
+# https://github.com/opencontainers/image-spec/blob/64294bd7a2bf2537e1a6a34d687caae70300b0c4/annotations.md#annotations
 LABEL org.opencontainers.image.authors="info@stackable.tech"
 LABEL org.opencontainers.image.url="https://stackable.tech"
 LABEL org.opencontainers.image.vendor="Stackable GmbH"
@@ -73,6 +82,15 @@
 assumeyes=True
 tsflags=nodocs
 EOF

+# SC2028
+# echo won't expand escape sequences. Consider printf.
+# https://github.com/koalaman/shellcheck/wiki/SC2028
+# Reason: This is complaining about the complicated PS1 statement.
+# It seems to work as intended so I'm not going to touch it!
+#
+# SC3037
+# It complains about echo flags not being available in POSIX sh, but we set the shell to bash.
+# hadolint ignore=SC2028,SC3037
 RUN <<EOF
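The SHELL directive above mainly changes how RUN pipelines fail; the rest of this hunk (and the rust-toolchain.toml change listed in the diffstat) was lost in extraction. A plain-bash illustration of the -o pipefail part, runnable outside Docker:

    # Without pipefail, the pipeline reports the status of its last command,
    # so the failing first stage goes unnoticed.
    bash -c 'false | tee /dev/null'; echo "default: $?"            # prints 0

    # With pipefail, any failing stage fails the whole pipeline, which is
    # what every RUN step now gets via the SHELL directive.
    bash -o pipefail -c 'false | tee /dev/null'; echo "pipefail: $?"  # prints 1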
Date: Sat, 31 May 2025 22:31:19 +0200
Subject: [PATCH 2/2] rustfmt update
---
 rust/operator-binary/src/container.rs    |  65 ++++++++-----
 rust/operator-binary/src/crd/affinity.rs | 112 ++++++++++++------
 rust/operator-binary/src/crd/mod.rs      |  19 ++--
 rust/operator-binary/src/crd/storage.rs  |  69 ++++++++------
 rust/operator-binary/src/main.rs         |  11 ++-
 5 files changed, 156 insertions(+), 120 deletions(-)

diff --git a/rust/operator-binary/src/container.rs b/rust/operator-binary/src/container.rs
index cdee2aa1..e1109f8c 100644
--- a/rust/operator-binary/src/container.rs
+++ b/rust/operator-binary/src/container.rs
@@ -880,37 +880,52 @@ wait_for_termination $!
         // See https://github.com/stackabletech/hdfs-operator/issues/138 for details
         if let ContainerConfig::Hdfs { role, .. } = self {
             let role_opts_name = role.hadoop_opts_env_var_for_role().to_string();
-            env.insert(role_opts_name.clone(), EnvVar {
-                name: role_opts_name,
-                value: Some(self.build_hadoop_opts(hdfs, role_group, resources)?),
-                ..EnvVar::default()
-            });
+            env.insert(
+                role_opts_name.clone(),
+                EnvVar {
+                    name: role_opts_name,
+                    value: Some(self.build_hadoop_opts(hdfs, role_group, resources)?),
+                    ..EnvVar::default()
+                },
+            );
         }

-        env.insert("HADOOP_OPTS".to_string(), EnvVar {
-            name: "HADOOP_OPTS".to_string(),
-            value: Some(construct_global_jvm_args(hdfs.has_kerberos_enabled())),
-            ..EnvVar::default()
-        });
-        if hdfs.has_kerberos_enabled() {
-            env.insert("KRB5_CONFIG".to_string(), EnvVar {
-                name: "KRB5_CONFIG".to_string(),
-                value: Some(format!("{KERBEROS_CONTAINER_PATH}/krb5.conf")),
-                ..EnvVar::default()
-            });
-            env.insert("KRB5_CLIENT_KTNAME".to_string(), EnvVar {
-                name: "KRB5_CLIENT_KTNAME".to_string(),
-                value: Some(format!("{KERBEROS_CONTAINER_PATH}/keytab")),
-                ..EnvVar::default()
-            });
+        env.insert(
+            "HADOOP_OPTS".to_string(),
+            EnvVar {
+                name: "HADOOP_OPTS".to_string(),
+                value: Some(construct_global_jvm_args(hdfs.has_kerberos_enabled())),
+                ..EnvVar::default()
+            },
+        );
+        if hdfs.has_kerberos_enabled() {
+            env.insert(
+                "KRB5_CONFIG".to_string(),
+                EnvVar {
+                    name: "KRB5_CONFIG".to_string(),
+                    value: Some(format!("{KERBEROS_CONTAINER_PATH}/krb5.conf")),
+                    ..EnvVar::default()
+                },
+            );
+            env.insert(
+                "KRB5_CLIENT_KTNAME".to_string(),
+                EnvVar {
+                    name: "KRB5_CLIENT_KTNAME".to_string(),
+                    value: Some(format!("{KERBEROS_CONTAINER_PATH}/keytab")),
+                    ..EnvVar::default()
+                },
+            );
         }

         // Needed for the `containerdebug` process to log its tracing information to.
-        env.insert("CONTAINERDEBUG_LOG_DIRECTORY".to_string(), EnvVar {
-            name: "CONTAINERDEBUG_LOG_DIRECTORY".to_string(),
-            value: Some(format!("{STACKABLE_LOG_DIR}/containerdebug")),
-            value_from: None,
-        });
+        env.insert(
+            "CONTAINERDEBUG_LOG_DIRECTORY".to_string(),
+            EnvVar {
+                name: "CONTAINERDEBUG_LOG_DIRECTORY".to_string(),
+                value: Some(format!("{STACKABLE_LOG_DIR}/containerdebug")),
+                value_from: None,
+            },
+        );

         // Overrides need to come last
         let mut env_override_vars: BTreeMap =
("app.kubernetes.io/name".to_string(), "hdfs".to_string(),), - ( - "app.kubernetes.io/instance".to_string(), - "simple-hdfs".to_string(), - ), - ("app.kubernetes.io/component".to_string(), role.to_string(),) - ])) - }), - namespace_selector: None, - namespaces: None, - topology_key: "kubernetes.io/hostname".to_string(), - ..PodAffinityTerm::default() - }, - weight: 70 - } - ]), - required_during_scheduling_ignored_during_execution: None, - }), - node_affinity: None, - node_selector: None, - }); + assert_eq!( + merged_config.affinity, + StackableAffinity { + pod_affinity: Some(PodAffinity { + preferred_during_scheduling_ignored_during_execution: Some(vec![ + WeightedPodAffinityTerm { + pod_affinity_term: PodAffinityTerm { + label_selector: Some(LabelSelector { + match_expressions: None, + match_labels: Some(BTreeMap::from([ + ("app.kubernetes.io/name".to_string(), "hdfs".to_string(),), + ( + "app.kubernetes.io/instance".to_string(), + "simple-hdfs".to_string(), + ), + ])) + }), + namespace_selector: None, + namespaces: None, + topology_key: "kubernetes.io/hostname".to_string(), + ..PodAffinityTerm::default() + }, + weight: 20 + } + ]), + required_during_scheduling_ignored_during_execution: None, + }), + pod_anti_affinity: Some(PodAntiAffinity { + preferred_during_scheduling_ignored_during_execution: Some(vec![ + WeightedPodAffinityTerm { + pod_affinity_term: PodAffinityTerm { + label_selector: Some(LabelSelector { + match_expressions: None, + match_labels: Some(BTreeMap::from([ + ("app.kubernetes.io/name".to_string(), "hdfs".to_string(),), + ( + "app.kubernetes.io/instance".to_string(), + "simple-hdfs".to_string(), + ), + ( + "app.kubernetes.io/component".to_string(), + role.to_string(), + ) + ])) + }), + namespace_selector: None, + namespaces: None, + topology_key: "kubernetes.io/hostname".to_string(), + ..PodAffinityTerm::default() + }, + weight: 70 + } + ]), + required_during_scheduling_ignored_during_execution: None, + }), + node_affinity: None, + node_selector: None, + } + ); } } diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index 104a4c7a..5646a9c7 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -1319,15 +1319,18 @@ impl DataNodeConfigFragment { limit: Some(Quantity("512Mi".to_owned())), runtime_limits: NoRuntimeLimitsFragment {}, }, - storage: BTreeMap::from([("data".to_string(), DataNodePvcFragment { - pvc: PvcConfigFragment { - capacity: Some(Quantity("10Gi".to_owned())), - storage_class: None, - selectors: None, + storage: BTreeMap::from([( + "data".to_string(), + DataNodePvcFragment { + pvc: PvcConfigFragment { + capacity: Some(Quantity("10Gi".to_owned())), + storage_class: None, + selectors: None, + }, + count: Some(1), + hdfs_storage_type: Some(HdfsStorageType::default()), }, - count: Some(1), - hdfs_storage_type: Some(HdfsStorageType::default()), - })]), + )]), }, logging: product_logging::spec::default_logging(), listener_class: Some(DEFAULT_LISTENER_CLASS.to_string()), diff --git a/rust/operator-binary/src/crd/storage.rs b/rust/operator-binary/src/crd/storage.rs index 317730dc..9ce56658 100644 --- a/rust/operator-binary/src/crd/storage.rs +++ b/rust/operator-binary/src/crd/storage.rs @@ -198,15 +198,18 @@ mod test { #[test] pub fn test_datanode_storage_defaults() { let data_node_storage = DataNodeStorageConfig { - pvcs: BTreeMap::from([("data".to_string(), DataNodePvc { - pvc: PvcConfig { - capacity: Some(Quantity("5Gi".to_owned())), - storage_class: None, - selectors: None, + pvcs: 
diff --git a/rust/operator-binary/src/crd/storage.rs b/rust/operator-binary/src/crd/storage.rs
index 317730dc..9ce56658 100644
--- a/rust/operator-binary/src/crd/storage.rs
+++ b/rust/operator-binary/src/crd/storage.rs
@@ -198,15 +198,18 @@ mod test {
     #[test]
     pub fn test_datanode_storage_defaults() {
         let data_node_storage = DataNodeStorageConfig {
-            pvcs: BTreeMap::from([("data".to_string(), DataNodePvc {
-                pvc: PvcConfig {
-                    capacity: Some(Quantity("5Gi".to_owned())),
-                    storage_class: None,
-                    selectors: None,
+            pvcs: BTreeMap::from([(
+                "data".to_string(),
+                DataNodePvc {
+                    pvc: PvcConfig {
+                        capacity: Some(Quantity("5Gi".to_owned())),
+                        storage_class: None,
+                        selectors: None,
+                    },
+                    count: 1,
+                    hdfs_storage_type: HdfsStorageType::default(),
                 },
-                count: 1,
-                hdfs_storage_type: HdfsStorageType::default(),
-            })]),
+            )]),
         };
         let pvcs = data_node_storage.build_pvcs();
@@ -236,30 +239,36 @@ mod test {
     pub fn test_datanode_storage_multiple_storage_types() {
         let data_node_storage = DataNodeStorageConfig {
             pvcs: BTreeMap::from([
-                ("hdd".to_string(), DataNodePvc {
-                    pvc: PvcConfig {
-                        capacity: Some(Quantity("12Ti".to_owned())),
-                        storage_class: Some("hdd-storage-class".to_string()),
-                        selectors: Some(LabelSelector {
-                            match_expressions: None,
-                            match_labels: Some(BTreeMap::from([(
-                                "foo".to_string(),
-                                "bar".to_string(),
-                            )])),
-                        }),
+                (
+                    "hdd".to_string(),
+                    DataNodePvc {
+                        pvc: PvcConfig {
+                            capacity: Some(Quantity("12Ti".to_owned())),
+                            storage_class: Some("hdd-storage-class".to_string()),
+                            selectors: Some(LabelSelector {
+                                match_expressions: None,
+                                match_labels: Some(BTreeMap::from([(
+                                    "foo".to_string(),
+                                    "bar".to_string(),
+                                )])),
+                            }),
+                        },
+                        count: 8,
+                        hdfs_storage_type: HdfsStorageType::Disk,
                     },
-                    count: 8,
-                    hdfs_storage_type: HdfsStorageType::Disk,
-                }),
-                ("ssd".to_string(), DataNodePvc {
-                    pvc: PvcConfig {
-                        capacity: Some(Quantity("2Ti".to_owned())),
-                        storage_class: Some("premium-ssd".to_string()),
-                        selectors: None,
+                ),
+                (
+                    "ssd".to_string(),
+                    DataNodePvc {
+                        pvc: PvcConfig {
+                            capacity: Some(Quantity("2Ti".to_owned())),
+                            storage_class: Some("premium-ssd".to_string()),
+                            selectors: None,
+                        },
+                        count: 4,
+                        hdfs_storage_type: HdfsStorageType::Ssd,
                     },
-                    count: 4,
-                    hdfs_storage_type: HdfsStorageType::Ssd,
-                }),
+                ),
             ]),
         };
         let pvcs = data_node_storage.build_pvcs();
diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs
index 5e3f4f07..88500fb9 100644
--- a/rust/operator-binary/src/main.rs
+++ b/rust/operator-binary/src/main.rs
@@ -107,10 +107,13 @@ pub async fn create_controller(
 ) {
     let (store, store_w) = reflector::store();

-    let hdfs_event_recorder = Arc::new(Recorder::new(client.as_kube_client(), Reporter {
-        controller: HDFS_FULL_CONTROLLER_NAME.to_string(),
-        instance: None,
-    }));
+    let hdfs_event_recorder = Arc::new(Recorder::new(
+        client.as_kube_client(),
+        Reporter {
+            controller: HDFS_FULL_CONTROLLER_NAME.to_string(),
+            instance: None,
+        },
+    ));

     // The topology provider will need to build label information by querying kubernetes nodes and this
     // requires the clusterrole 'hdfs-clusterrole-nodes': this is bound to each deployed HDFS cluster
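Patch 2 is pure formatter churn caused by the nightly bump in patch 1: the newer rustfmt expands function calls whose last argument is a struct literal instead of overflowing them. It can be regenerated mechanically rather than hand-edited (a sketch, assuming the toolchain pinned in patch 1 is installed):

    # Re-run rustfmt with the newly pinned nightly ...
    cargo +nightly-2025-05-26 fmt --all

    # ... then verify the result is stable and behavior-neutral.
    cargo +nightly-2025-05-26 fmt --all -- --check
    cargo test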