5 changes: 2 additions & 3 deletions CHANGELOG.md
@@ -15,9 +15,8 @@ All notable changes to this project will be documented in this file.

### Changed

-- BREAKING: Renamed headless rolegroup service from `<stacklet>-<role>-<rolegroup>` to `<stacklet>-<role>-<rolegroup>-headless` ([#721]).
-- The `prometheus.io/scrape` label was moved to the metrics service
-- The headless service now only exposes product / data ports, the metrics service only metrics ports
+- The `prometheus.io/scrape` label was moved to the metrics service ([#721]).
+- The headless service now only exposes product / data ports, the metrics service only metrics ports ([#721]).
- Bump stackable-operator to `0.100.1` and product-config to `0.8.0` ([#722]).

[#713]: https://github.com/stackabletech/hdfs-operator/pull/713
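For orientation, this revert restores the service addressing scheme in which rolegroup pods are reached through the service named after the rolegroup itself, with no `-headless` suffix. A minimal sketch of the naming pattern, assuming standard Kubernetes pod DNS (the helper function and its arguments are illustrative, not operator code):

```python
def rolegroup_pod_fqdn(stacklet: str, role: str, rolegroup: str,
                       namespace: str, replica: int) -> str:
    """In-cluster DNS name of a rolegroup pod.

    The governing service is named <stacklet>-<role>-<rolegroup>
    again, i.e. without the -headless suffix this PR removes.
    """
    service = f"{stacklet}-{role}-{rolegroup}"
    pod = f"{service}-{replica}"
    return f"{pod}.{service}.{namespace}.svc.cluster.local"

# Matches the address used in the getting-started script below:
# simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local
print(rolegroup_pod_fqdn("simple-hdfs", "namenode", "default", "default", 0))
```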
6 changes: 3 additions & 3 deletions docs/modules/hdfs/examples/getting_started/getting_started.sh
@@ -116,7 +116,7 @@ kubectl rollout status --watch --timeout=5m statefulset/webhdfs

file_status() {
# tag::file-status[]
-kubectl exec -n default webhdfs-0 -- curl -s -XGET "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default-headless.default.svc.cluster.local:9870/webhdfs/v1/?op=LISTSTATUS"
+kubectl exec -n default webhdfs-0 -- curl -s -XGET "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local:9870/webhdfs/v1/?op=LISTSTATUS"
# end::file-status[]
}

@@ -138,7 +138,7 @@ kubectl cp -n default ./testdata.txt webhdfs-0:/tmp
create_file() {
# tag::create-file[]
kubectl exec -n default webhdfs-0 -- \
-curl -s -XPUT -T /tmp/testdata.txt "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default-headless.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE&noredirect=true"
+curl -s -XPUT -T /tmp/testdata.txt "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE&noredirect=true"
# end::create-file[]
}

@@ -157,7 +157,7 @@ echo "Created file: $found_file with status $(file_status)"
echo "Delete file"
delete_file() {
# tag::delete-file[]
-kubectl exec -n default webhdfs-0 -- curl -s -XDELETE "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default-headless.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=DELETE"
+kubectl exec -n default webhdfs-0 -- curl -s -XDELETE "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=DELETE"
# end::delete-file[]
}

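The three tagged curl calls map one-to-one onto WebHDFS REST operations. As a cross-check, here is a rough Python equivalent using `requests`, assuming it runs inside the cluster (e.g. from the same webhdfs-0 helper pod); only the URLs are taken from the script above:

```python
import requests

BASE = ("http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default"
        ".default.svc.cluster.local:9870/webhdfs/v1")

# LISTSTATUS on the root directory (tag::file-status)
print(requests.get(f"{BASE}/?op=LISTSTATUS").json())

# CREATE /testdata.txt; the namenode redirects the write to a datanode
with open("/tmp/testdata.txt", "rb") as f:
    requests.put(
        f"{BASE}/testdata.txt?user.name=stackable&op=CREATE",
        files={"file": f},
        allow_redirects=True,
    )

# DELETE the file again (tag::delete-file)
requests.delete(f"{BASE}/testdata.txt?user.name=stackable&op=DELETE")
```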
2 changes: 1 addition & 1 deletion rust/operator-binary/src/crd/mod.rs
@@ -391,7 +391,7 @@ impl v1alpha1::HdfsCluster {
let ns = ns.clone();
(0..*replicas).map(move |i| HdfsPodRef {
namespace: ns.clone(),
-role_group_service_name: rolegroup_ref.rolegroup_headless_service_name(),
+role_group_service_name: rolegroup_ref.object_name(),
pod_name: format!("{}-{}", rolegroup_ref.object_name(), i),
ports: self
.data_ports(role)
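To make the effect of this hunk explicit: each replica's pod ref now points at the rolegroup service whose name equals `rolegroup_ref.object_name()`, which is also the prefix of the pod name. A Python paraphrase of the loop, with field names abbreviated from the Rust struct:

```python
from dataclasses import dataclass

@dataclass
class PodRef:
    namespace: str
    service_name: str  # role_group_service_name in the Rust code
    pod_name: str

def pod_refs(object_name: str, namespace: str, replicas: int) -> list[PodRef]:
    # object_name is e.g. "simple-hdfs-namenode-default"; it now serves
    # both as the governing service name (previously "<object_name>-headless")
    # and as the prefix of each StatefulSet pod name.
    return [PodRef(namespace, object_name, f"{object_name}-{i}")
            for i in range(replicas)]
```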
2 changes: 1 addition & 1 deletion rust/operator-binary/src/hdfs_controller.rs
@@ -887,7 +887,7 @@ fn rolegroup_statefulset(
match_labels: Some(rolegroup_selector_labels.into()),
..LabelSelector::default()
},
-service_name: Some(rolegroup_ref.rolegroup_headless_service_name()),
+service_name: Some(rolegroup_ref.object_name()),
template: pod_template,

volume_claim_templates: Some(pvcs),
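A StatefulSet's `spec.serviceName` must name the headless Service that governs pod DNS, so it has to change in lockstep with the Service rename in this PR. A small, hypothetical consistency check one could run over rendered manifests (dict shapes follow the Kubernetes API, the function itself is not part of the operator):

```python
def check_governing_service(sts: dict, svc: dict) -> None:
    """Assert that pods of `sts` are resolvable through `svc`.

    Pod DNS only works as <pod>.<serviceName>.<ns>.svc.cluster.local
    when serviceName matches a headless Service in the same namespace.
    """
    assert sts["spec"]["serviceName"] == svc["metadata"]["name"]
    assert sts["metadata"]["namespace"] == svc["metadata"]["namespace"]
    assert svc["spec"].get("clusterIP") == "None", "service must be headless"
```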
2 changes: 1 addition & 1 deletion rust/operator-binary/src/service.rs
@@ -48,7 +48,7 @@ pub(crate) fn rolegroup_headless_service(
let mut metadata_builder = ObjectMetaBuilder::new();
metadata_builder
.name_and_namespace(hdfs)
-.name(rolegroup_ref.rolegroup_headless_service_name())
+.name(rolegroup_ref.object_name())
.ownerreference_from_resource(hdfs, None, Some(true))
.with_context(|_| ObjectMissingMetadataForOwnerRefSnafu {
obj_ref: ObjectRef::from_obj(hdfs),
2 changes: 1 addition & 1 deletion tests/templates/kuttl/orphaned-resources/04-assert.yaml
@@ -21,4 +21,4 @@ metadata:
apiVersion: v1
kind: Service
metadata:
-name: test-hdfs-datanode-newrolegroup-headless
+name: test-hdfs-datanode-newrolegroup
2 changes: 1 addition & 1 deletion tests/templates/kuttl/profiling/run-profiler.py
@@ -55,7 +55,7 @@ def fetch_flamegraph(service_url, refresh_path):

def test_profiling(role, port):
service_url = (
f"http://test-hdfs-{role}-default-0.test-hdfs-{role}-default-headless:{port}"
f"http://test-hdfs-{role}-default-0.test-hdfs-{role}-default:{port}"
)

print(f"Test profiling on {service_url}")
6 changes: 3 additions & 3 deletions tests/templates/kuttl/smoke/30-assert.yaml.j2
@@ -80,7 +80,7 @@ status:
apiVersion: v1
kind: Service
metadata:
-name: hdfs-namenode-default-headless
+name: hdfs-namenode-default
spec:
ports:
- name: rpc
@@ -110,7 +110,7 @@ spec:
apiVersion: v1
kind: Service
metadata:
-name: hdfs-datanode-default-headless
+name: hdfs-datanode-default
spec:
ports:
- name: data
@@ -144,7 +144,7 @@ spec:
apiVersion: v1
kind: Service
metadata:
-name: hdfs-journalnode-default-headless
+name: hdfs-journalnode-default
spec:
ports:
- name: rpc
4 changes: 2 additions & 2 deletions tests/templates/kuttl/smoke/test_jmx_metrics.py
@@ -65,9 +65,9 @@ def check_datanode_metrics(
# Kind "FSDatasetState"
'hadoop_datanode_capacity{fsdatasetid=".+",kind="FSDatasetState",role="DataNode",service="HDFS"}',
# Kind "DataNodeActivity" suffixed with "_info"
-'hadoop_datanode_blocks_get_local_path_info_{host="hdfs-datanode-default-\\d+\\.hdfs-datanode-default-headless\\..+\\.svc\\.cluster\\.local",kind="DataNodeActivity",port="9866",role="DataNode",service="HDFS"}',
+'hadoop_datanode_blocks_get_local_path_info_{host="hdfs-datanode-default-\\d+\\.hdfs-datanode-default\\..+\\.svc\\.cluster\\.local",kind="DataNodeActivity",port="9866",role="DataNode",service="HDFS"}',
# Kind "DataNodeActivity"
-'hadoop_datanode_blocks_read{host="hdfs-datanode-default-\\d+\\.hdfs-datanode-default-headless\\..+\\.svc\\.cluster\\.local",kind="DataNodeActivity",port="9866",role="DataNode",service="HDFS"}',
+'hadoop_datanode_blocks_read{host="hdfs-datanode-default-\\d+\\.hdfs-datanode-default\\..+\\.svc\\.cluster\\.local",kind="DataNodeActivity",port="9866",role="DataNode",service="HDFS"}',
# Counter suffixed with "_total"
'hadoop_datanode_estimated_capacity_lost_total{kind="FSDatasetState",role="DataNode",service="HDFS"}',
# Boolean metric
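The expected-metric entries above are regular expressions, so the `host` label's FQDN component has to drop `-headless` as well. A quick self-check of the updated pattern against a made-up sample line:

```python
import re

pattern = re.compile(
    r'hadoop_datanode_blocks_read\{host="hdfs-datanode-default-\d+\.'
    r'hdfs-datanode-default\..+\.svc\.cluster\.local",kind="DataNodeActivity",'
    r'port="9866",role="DataNode",service="HDFS"\}'
)

# Hypothetical scraped line using the reverted (suffix-less) service name.
sample = ('hadoop_datanode_blocks_read{host="hdfs-datanode-default-0.'
          'hdfs-datanode-default.default.svc.cluster.local",'
          'kind="DataNodeActivity",port="9866",role="DataNode",service="HDFS"}')
assert pattern.search(sample)
```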
4 changes: 2 additions & 2 deletions tests/templates/kuttl/smoke/webhdfs.py
@@ -17,7 +17,7 @@ def main() -> int:

if command == "ls":
http_code = requests.get(
f"http://hdfs-namenode-default-0.hdfs-namenode-default-headless.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=LISTSTATUS"
f"http://hdfs-namenode-default-0.hdfs-namenode-default.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=LISTSTATUS"
).status_code
if http_code != 200:
result = 1
@@ -31,7 +31,7 @@ def main() -> int:
)
}
http_code = requests.put(
f"http://hdfs-namenode-default-0.hdfs-namenode-default-headless.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE",
f"http://hdfs-namenode-default-0.hdfs-namenode-default.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE",
files=files,
allow_redirects=True,
).status_code