Skip to content

Commit 1d2e31f

Browse files
authored
[Feature] Add arm option to databricks_node_type instead of graviton (#5028)
## Changes <!-- Summary of your changes that are easy to understand --> The `graviton` name is AWS-specific and could be confusing for Azure users, so the `graviton` attribute is deprecated and a new `arm` attribute is added instead. Resolves #4128 ## Tests <!-- How is this tested? Please see the checklist below and also describe any other relevant tests --> - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] has entry in `NEXT_CHANGELOG.md` file
1 parent 23eb4f2 commit 1d2e31f

File tree

8 files changed

+42
-14
lines changed

8 files changed

+42
-14
lines changed

NEXT_CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66

77
### New Features and Improvements
88

9+
* Add `arm` option to `databricks_node_type` instead of `graviton` ([#5028](https://github.com/databricks/terraform-provider-databricks/pull/5028))
10+
911
### Bug Fixes
1012

1113
### Documentation

access/resource_sql_permissions.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -280,7 +280,7 @@ func (ta *SqlPermissions) getOrCreateCluster(clustersAPI clusters.ClustersAPI) (
280280
Latest: true,
281281
LongTermSupport: true,
282282
})
283-
nodeType := clustersAPI.GetSmallestNodeType(compute.NodeTypeRequest{LocalDisk: true})
283+
nodeType := clustersAPI.GetSmallestNodeType(clusters.NodeTypeRequest{NodeTypeRequest: compute.NodeTypeRequest{LocalDisk: true}})
284284
aclCluster, err := clustersAPI.GetOrCreateRunningCluster(
285285
"terraform-table-acl", clusters.Cluster{
286286
ClusterName: "terraform-table-acl",

catalog/resource_sql_table.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,7 @@ func (ti *SqlTableInfo) getOrCreateCluster(clusterName string, clustersAPI clust
187187
sparkVersion := clusters.LatestSparkVersionOrDefault(clustersAPI.Context(), clustersAPI.WorkspaceClient(), compute.SparkVersionRequest{
188188
Latest: true,
189189
})
190-
nodeType := clustersAPI.GetSmallestNodeType(compute.NodeTypeRequest{LocalDisk: true})
190+
nodeType := clustersAPI.GetSmallestNodeType(clusters.NodeTypeRequest{NodeTypeRequest: compute.NodeTypeRequest{LocalDisk: true}})
191191
aclCluster, err := clustersAPI.GetOrCreateRunningCluster(
192192
clusterName, clusters.Cluster{
193193
ClusterName: clusterName,

clusters/clusters_api.go

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -892,8 +892,10 @@ func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (
892892
}
893893
}
894894
}
895-
smallestNodeType := a.GetSmallestNodeType(compute.NodeTypeRequest{
896-
LocalDisk: true,
895+
smallestNodeType := a.GetSmallestNodeType(NodeTypeRequest{
896+
NodeTypeRequest: compute.NodeTypeRequest{
897+
LocalDisk: true,
898+
},
897899
})
898900
log.Printf("[INFO] Creating an autoterminating cluster with node type %s", smallestNodeType)
899901
r := Cluster{

clusters/data_node_type.go

Lines changed: 27 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,29 @@ import (
66

77
"github.com/databricks/databricks-sdk-go/service/compute"
88
"github.com/databricks/terraform-provider-databricks/common"
9+
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
910

1011
"github.com/databricks/databricks-sdk-go"
1112
)
1213

13-
func defaultSmallestNodeType(w *databricks.WorkspaceClient, request compute.NodeTypeRequest) string {
14+
type NodeTypeRequest struct {
15+
compute.NodeTypeRequest
16+
Arm bool `json:"arm,omitempty"`
17+
}
18+
19+
func defaultSmallestNodeType(w *databricks.WorkspaceClient, request NodeTypeRequest) string {
20+
if request.Arm || request.Graviton {
21+
if w.Config.IsAws() {
22+
if request.Fleet {
23+
return "rgd-fleet.xlarge"
24+
}
25+
return "m6g.xlarge"
26+
} else if w.Config.IsAzure() {
27+
return "Standard_D4pds_v6"
28+
}
29+
}
1430
if w.Config.IsAzure() {
15-
return "Standard_D3_v2"
31+
return "Standard_D4ds_v5"
1632
} else if w.Config.IsGcp() {
1733
return "n1-standard-4"
1834
}
@@ -22,28 +38,33 @@ func defaultSmallestNodeType(w *databricks.WorkspaceClient, request compute.Node
2238
return "i3.xlarge"
2339
}
2440

25-
func smallestNodeType(ctx context.Context, request compute.NodeTypeRequest, w *databricks.WorkspaceClient) string {
41+
func smallestNodeType(ctx context.Context, request NodeTypeRequest, w *databricks.WorkspaceClient) string {
2642
nodeTypes, err := w.Clusters.ListNodeTypes(ctx)
2743
if err != nil {
2844
return defaultSmallestNodeType(w, request)
2945
}
30-
nodeType, err := nodeTypes.Smallest(request)
46+
// if arm is true, then graviton is true
47+
request.Graviton = request.Arm || request.Graviton
48+
nodeType, err := nodeTypes.Smallest(request.NodeTypeRequest)
3149
if err != nil {
3250
nodeType = defaultSmallestNodeType(w, request)
3351
}
3452
return nodeType
3553
}
3654

37-
func (a ClustersAPI) GetSmallestNodeType(request compute.NodeTypeRequest) string {
55+
func (a ClustersAPI) GetSmallestNodeType(request NodeTypeRequest) string {
3856
w, _ := a.client.WorkspaceClient()
3957
return smallestNodeType(a.context, request, w)
4058
}
4159

4260
// DataSourceNodeType returns smallest node depending on the cloud
4361
func DataSourceNodeType() common.Resource {
44-
return common.WorkspaceData(func(ctx context.Context, data *compute.NodeTypeRequest, w *databricks.WorkspaceClient) error {
62+
return common.WorkspaceDataWithCustomizeFunc(func(ctx context.Context, data *NodeTypeRequest, w *databricks.WorkspaceClient) error {
4563
data.Id = smallestNodeType(ctx, *data, w)
4664
log.Printf("[DEBUG] smallest node: %s", data.Id)
4765
return nil
66+
}, func(s map[string]*schema.Schema) map[string]*schema.Schema {
67+
common.CustomizeSchemaPath(s, "graviton").SetDeprecated("Use `arm` instead")
68+
return s
4869
})
4970
}

clusters/data_node_type_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -336,7 +336,7 @@ func TestNodeTypeEmptyList(t *testing.T) {
336336
ID: ".",
337337
}.Apply(t)
338338
assert.NoError(t, err)
339-
assert.Equal(t, "Standard_D3_v2", d.Id())
339+
assert.Equal(t, "Standard_D4ds_v5", d.Id())
340340
}
341341

342342
func TestNodeTypeFleetEmptyList(t *testing.T) {

docs/data-sources/node_type.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,10 +56,11 @@ Data source allows you to pick groups by the following attributes
5656
* `GPU Accelerated` (AWS, Azure)
5757
* `photon_worker_capable` - (Optional) Pick only nodes that can run Photon workers. Defaults to _false_.
5858
* `photon_driver_capable` - (Optional) Pick only nodes that can run Photon driver. Defaults to _false_.
59-
* `graviton` - (boolean, optional) if we should limit the search only to nodes with AWS Graviton or Azure Cobalt CPUs. Default to _false_.
59+
* `arm` - (boolean, optional) if we should limit the search only to nodes with AWS Graviton or Azure Cobalt CPUs. Defaults to _false_.
6060
* `fleet` - (boolean, optional) if we should limit the search only to [AWS fleet instance types](https://docs.databricks.com/compute/aws-fleet-instances.html). Default to _false_.
6161
* `is_io_cache_enabled` - (Optional) . Pick only nodes that have IO Cache. Defaults to _false_.
6262
* `support_port_forwarding` - (Optional) Pick only nodes that support port forwarding. Defaults to _false_.
63+
* `graviton` - (Deprecated; boolean, optional) if we should limit the search only to nodes with AWS Graviton or Azure Cobalt CPUs. Defaults to _false_. *Use `arm` instead!*
6364

6465
## Attribute Reference
6566

storage/mounts.go

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -147,8 +147,10 @@ func getCommonClusterObject(clustersAPI clusters.ClustersAPI, clusterName string
147147
LongTermSupport: true,
148148
}),
149149
NodeTypeID: clustersAPI.GetSmallestNodeType(
150-
compute.NodeTypeRequest{
151-
LocalDisk: true,
150+
clusters.NodeTypeRequest{
151+
NodeTypeRequest: compute.NodeTypeRequest{
152+
LocalDisk: true,
153+
},
152154
}),
153155
AutoterminationMinutes: 10,
154156
SparkConf: map[string]string{

0 commit comments

Comments
 (0)