app-dev/devops-and-containers/oke/oke-rm/README.md (2 additions, 2 deletions)
@@ -16,13 +16,13 @@ This stack is used to create the initial network infrastructure for OKE. When co
* By default, everything is private, but there is the possibility to create public subnets
* Be careful when modifying the default values, as inputs are not validated

-[](https://cloud.oracle.com/resourcemanager/stacks/create?zipUrl=https://github.com/oracle-devrel/technology-engineering/releases/download/oke-rm-1.1.3/infra.zip)
+[](https://cloud.oracle.com/resourcemanager/stacks/create?zipUrl=https://github.com/oracle-devrel/technology-engineering/releases/download/oke-rm-1.1.4/infra.zip)

## Step 2: Create the OKE control plane

This stack is used to create the OKE control plane ONLY.

-[](https://cloud.oracle.com/resourcemanager/stacks/create?zipUrl=https://github.com/oracle-devrel/technology-engineering/releases/download/oke-rm-1.1.3/oke.zip)
+[](https://cloud.oracle.com/resourcemanager/stacks/create?zipUrl=https://github.com/oracle-devrel/technology-engineering/releases/download/oke-rm-1.1.4/oke.zip)

Also note that if the network infrastructure is located in a different compartment than the OKE cluster AND you are planning to use the OCI_VCN_NATIVE CNI,

-  # Use this cloud init script for Oracle Linux nodes; it is important to expand the boot volume in any case
-  cloud_init_ol = {
-    runcmd = compact([
-      local.runcmd_growfs_oracle_linux, # Modify here depending on the OS selected for the worker nodes
-    ])
-  }
-
-  # UBUNTU NODES: use this cloud init, and make sure to disable the default cloud init
-  cloud_init_ubuntu = {
-    runcmd = compact([
-      local.runcmd_bootstrap_ubuntu, # Modify here depending on the OS selected for the worker nodes
-    ])
-  }
-
  # Cloud init to taint nodes using Oracle Linux nodes. Make sure to disable the default cloud init
  cloud_init_with_taint_ol = {
    runcmd = [
@@ -37,7 +22,7 @@ locals {

module "oke" {
  source         = "oracle-terraform-modules/oke/oci"
- version        = "5.3.1"
+ version        = "5.3.2"
  compartment_id = var.oke_compartment_id
  # IAM - Policies
  create_iam_autoscaler_policy = "never"
@@ -99,142 +84,220 @@ module "oke" {
  # Operator
  create_operator = false

-  # OKE data plane, node workers
-  worker_pool_mode = "node-pool"
-  worker_is_public = false
+  # OKE DATA PLANE (to configure)
+
+  # These are global configurations valid for all the node pools declared. The prefix is "worker_" because they apply to all workers of the cluster.
+  # You can override these global configurations in the node pool definition, and they take precedence over the global ones (a per-pool override sketch appears after the first sample pool below).
+
+  worker_pool_mode = "node-pool" # Default mode should be node-pool for managed nodes; other modes are available for self-managed nodes, like instance and instance-pool, but be careful to have the required policy in place: https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengdynamicgrouppolicyforselfmanagednodes.htm (a hedged policy sketch follows below)
+  worker_is_public = false # Workers should never be allowed to have a public IP
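
For self-managed worker pools (worker_pool_mode = "instance" or "instance-pool"), the linked documentation requires a dynamic group and a CLUSTER_JOIN policy. A minimal sketch under that assumption; the dynamic group name, var.tenancy_ocid, and the matching rule are placeholders you would adapt, and the statements should be checked against the linked OCI docs:

```hcl
# Hypothetical dynamic group and policy for self-managed OKE nodes (names are placeholders).
resource "oci_identity_dynamic_group" "self_managed_nodes_dg" {
  compartment_id = var.tenancy_ocid # dynamic groups are created in the root compartment
  name           = "self-managed-oke-nodes"
  description    = "Instances allowed to join the OKE cluster as self-managed nodes"
  matching_rule  = "ALL {instance.compartment.id = '${var.oke_compartment_id}'}"
}

resource "oci_identity_policy" "self_managed_nodes_join" {
  compartment_id = var.oke_compartment_id
  name           = "self-managed-oke-nodes-join"
  description    = "Allow self-managed nodes to join the OKE cluster"
  statements = [
    "Allow dynamic-group self-managed-oke-nodes to {CLUSTER_JOIN} in compartment id ${var.oke_compartment_id}",
  ]
}
```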
  #ssh_public_key = local.ssh_public_key # Uncomment if you want an SSH key to access the worker nodes; be sure to set the local variable
-  worker_image_type = "oke" # NOTE: Better to use "custom" and specify the image id in a production environment
+  worker_image_type = "oke" # NOTE: the oke mode will fetch the latest OKE Oracle Linux image released by the OKE team. If you want more control, it is better to use "custom" and specify the image id, because an image id is always fixed and controlled by you
  #worker_image_id = "" # The image id to use for the worker nodes. For Oracle Linux images, check this link: https://docs.oracle.com/en-us/iaas/images/oke-worker-node-oracle-linux-8x/index.htm
-  # For Ubuntu images, you need to import it in your tenancy, see: https://canonical-oracle.readthedocs-hosted.com/en/latest/oracle-how-to/deploy-oke-nodes-using-ubuntu-images/
+  # For Ubuntu images, you need to create an Ubuntu custom image in your tenancy first, and then set the OCID of the custom image here
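
If you switch to the "custom" mode, the image OCID can be pinned through a variable so the worker image only changes when you change it. A minimal sketch; the variable name worker_image_ocid is an assumption, and its value would come from the Oracle Linux OKE images page linked above or from your own Ubuntu custom image:

```hcl
# Hypothetical variable holding an image OCID that you control and update deliberately.
variable "worker_image_ocid" {
  type        = string
  description = "OCID of the OKE worker node image (Oracle Linux OKE image or your Ubuntu custom image)"
}

# Then, inside module "oke":
#   worker_image_type = "custom"
#   worker_image_id   = var.worker_image_ocid
```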

  # Set this to true to enable in-transit encryption on all node pools by default
-  # NOTE: in-transit encryption is supported only for paravirtualized attached block volumes (NOT boot volumes), hence you will need to create another StorageClass in the cluster as the default oci-bv StorageClass uses iSCSI
-  # Also note that Bare Metal instances do not support paravirtualized volumes, so do not enable this in node pools that require BM instances
-  worker_pv_transit_encryption = false
+  # NOTE: in-transit encryption is supported only for paravirtualized attached block volumes and boot volumes, hence you will need to create another StorageClass in the cluster to attach volumes through paravirtualization, as the default oci-bv StorageClass uses iSCSI (a hedged StorageClass sketch follows below)
+  # Also note that Bare Metal instances do not support paravirtualized volumes; the oke module won't enable it on BM shapes, even if you set this to true
+  worker_pv_transit_encryption = true

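A minimal sketch of the additional StorageClass mentioned above, written with the Terraform kubernetes provider; the provisioner name and the attachment-type parameter follow the OCI Block Volume CSI driver documentation, but verify them against your driver version before relying on this:

```hcl
# Assumes a configured "kubernetes" provider pointing at this cluster.
resource "kubernetes_storage_class" "oci_bv_paravirtualized" {
  metadata {
    name = "oci-bv-paravirtualized"
  }
  storage_provisioner = "blockvolume.csi.oraclecloud.com"
  reclaim_policy      = "Delete"
  volume_binding_mode = "WaitForFirstConsumer"
  parameters = {
    "attachment-type" = "paravirtualized" # needed for in-transit encryption of attached volumes
  }
}
```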
  # Enable encryption of volumes with a key managed by you, in your OCI Vault
  # When using OCI_VCN_NATIVE_CNI, set the maximum number of pods for all nodes, must be between 1 and 110
+
  #max_pods_per_node = 31

-  worker_disable_default_cloud_init = false # If set to true, gives you full control over the cloud init; set it when using Ubuntu nodes or nodes with taints (can even be set individually at the node pool level)
-  worker_cloud_init = [{ content_type = "text/cloud-config", content = yamlencode(local.cloud_init_ol) }] # Cloud init is different depending on whether you are using Ubuntu or Oracle Linux nodes. You can also set taints with the cloud init
+  #worker_cloud_init = [{ content_type = "text/cloud-config", content = yamlencode(local.cloud_init_with_taint_ol) }] # Cloud init to add to all node pools. This will be added to the default_cloud_init
+
+  /* ABOUT CLOUD INIT
+  The OKE module will automatically generate an optimal cloud-init for both Oracle Linux and Ubuntu nodes. This auto-generated cloud-init is called the "default cloud-init".
+  You can disable this and define your own cloud-init, but this is not recommended unless you know what you are doing.
+  For Oracle Linux, the oci-growfs command is already included in the default cloud-init.
+  */
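
If you do disable the default cloud-init, your custom script has to take over both the filesystem expansion and the OKE bootstrap. A minimal sketch for an Oracle Linux worker, assuming the pattern from the OKE custom cloud-init documentation (fetching oke_init_script from the instance metadata endpoint); treat it as a starting point, not the module's actual script:

```hcl
locals {
  # Hypothetical fully custom cloud-init for Oracle Linux workers, only needed when
  # worker_disable_default_cloud_init / disable_default_cloud_init is set to true.
  cloud_init_custom_ol_example = {
    runcmd = compact([
      # Expand the root filesystem onto the larger boot volume.
      "sudo /usr/libexec/oci-growfs -y",
      # Fetch and run the OKE bootstrap script, as in the OKE custom cloud-init docs.
      "curl --fail -H 'Authorization: Bearer Oracle' -L0 http://169.254.169.254/opc/v2/instance/metadata/oke_init_script | base64 --decode > /var/run/oke-init.sh",
      "bash /var/run/oke-init.sh",
    ])
  }
}
```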

  # GLOBAL TAGS TO BE APPLIED ON ALL NODES
  # NOTE: tags will be applied to both the node pool and the nodes
+
  /*workers_freeform_tags = {
    "oke-cluster-name" = var.cluster_name
  }
  workers_defined_tags = {}
  */

  # GLOBAL NODE POOL LABELS TO BE APPLIED ON ALL NODES (Kubernetes labels)
+
  #worker_node_labels = {}

+  # This is a collection of example node pools that you can use with the OKE module. Set create = true to provision them
  worker_pools = {

-    #SAMPLE NODE POOL, SET create = true TO PROVISION IT
+    #ORACLE LINUX - MANAGED NODE POOL
    np-ad1 = {
      shape = "VM.Standard.E4.Flex"
      size  = 1
-      kubernetes_version = var.kubernetes_version # You can set this value as fixed, so that control plane and data plane are upgraded separately
+      kubernetes_version = var.kubernetes_version # You can set this variable to a constant, so that control plane and data plane are upgraded separately
      placement_ads = ["1"] # As a best practice, one node pool should be associated with only one specific AD
      ocpus  = 1 # No need to specify ocpus and memory if you are not using a Flex shape
      memory = 16
-      #image_type = "custom"
-      #image_id = "" # You can override global worker node parameters individually in the node pool
      node_cycling_enabled = false # Option to enable/disable node pool cycling through Terraform. Only works with Enhanced clusters!
      node_cycling_max_surge = "50%"
      node_cycling_max_unavailable = "25%"
-
-      node_cycling_mode = ["boot_volume"] # Valid values are instance and boot_volume. Only works when (kubernetes_version, image_id, boot_volume_size, node_metadata, ssh_public_key, volume_kms_key_id) are modified. If you need to change something else, switch to instance
+      node_cycling_mode = ["boot_volume"] # Valid values are instance and boot_volume. Only works when (kubernetes_version, image_id, boot_volume_size, node_metadata, ssh_public_key, volume_kms_key_id) are modified. If you need to change something else, switch to "instance"
      # NOTE: boot_volume mode seems to work only for Flannel clusters for now
-      boot_volume_size = 100 # For Oracle Linux, make sure the oci-growfs command is specified in the cloud-init script. This module already implements this
+      boot_volume_size = 100
      freeform_tags = { # Nodes in the node pool will be tagged with these freeform tags
        "oke-cluster-name" = var.cluster_name
      }
      # max_pods_per_node = 10 # When using the VCN_NATIVE CNI, configure the maximum number of pods for each node in the node pool
-      ignore_initial_pool_size = false # If set to true, node pool size drift won't be tracked by Terraform; useful also if this pool is autoscaled by an external component (cluster-autoscaler) or manually by a user
-      create = false # Set it to true so that the node pool is created
+      create = false # Set it to true so that the node pool is created
    }
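
As noted in the global worker configuration above, any of the worker_* settings can be overridden inside an individual pool. A minimal sketch of such an override, using the hypothetical var.worker_image_ocid variable from the earlier sketch; the pool name and values are placeholders:

```hcl
    # Hypothetical pool overriding the global worker_image_type = "oke" with a pinned custom image.
    np-ad1-pinned-image = {
      shape      = "VM.Standard.E4.Flex"
      size       = 1
      image_type = "custom"
      image_id   = var.worker_image_ocid # assumption: variable defined in the earlier sketch
      create     = false
    }
```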

-    #SYSTEM NODE POOL TO BE ENABLED FOR THE CLUSTER AUTOSCALER
-    np-system-ad1 = {
+    #UBUNTU - MANAGED NODE POOL
+    np-ad1-ubuntu = {
      shape = "VM.Standard.E4.Flex"
      size  = 1
+      kubernetes_version = var.kubernetes_version
      placement_ads = ["1"]
      ocpus  = 1
      memory = 16
-      node_cycling_enabled = true # Only works with Enhanced clusters!
+      # NOTE! The OKE module will automatically verify the image and install the OKE Ubuntu Node package. You just need to create a custom image based on Ubuntu 22.04 or 24.04. Ubuntu Minimal is recommended (a hedged image sketch follows this pool)
+      image_type = "custom"
+      image_id = "ocid1.image.oc1..." # Put your custom Ubuntu image here
+      node_cycling_enabled = false
      node_cycling_max_surge = "50%"
      node_cycling_max_unavailable = "25%"
      node_cycling_mode = ["boot_volume"]
-      node_labels = {
-        role = "system"
-      }
+      # NOTE! Make sure you create the original Ubuntu VM with a boot volume of size 50 (the default). The custom image will require at least the boot volume size of the original VM
+      boot_volume_size = 100
      create = false
    }
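
One way to produce the custom Ubuntu image OCID referenced above is to capture a prepared Ubuntu 22.04/24.04 VM as a custom image. A minimal sketch; the builder instance variable is a hypothetical input you would provide:

```hcl
# Hypothetical: capture an existing, prepared Ubuntu VM as a custom image.
resource "oci_core_image" "ubuntu_oke_node" {
  compartment_id = var.oke_compartment_id
  instance_id    = var.ubuntu_builder_instance_id # assumption: a VM you created from the Ubuntu base image
  display_name   = "ubuntu-24.04-oke-node"
}

# Then reference it in the pool:
#   image_id = oci_core_image.ubuntu_oke_node.id
```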

-    #SAMPLE NODE POOL WITH A CLOUD INIT TO SET NODE TAINTS
-    np-taints = { # An example of a node pool using a custom cloud-init script to define taints at the node pool level
-      shape = "VM.Standard.E4.Flex" # No need to specify ocpus and memory if you are not using a Flex shape
+    #ORACLE LINUX - MANAGED NODE POOL WITH TAINTS
+    np-ad1-taints = { # An example of a node pool using a custom cloud-init script to define taints at the node pool level
+      shape = "VM.Standard.E4.Flex"
      size  = 1
-      placement_ads = ["1"] # As a best practice, one node pool should be associated with only one specific AD
-      ocpus = 2
+      placement_ads = ["1"]
+      ocpus = 1
      memory = 16
-      disable_default_cloud_init = true # If you want to configure some kubelet arguments, make sure to disable the default cloud-init and remember to include it in your custom cloud-init
+      disable_default_cloud_init = true # If you want to configure some kubelet arguments, make sure to disable the default cloud-init, as the taints are defined through kubelet extra arguments (a hedged cloud-init sketch follows this pool)
      #burst = "BASELINE_1_2" # Valid values BASELINE_1_8, BASELINE_1_2, only for Flex shapes!
+
+      # Enable/disable compute plugins
+      agent_config = {
+        are_all_plugins_disabled = false
+        is_management_disabled   = false
+        is_monitoring_disabled   = false
+        plugins_config = {
+          "Bastion"                             = "DISABLED"
+          "Block Volume Management"             = "DISABLED"
+          "Compute HPC RDMA Authentication"     = "DISABLED"
+          "Compute HPC RDMA Auto-Configuration" = "DISABLED"
+          "Compute Instance Monitoring"         = "ENABLED"
+          "Compute Instance Run Command"        = "DISABLED"
+          "Compute RDMA GPU Monitoring"         = "DISABLED"
+          "Custom Logs Monitoring"              = "DISABLED"
+          "Management Agent"                    = "DISABLED"
+          "Oracle Autonomous Linux"             = "DISABLED"
+          "OS Management Service Agent"         = "DISABLED"
+        }
+      }
+
+      create = false
+    }
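
Because the default cloud-init is disabled for this pool, the custom cloud_init_with_taint_ol local has to run the OKE bootstrap itself and pass the taints as kubelet extra arguments. A minimal sketch building on the earlier custom cloud-init sketch, assuming the oke-init.sh bootstrap script (fetched as shown there) accepts --kubelet-extra-args as described in the OKE custom cloud-init documentation; the taint key and value are placeholders:

```hcl
locals {
  # Hypothetical taint-setting variant of the custom cloud-init sketched earlier:
  # only the bootstrap line changes, passing the taint through kubelet extra arguments.
  cloud_init_with_taint_ol_example = {
    runcmd = compact([
      "sudo /usr/libexec/oci-growfs -y",
      # --register-with-taints is a standard kubelet flag; dedicated=example:NoSchedule is a placeholder taint.
      "bash /var/run/oke-init.sh --kubelet-extra-args '--register-with-taints=dedicated=example:NoSchedule'",
    ])
  }
}
```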

-    # SAMPLE AUTOSCALED NODE POOL
-    # This is a sample pool where autoscaling is enabled, note the freeform tag
-    # REQUIREMENTS FOR ENABLING THE CLUSTER AUTOSCALER
-    # - THE CLUSTER AUTOSCALER ADDON MUST BE ENABLED
-    # - POLICIES MUST BE IN PLACE FOR THE CLUSTER AUTOSCALER
-    # - THE SYSTEM NODE POOL MUST BE CREATED, will feature nodes labelled with role:system
-    # - THE "override_coredns" local variable must be set to true in addons.tf
-    # - NODE POOLS with freeform_tags cluster_autoscaler = "enabled" will be autoscaled
+
+    # CLUSTER AUTOSCALER
+
+    # ORACLE LINUX SYSTEM NODES - MANAGED NODE POOL
+    np-system-ad1 = {
+      shape = "VM.Standard.E4.Flex"
+      size  = 1
+      placement_ads = ["1"]
+      ocpus  = 1
+      memory = 16
+      node_cycling_enabled = false
+      node_cycling_max_surge = "50%"
+      node_cycling_max_unavailable = "25%"
+      node_cycling_mode = ["boot_volume"]
+      node_labels = {
+        role = "system"
+      }
+      create = false
+    }
+
+    # ORACLE LINUX AUTOSCALED - MANAGED NODE POOL
+    /* This is a sample pool where autoscaling is enabled, note the freeform tag
+    REQUIREMENTS FOR ENABLING THE CLUSTER AUTOSCALER
+    - THE CLUSTER AUTOSCALER ADDON MUST BE ENABLED
+    - POLICIES MUST BE IN PLACE FOR THE CLUSTER AUTOSCALER (a hedged policy sketch follows this pool)
+    - THE SYSTEM NODE POOL MUST BE CREATED, and will feature nodes labelled with role:system
+    - The "override_coredns" local variable must be set to true in addons.tf
+    - NODE POOLS with freeform_tags cluster_autoscaler = "enabled" will be autoscaled
+    - THE NODE POOL MUST BE A MANAGED TYPE; THE CLUSTER AUTOSCALER DOES NOT WORK WITH SELF-MANAGED WORKER POOLS!
+    */
    np-autoscaled-ad1 = {
      shape = "VM.Standard.E4.Flex"
      size  = 0
      placement_ads = ["1"]
      ocpus  = 1
      memory = 16
-      node_cycling_enabled = true
+      node_cycling_enabled = false
      node_cycling_max_surge = "50%"
      node_cycling_max_unavailable = "25%"
      node_cycling_mode = ["boot_volume"]
      boot_volume_size = 100
-      ignore_initial_pool_size = true
+      ignore_initial_pool_size = true # If set to true, node pool size drift won't be tracked by Terraform; useful also if this pool is autoscaled by an external component (cluster-autoscaler) or manually by a user
      freeform_tags = {
        cluster_autoscaler = "enabled"
      }
      create = false
    }
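
For the "POLICIES MUST BE IN PLACE" requirement above, the cluster autoscaler needs permission to resize node pools and manage their instances. A minimal sketch; the dynamic group name is a placeholder, and the exact statements should be verified against the OKE cluster autoscaler documentation for your setup (instance principals vs. workload identity):

```hcl
# Hypothetical policy for the OKE cluster autoscaler addon (names are placeholders).
resource "oci_identity_policy" "cluster_autoscaler" {
  compartment_id = var.oke_compartment_id
  name           = "oke-cluster-autoscaler"
  description    = "Permissions required by the OKE cluster autoscaler"
  statements = [
    "Allow dynamic-group oke-cluster-autoscaler-dg to manage cluster-node-pools in compartment id ${var.oke_compartment_id}",
    "Allow dynamic-group oke-cluster-autoscaler-dg to manage instance-family in compartment id ${var.oke_compartment_id}",
    "Allow dynamic-group oke-cluster-autoscaler-dg to use subnets in compartment id ${var.oke_compartment_id}",
    "Allow dynamic-group oke-cluster-autoscaler-dg to read virtual-network-family in compartment id ${var.oke_compartment_id}",
    "Allow dynamic-group oke-cluster-autoscaler-dg to use vnics in compartment id ${var.oke_compartment_id}",
    "Allow dynamic-group oke-cluster-autoscaler-dg to inspect compartments in compartment id ${var.oke_compartment_id}",
  ]
}
```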

-    #SAMPLE AUTOSCALED PREEMPTIBLE NODE POOL
+    #ORACLE LINUX AUTOSCALED PREEMPTIBLE - MANAGED NODE POOL
    # Often, to save money it makes sense to provision preemptible instances, as autoscaled node pools are already very dynamic
    np-autoscaled-preemptible-ad1 = {
      shape = "VM.Standard.E4.Flex"
      size  = 1
      placement_ads = ["1"]
      ocpus  = 1
      memory = 16
-      node_cycling_enabled = true
+      node_cycling_enabled = false
      node_cycling_max_surge = "50%"
      node_cycling_max_unavailable = "25%"
      node_cycling_mode = ["boot_volume"]
-      boot_volume_size = 70
-      ignore_initial_pool_size = true
+      boot_volume_size = 100
+      ignore_initial_pool_size = true # If set to true, node pool size drift won't be tracked by Terraform; useful also if this pool is autoscaled by an external component (cluster-autoscaler) or manually by a user