 # 1. NEW CLUSTER WITH NEW NOTEBOOKS
 # ------------------------------------------------
 resource "databricks_job" "new_cluster_new_job_new_notebooks" {
-  for_each = (var.deploy_jobs == true && var.cluster_id == null && var.local_notebooks != null) ? { for p in var.local_notebooks : "${p.job_name}-${p.local_path}" => p } : {}
+  for_each = (var.deploy_jobs == true && var.cluster_id == null && var.deploy_job_cluster == true && var.local_notebooks != null) ? { for p in var.local_notebooks : "${p.job_name}-${p.local_path}" => p } : {}

   name = "${each.value.job_name} (Terraform managed)"

   new_cluster {
-    num_workers   = var.num_workers
-    spark_version = data.databricks_spark_version.latest.id
-    node_type_id  = join("", data.databricks_node_type.cluster_node_type.*.id)
+    policy_id           = var.cluster_policy_id == null && var.deploy_cluster_policy == false ? null : local.cluster_policy_id
+    spark_version       = var.spark_version != null ? var.spark_version : data.databricks_spark_version.latest.id
+    node_type_id        = var.deploy_worker_instance_pool != true ? local.worker_node_type : null
+    instance_pool_id    = var.deploy_worker_instance_pool == true ? join("", databricks_instance_pool.worker_instance_nodes.*.id) : null
+    driver_node_type_id = var.deploy_worker_instance_pool != true ? local.driver_node_type : null
+    num_workers         = var.fixed_value != null ? var.fixed_value : null
+
+    dynamic "autoscale" {
+      for_each = var.auto_scaling != null ? [var.auto_scaling] : []
+      content {
+        min_workers = autoscale.value[0]
+        max_workers = autoscale.value[1]
+      }
+    }
+
+    dynamic "aws_attributes" {
+      for_each = var.aws_attributes == null ? [] : [var.aws_attributes]
+      content {
+        instance_profile_arn   = var.add_instance_profile_to_workspace == true ? join("", databricks_instance_profile.shared.*.id) : lookup(aws_attributes.value, "instance_profile_arn", null)
+        zone_id                = lookup(aws_attributes.value, "zone_id", null)
+        first_on_demand        = lookup(aws_attributes.value, "first_on_demand", null)
+        availability           = lookup(aws_attributes.value, "availability", null)
+        spot_bid_price_percent = lookup(aws_attributes.value, "spot_bid_price_percent", null)
+        ebs_volume_count       = lookup(aws_attributes.value, "ebs_volume_count", null)
+        ebs_volume_size        = lookup(aws_attributes.value, "ebs_volume_size", null)
+      }
+    }
+
+    autotermination_minutes = var.cluster_autotermination_minutes
+    custom_tags             = var.custom_tags != null ? merge(var.custom_tags, local.shared_tags) : merge(local.shared_tags)
+
+    spark_conf = var.spark_conf
   }

   notebook_task {
@@ -43,18 +72,51 @@ resource "databricks_job" "new_cluster_new_job_new_notebooks" {
     }
   }
 }
-
 # ------------------------------------------------
-# 2. EXISTING CLUSTER WITH NEW NOTEBOOKS
+# 2. NEW CLUSTER WITH EXISTING NOTEBOOKS
 # ------------------------------------------------
-resource "databricks_job" "existing_cluster_new_job_new_notebooks" {
-  for_each = (var.deploy_jobs == true && var.cluster_id != null && var.local_notebooks != null) ? { for p in var.local_notebooks : "${p.job_name}-${p.local_path}" => p } : {}
+resource "databricks_job" "new_cluster_new_job_existing_notebooks" {
+  for_each = (var.deploy_jobs == true && var.cluster_id == null && var.deploy_job_cluster == true && var.remote_notebooks != null) ? { for p in var.remote_notebooks : "${p.job_name}-${p.path}" => p } : {}

-  name                = "${each.value.job_name} (Terraform managed)"
-  existing_cluster_id = local.cluster_info
+  name = "${each.value.job_name} (Terraform managed)"
+
+  new_cluster {
+    policy_id           = var.cluster_policy_id == null && var.deploy_cluster_policy == false ? null : local.cluster_policy_id
+    spark_version       = var.spark_version != null ? var.spark_version : data.databricks_spark_version.latest.id
+    node_type_id        = var.deploy_worker_instance_pool != true ? local.worker_node_type : null
+    instance_pool_id    = var.deploy_worker_instance_pool == true ? join("", databricks_instance_pool.worker_instance_nodes.*.id) : null
+    driver_node_type_id = var.deploy_worker_instance_pool != true ? local.driver_node_type : null
+    num_workers         = var.fixed_value != null ? var.fixed_value : null
+
+    dynamic "autoscale" {
+      for_each = var.auto_scaling != null ? [var.auto_scaling] : []
+      content {
+        min_workers = autoscale.value[0]
+        max_workers = autoscale.value[1]
+      }
+    }
+
+    dynamic "aws_attributes" {
+      for_each = var.aws_attributes == null ? [] : [var.aws_attributes]
+      content {
+        instance_profile_arn   = var.add_instance_profile_to_workspace == true ? join("", databricks_instance_profile.shared.*.id) : lookup(aws_attributes.value, "instance_profile_arn", null)
+        zone_id                = lookup(aws_attributes.value, "zone_id", null)
+        first_on_demand        = lookup(aws_attributes.value, "first_on_demand", null)
+        availability           = lookup(aws_attributes.value, "availability", null)
+        spot_bid_price_percent = lookup(aws_attributes.value, "spot_bid_price_percent", null)
+        ebs_volume_count       = lookup(aws_attributes.value, "ebs_volume_count", null)
+        ebs_volume_size        = lookup(aws_attributes.value, "ebs_volume_size", null)
+      }
+    }
+
+    autotermination_minutes = var.cluster_autotermination_minutes
+    custom_tags             = var.custom_tags != null ? merge(var.custom_tags, local.shared_tags) : merge(local.shared_tags)
+
+    spark_conf = var.spark_conf
+  }

   notebook_task {
-    notebook_path   = lookup(each.value, "path", "${data.databricks_current_user.me.home}/${each.value.job_name}")
+    notebook_path   = lookup(each.value, "path")
     base_parameters = var.task_parameters
   }

@@ -83,22 +145,18 @@ resource "databricks_job" "existing_cluster_new_job_new_notebooks" {
     }
   }
 }
+
 # ------------------------------------------------
-# 3. NEW CLUSTER WITH EXITING NOTEBOOKS
+# 3. EXISTING CLUSTER WITH NEW NOTEBOOKS
 # ------------------------------------------------
-resource "databricks_job" "new_cluster_new_job_existing_notebooks" {
-  for_each = (var.deploy_jobs == true && var.cluster_id == null && var.remote_notebooks != null) ? { for p in var.remote_notebooks : "${p.job_name}-${p.path}" => p } : {}
-
-  name = "${each.value.job_name} (Terraform managed)"
+resource "databricks_job" "existing_cluster_new_job_new_notebooks" {
+  for_each = (var.deploy_jobs == true && (var.cluster_id != null || var.deploy_cluster == true) && var.local_notebooks != null) ? { for p in var.local_notebooks : "${p.job_name}-${p.local_path}" => p } : {}

-  new_cluster {
-    num_workers   = var.num_workers
-    spark_version = data.databricks_spark_version.latest.id
-    node_type_id  = join("", data.databricks_node_type.cluster_node_type.*.id)
-  }
+  name                = "${each.value.job_name} (Terraform managed)"
+  existing_cluster_id = local.cluster_info

   notebook_task {
-    notebook_path   = lookup(each.value, "path")
+    notebook_path   = lookup(each.value, "path", "${data.databricks_current_user.me.home}/${each.value.job_name}")
     base_parameters = var.task_parameters
   }

@@ -132,7 +190,7 @@ resource "databricks_job" "new_cluster_new_job_existing_notebooks" {
 # 4. EXISTING CLUSTER WITH EXISTING NOTEBOOKS
 # ------------------------------------------------
 resource "databricks_job" "existing_cluster_new_job_existing_notebooks" {
-  for_each = (var.deploy_jobs == true && var.cluster_id != null && var.remote_notebooks != null) ? { for p in var.remote_notebooks : "${p.job_name}-${p.path}" => p } : {}
+  for_each = var.deploy_jobs == true && (var.cluster_id != null || var.deploy_cluster == true) && var.remote_notebooks != null ? { for p in var.remote_notebooks : "${p.job_name}-${p.path}" => p } : {}

   name                = "${each.value.job_name} (Terraform managed)"
   existing_cluster_id = local.cluster_info
@@ -166,4 +224,4 @@ resource "databricks_job" "existing_cluster_new_job_existing_notebooks" {
       pause_status = lookup(schedule.value, "pause_status", null)
     }
   }
-}
+}
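
For reviewers: a minimal invocation sketch showing how the reworked inputs fit together. The module `source` path and every literal value below are hypothetical; only the variable names come from this diff. With `cluster_id = null` and `deploy_job_cluster = true`, jobs flow through the new-cluster resources (1 and 2); a non-null `cluster_id` or `deploy_cluster = true` selects the existing-cluster resources (3 and 4).

# Hypothetical usage sketch -- values are illustrative, not from the diff.
module "databricks_jobs" {
  source = "../modules/databricks-workspace-management" # hypothetical path

  deploy_jobs        = true
  cluster_id         = null # null + deploy_job_cluster selects resources 1 and 2
  deploy_job_cluster = true

  spark_version = "7.3.x-scala2.12"
  fixed_value   = 2       # fixed worker count ...
  # auto_scaling = [2, 8] # ... or a [min_workers, max_workers] pair instead

  cluster_autotermination_minutes = 30
  custom_tags                     = { team = "data-eng" }
  task_parameters                 = { env = "dev" }

  # Resource 1 keys jobs off local notebook files ...
  local_notebooks = [
    { job_name = "daily-etl", local_path = "notebooks/daily_etl.py" }
  ]
  # ... while resource 2 points at notebooks already in the workspace.
  remote_notebooks = [
    { job_name = "weekly-report", path = "/Shared/reports/weekly" }
  ]

  # Optional keys are read with lookup(..., null), so partial maps are fine.
  aws_attributes = {
    availability           = "SPOT_WITH_FALLBACK"
    first_on_demand        = 1
    spot_bid_price_percent = 100
  }
}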