Skip to content

Commit 6239f24

Browse files
Implement 'no-limit' and 'max' options for CPU/Memory (#183)
* implement no-limit and max for memory * remove unused transform/validate functions * update test output * add tests for memory util * regenerate schema * ensure quotes are escaped during build * revert cpu type temporarily * feat: Add CPU max and no-limit support with new state utility function * fix: Parse CPU string to integer in GetCPUs function * fix: Handle negative CPU count and error cases in GetCPUs function * chore: Import fmt package for potential error formatting in CPU utility * feat: Add memory converter and validator utility functions with comprehensive tests * feat: Update minikube cluster schema with memory and CPU type improvements * test: Add unit tests for memory and CPU configuration scenarios in minikube cluster * test: Update test configurations for memory and CPU limit scenarios * test: Enhance cluster tests with memory and CPU configurations, integrating new state utilities for validation and conversion
1 parent 5aaebe0 commit 6239f24

11 files changed

+770
-45
lines changed

minikube/generator/schema_builder.go

+8-6
Original file line numberDiff line numberDiff line change
@@ -58,10 +58,10 @@ var updateFields = []string{
5858
var schemaOverrides map[string]SchemaOverride = map[string]SchemaOverride{
5959
"memory": {
6060
Default: "4g",
61-
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>(case-insensitive)], where unit = b, k, kb, m, mb, g or gb)",
61+
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use \\\"max\\\" to use the maximum amount of memory. Use \\\"no-limit\\\" to not specify a limit (Docker/Podman only)",
6262
Type: String,
63-
StateFunc: "state_utils.ResourceSizeConverter()",
64-
ValidateDiagFunc: "state_utils.ResourceSizeValidator()",
63+
StateFunc: "state_utils.MemoryConverter()",
64+
ValidateDiagFunc: "state_utils.MemoryValidator()",
6565
},
6666
"disk_size": {
6767
Default: "20000mb",
@@ -71,9 +71,11 @@ var schemaOverrides map[string]SchemaOverride = map[string]SchemaOverride{
7171
ValidateDiagFunc: "state_utils.ResourceSizeValidator()",
7272
},
7373
"cpus": {
74-
Default: "2",
75-
Description: "Amount of CPUs to allocate to Kubernetes",
76-
Type: Int,
74+
Default: "2",
75+
Description: "Number of CPUs allocated to Kubernetes. Use \\\"max\\\" to use the maximum number of CPUs. Use \\\"no-limit\\\" to not specify a limit (Docker/Podman only)",
76+
Type: String,
77+
StateFunc: "state_utils.CPUConverter()",
78+
ValidateDiagFunc: "state_utils.CPUValidator()",
7779
},
7880
// Customize the description to be the fullset of drivers
7981
"driver": {

minikube/generator/schema_builder_test.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -360,14 +360,14 @@ func TestOverride(t *testing.T) {
360360
assert.Equal(t, header+`
361361
"memory": {
362362
Type: schema.TypeString,
363-
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>(case-insensitive)], where unit = b, k, kb, m, mb, g or gb)",
363+
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use \"max\" to use the maximum amount of memory. Use \"no-limit\" to not specify a limit (Docker/Podman only)",
364364
365365
Optional: true,
366366
ForceNew: true,
367367
368368
Default: "4g",
369-
StateFunc: state_utils.ResourceSizeConverter(),
370-
ValidateDiagFunc: state_utils.ResourceSizeValidator(),
369+
StateFunc: state_utils.MemoryConverter(),
370+
ValidateDiagFunc: state_utils.MemoryValidator(),
371371
},
372372
373373
}

minikube/lib/memory.go

+32
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
package lib
2+
3+
import (
4+
"k8s.io/minikube/pkg/minikube/machine"
5+
)
6+
7+
8+
var NoLimit = "no-limit"
9+
var Max = "max"
10+
11+
// MemoryInfo holds system and container memory information
12+
type MemoryInfo struct {
13+
SystemMemory int
14+
}
15+
16+
// GetMemoryLimit returns the amount of memory allocated to the system and container
17+
// The return values are in MiB
18+
func GetMemoryLimit() (*MemoryInfo, error) {
19+
info, _, memErr, _ := machine.LocalHostInfo()
20+
21+
if memErr != nil {
22+
return nil, memErr
23+
}
24+
25+
// Subtract 1 GiB (1024 MiB) for system overhead
26+
memInfo := &MemoryInfo{
27+
SystemMemory: int(info.Memory) - 1024,
28+
}
29+
30+
return memInfo, nil
31+
}
32+

minikube/resource_cluster.go

+8-2
Original file line numberDiff line numberDiff line change
@@ -271,7 +271,13 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste
271271
}
272272

273273
memoryStr := d.Get("memory").(string)
274-
memoryMb, err := pkgutil.CalculateSizeInMB(memoryStr)
274+
memoryMb, err := state_utils.GetMemory(memoryStr)
275+
if err != nil {
276+
return nil, err
277+
}
278+
279+
cpuStr := d.Get("cpus").(string)
280+
cpus, err := state_utils.GetCPUs(cpuStr)
275281
if err != nil {
276282
return nil, err
277283
}
@@ -380,7 +386,7 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste
380386
KicBaseImage: d.Get("base_image").(string),
381387
Network: d.Get("network").(string),
382388
Memory: memoryMb,
383-
CPUs: d.Get("cpus").(int),
389+
CPUs: cpus,
384390
DiskSize: diskMb,
385391
Driver: driver,
386392
ListenAddress: d.Get("listen_address").(string),

minikube/resource_cluster_test.go

+111-10
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ import (
1515
"time"
1616

1717
"github.com/scott-the-programmer/terraform-provider-minikube/minikube/lib"
18+
"github.com/scott-the-programmer/terraform-provider-minikube/minikube/state_utils"
1819

1920
"github.com/golang/mock/gomock"
2021
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@@ -35,12 +36,14 @@ type mockClusterClientProperties struct {
3536
haNodes int
3637
workerNodes int
3738
diskSize int
39+
memory string
40+
cpu string
3841
}
3942

4043
func TestClusterCreation(t *testing.T) {
4144
resource.Test(t, resource.TestCase{
4245
IsUnitTest: true,
43-
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreation", 1, 0, 20000}))},
46+
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreation", 1, 0, 20000, "4096mb", "1"}))},
4447
Steps: []resource.TestStep{
4548
{
4649
Config: testUnitClusterConfig("some_driver", "TestClusterCreation"),
@@ -55,7 +58,7 @@ func TestClusterCreation(t *testing.T) {
5558
func TestClusterUpdate(t *testing.T) {
5659
resource.Test(t, resource.TestCase{
5760
IsUnitTest: true,
58-
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockUpdate(mockClusterClientProperties{t, "TestClusterUpdate", 1, 0, 20000}))},
61+
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockUpdate(mockClusterClientProperties{t, "TestClusterUpdate", 1, 0, 20000, "4096mb", "1"}))},
5962
Steps: []resource.TestStep{
6063
{
6164
Config: testUnitClusterConfig("some_driver", "TestClusterUpdate"),
@@ -73,7 +76,7 @@ func TestClusterUpdate(t *testing.T) {
7376
func TestClusterHA(t *testing.T) {
7477
resource.Test(t, resource.TestCase{
7578
IsUnitTest: true,
76-
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationHA", 3, 5, 20000}))},
79+
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationHA", 3, 5, 20000, "4096mb", "1"}))},
7780
Steps: []resource.TestStep{
7881
{
7982
Config: testUnitClusterHAConfig("some_driver", "TestClusterCreationHA"),
@@ -85,7 +88,7 @@ func TestClusterHA(t *testing.T) {
8588
func TestClusterDisk(t *testing.T) {
8689
resource.Test(t, resource.TestCase{
8790
IsUnitTest: true,
88-
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationDisk", 1, 0, 20480}))},
91+
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationDisk", 1, 0, 20480, "4096mb", "1"}))},
8992
Steps: []resource.TestStep{
9093
{
9194
Config: testUnitClusterDiskConfig("some_driver", "TestClusterCreationDisk"),
@@ -97,7 +100,7 @@ func TestClusterDisk(t *testing.T) {
97100
func TestClusterWait(t *testing.T) {
98101
resource.Test(t, resource.TestCase{
99102
IsUnitTest: true,
100-
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationWait", 1, 0, 20000}))},
103+
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationWait", 1, 0, 20000, "4096mb", "1"}))},
101104
Steps: []resource.TestStep{
102105
{
103106
Config: testUnitClusterWaitConfig("some_driver", "TestClusterCreationWait"),
@@ -326,10 +329,58 @@ func TestClusterCreation_HyperV(t *testing.T) {
326329
})
327330
}
328331

332+
func TestClusterNoLimitMemory(t *testing.T) {
333+
resource.Test(t, resource.TestCase{
334+
IsUnitTest: true,
335+
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterNoLimitMemory", 1, 0, 20000, "no-limit", "1"}))},
336+
Steps: []resource.TestStep{
337+
{
338+
Config: testUnitClusterNoLimitMemoryConfig("some_driver", "TestClusterNoLimitMemory"),
339+
},
340+
},
341+
})
342+
}
343+
344+
func TestClusterMaxMemory(t *testing.T) {
345+
resource.Test(t, resource.TestCase{
346+
IsUnitTest: true,
347+
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterMaxMemory", 1, 0, 20000, "max", "1"}))},
348+
Steps: []resource.TestStep{
349+
{
350+
Config: testUnitClusterMaxMemoryConfig("some_driver", "TestClusterMaxMemory"),
351+
},
352+
},
353+
})
354+
}
355+
356+
func TestClusterNoLimitCPU(t *testing.T) {
357+
resource.Test(t, resource.TestCase{
358+
IsUnitTest: true,
359+
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterNoLimitCPU", 1, 0, 20000, "4096mb", "no-limit"}))},
360+
Steps: []resource.TestStep{
361+
{
362+
Config: testUnitClusterNoLimitCPUConfig("some_driver", "TestClusterNoLimitCPU"),
363+
},
364+
},
365+
})
366+
}
367+
368+
func TestClusterMaxCPU(t *testing.T) {
369+
resource.Test(t, resource.TestCase{
370+
IsUnitTest: true,
371+
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterMaxCPU", 1, 0, 20000, "4096mb", "max"}))},
372+
Steps: []resource.TestStep{
373+
{
374+
Config: testUnitClusterMaxCPUConfig("some_driver", "TestClusterMaxCPU"),
375+
},
376+
},
377+
})
378+
}
379+
329380
func mockUpdate(props mockClusterClientProperties) schema.ConfigureContextFunc {
330381
ctrl := gomock.NewController(props.t)
331382

332-
mockClusterClient := getBaseMockClient(ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize)
383+
mockClusterClient := getBaseMockClient(props.t, ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize, props.memory, props.cpu)
333384

334385
gomock.InOrder(
335386
mockClusterClient.EXPECT().
@@ -366,7 +417,7 @@ func mockUpdate(props mockClusterClientProperties) schema.ConfigureContextFunc {
366417
func mockSuccess(props mockClusterClientProperties) schema.ConfigureContextFunc {
367418
ctrl := gomock.NewController(props.t)
368419

369-
mockClusterClient := getBaseMockClient(ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize)
420+
mockClusterClient := getBaseMockClient(props.t, ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize, props.memory, props.cpu)
370421

371422
mockClusterClient.EXPECT().
372423
GetAddons().
@@ -384,7 +435,7 @@ func mockSuccess(props mockClusterClientProperties) schema.ConfigureContextFunc
384435
return configureContext
385436
}
386437

387-
func getBaseMockClient(ctrl *gomock.Controller, clusterName string, haNodes int, workerNodes int, diskSize int) *lib.MockClusterClient {
438+
func getBaseMockClient(t *testing.T, ctrl *gomock.Controller, clusterName string, haNodes int, workerNodes int, diskSize int, memory string, cpu string) *lib.MockClusterClient {
388439
mockClusterClient := lib.NewMockClusterClient(ctrl)
389440

390441
os.Mkdir("test_output", 0755)
@@ -424,6 +475,16 @@ func getBaseMockClient(ctrl *gomock.Controller, clusterName string, haNodes int,
424475
Worker: true,
425476
}
426477

478+
mem, err := state_utils.GetMemory(memory)
479+
if err != nil {
480+
t.Fatalf("Failed to get memory: %v", err)
481+
}
482+
483+
c, err := state_utils.GetCPUs(cpu)
484+
if err != nil {
485+
t.Fatalf("Failed to get cpu: %v", err)
486+
}
487+
427488
cc := config.ClusterConfig{
428489
Name: "terraform-provider-minikube-acc",
429490
APIServerPort: clusterSchema["apiserver_port"].Default.(int),
@@ -432,8 +493,8 @@ func getBaseMockClient(ctrl *gomock.Controller, clusterName string, haNodes int,
432493
MinikubeISO: defaultIso,
433494
KicBaseImage: clusterSchema["base_image"].Default.(string),
434495
Network: clusterSchema["network"].Default.(string),
435-
Memory: 4096,
436-
CPUs: 2,
496+
Memory: mem,
497+
CPUs: c,
437498
DiskSize: diskSize,
438499
Driver: "some_driver",
439500
ListenAddress: clusterSchema["listen_address"].Default.(string),
@@ -827,3 +888,43 @@ func testPropertyExists(n string, id string) resource.TestCheckFunc {
827888
return nil
828889
}
829890
}
891+
892+
func testUnitClusterNoLimitMemoryConfig(driver string, clusterName string) string {
893+
return fmt.Sprintf(`
894+
resource "minikube_cluster" "new" {
895+
driver = "%s"
896+
cluster_name = "%s"
897+
memory = "no-limit"
898+
}
899+
`, driver, clusterName)
900+
}
901+
902+
func testUnitClusterMaxMemoryConfig(driver string, clusterName string) string {
903+
return fmt.Sprintf(`
904+
resource "minikube_cluster" "new" {
905+
driver = "%s"
906+
cluster_name = "%s"
907+
memory = "max"
908+
}
909+
`, driver, clusterName)
910+
}
911+
912+
func testUnitClusterNoLimitCPUConfig(driver string, clusterName string) string {
913+
return fmt.Sprintf(`
914+
resource "minikube_cluster" "new" {
915+
driver = "%s"
916+
cluster_name = "%s"
917+
cpus = "no-limit"
918+
}
919+
`, driver, clusterName)
920+
}
921+
922+
func testUnitClusterMaxCPUConfig(driver string, clusterName string) string {
923+
return fmt.Sprintf(`
924+
resource "minikube_cluster" "new" {
925+
driver = "%s"
926+
cluster_name = "%s"
927+
cpus = "max"
928+
}
929+
`, driver, clusterName)
930+
}

minikube/schema_cluster.go

+11-9
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ var (
137137
Optional: true,
138138
ForceNew: true,
139139

140-
Default: "gcr.io/k8s-minikube/kicbase:v0.0.45@sha256:81df288595202a317b1a4dc2506ca2e4ed5f22373c19a441b88cfbf4b9867c85",
140+
Default: "gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279",
141141
},
142142

143143
"binary_mirror": {
@@ -191,13 +191,15 @@ var (
191191
},
192192

193193
"cpus": {
194-
Type: schema.TypeInt,
195-
Description: "Amount of CPUs to allocate to Kubernetes",
194+
Type: schema.TypeString,
195+
Description: "Number of CPUs allocated to Kubernetes. Use \"max\" to use the maximum number of CPUs. Use \"no-limit\" to not specify a limit (Docker/Podman only)",
196196

197197
Optional: true,
198198
ForceNew: true,
199199

200-
Default: 2,
200+
Default: "2",
201+
StateFunc: state_utils.CPUConverter(),
202+
ValidateDiagFunc: state_utils.CPUValidator(),
201203
},
202204

203205
"cri_socket": {
@@ -413,7 +415,7 @@ var (
413415

414416
"gpus": {
415417
Type: schema.TypeString,
416-
Description: "Allow pods to use your NVIDIA GPUs. Options include: [all,nvidia] (Docker driver with Docker container-runtime only)",
418+
Description: "Allow pods to use your GPUs. Options include: [all,nvidia,amd] (Docker driver with Docker container-runtime only)",
417419

418420
Optional: true,
419421
ForceNew: true,
@@ -598,7 +600,7 @@ var (
598600

599601
"kubernetes_version": {
600602
Type: schema.TypeString,
601-
Description: "The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.31.0, 'latest' for v1.31.0). Defaults to 'stable'.",
603+
Description: "The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.32.0, 'latest' for v1.32.0). Defaults to 'stable'.",
602604

603605
Optional: true,
604606
ForceNew: true,
@@ -668,14 +670,14 @@ var (
668670

669671
"memory": {
670672
Type: schema.TypeString,
671-
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>(case-insensitive)], where unit = b, k, kb, m, mb, g or gb)",
673+
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use \"max\" to use the maximum amount of memory. Use \"no-limit\" to not specify a limit (Docker/Podman only)",
672674

673675
Optional: true,
674676
ForceNew: true,
675677

676678
Default: "4g",
677-
StateFunc: state_utils.ResourceSizeConverter(),
678-
ValidateDiagFunc: state_utils.ResourceSizeValidator(),
679+
StateFunc: state_utils.MemoryConverter(),
680+
ValidateDiagFunc: state_utils.MemoryValidator(),
679681
},
680682

681683
"mount": {

0 commit comments

Comments
 (0)