Skip to content

Commit 8c5d18f

Browse files
authored
fix: remove multi-az flag from the cluster scale command, as that is a create-only field (#92)
1 parent 1f9fda3 commit 8c5d18f

2 files changed

Lines changed: 5 additions & 43 deletions

File tree

internal/cmd/cluster/scale.go

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,6 @@ match.`,
5858
cmd.Flags().Var(new(resource.ByteQuantity), "ram", "RAM per node (e.g. \"8\", \"8G\", \"8Gi\", or \"8GiB\")")
5959
cmd.Flags().Var(new(resource.ByteQuantity), "disk", "Total disk size per node (e.g. \"200GiB\"); if larger than the node's included disk, the difference is provisioned as additional storage")
6060
cmd.Flags().Var(new(resource.Millicores), "gpu", "Number of GPUs per node (e.g. \"1\", \"2\", or \"1000m\")")
61-
cmd.Flags().Bool("multi-az", false, "Schedule nodes in multiple availability zones")
6261
cmd.Flags().Bool("wait", false, "Wait for the cluster to become healthy")
6362
cmd.Flags().Duration("wait-timeout", 10*time.Minute, "Maximum time to wait for cluster health")
6463
cmd.Flags().Duration("wait-poll-interval", 5*time.Second, "How often to poll for cluster health")
@@ -150,17 +149,12 @@ match.`,
150149
}
151150

152151
multiAz := currentPkg.GetPackage().GetMultiAz()
153-
if cmd.Flags().Changed("multi-az") {
154-
multiAz, _ = cmd.Flags().GetBool("multi-az")
155-
}
156152

157153
// If no resource flags changed, keep the current package — avoids a
158154
// ListPackages round-trip and prevents spurious failures when the current
159155
// package is deprecated or shares specs with another active package.
160156
var newPkg *bookingv1.Package
161-
if cmd.Flags().Changed("cpu") || cmd.Flags().Changed("ram") || cmd.Flags().Changed("gpu") || cmd.Flags().Changed("multi-az") {
162-
// scale doesn't allow changing multi-az so any new package selected needs to
163-
// use the same multi-az value
157+
if util.AnyFlagChanged(cmd, []string{"cpu", "ram", "gpu"}) {
164158
newPkg, err = clusterutil.ResolvePackageByResources(ctx, client.Booking(), clusterutil.PackageResourceQuery{
165159
AccountID: accountID,
166160
CloudProvider: cluster.CloudProviderId,
@@ -336,13 +330,12 @@ func scaleConfirmPrompt(
336330
}
337331

338332
prompt := fmt.Sprintf(
339-
"Cluster %s (%s) will be scaled to:\n Nodes: %s\n CPU: %s\n RAM: %s\n Disk: %s\n Multi AZ: %s",
333+
"Cluster %s (%s) will be scaled to:\n Nodes: %s\n CPU: %s\n RAM: %s\n Disk: %s",
340334
cluster.GetId(), cluster.GetName(),
341335
output.DiffValue(fmt.Sprintf("%d", oldNodes), fmt.Sprintf("%d", cluster.Configuration.NumberOfNodes)),
342336
output.DiffValue(oldRC.GetCpu(), newRC.GetCpu()),
343337
output.DiffValue(oldRC.GetRam(), newRC.GetRam()),
344338
diskLine,
345-
output.DiffValue(output.BoolYesNo(oldPkg.GetMultiAz()), output.BoolYesNo(newPkg.GetMultiAz())),
346339
)
347340
if oldRC.GetGpu() != "" || newRC.GetGpu() != "" {
348341
prompt += fmt.Sprintf("\n GPU: %s", output.DiffValue(oldRC.GetGpu(), newRC.GetGpu()))

internal/cmd/cluster/scale_test.go

Lines changed: 3 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -55,19 +55,6 @@ func newPkg(id, cpu, ram, disk string) *bookingv1.Package {
5555
}
5656
}
5757

58-
func newMultiAzPkg(id, cpu, ram, disk string) *bookingv1.Package {
59-
return &bookingv1.Package{
60-
Id: id,
61-
Name: id,
62-
ResourceConfiguration: &bookingv1.ResourceConfiguration{
63-
Cpu: cpu,
64-
Ram: ram,
65-
Disk: disk,
66-
},
67-
MultiAz: true,
68-
}
69-
}
70-
7158
type scaleEnv struct {
7259
cluster *clusterv1.Cluster
7360
currentPkg *bookingv1.Package
@@ -237,36 +224,18 @@ func TestScale_AbortWithoutForce(t *testing.T) {
237224
assert.Equal(t, 0, env.Server.UpdateClusterCalls.Count())
238225
}
239226

240-
func TestScale_ConfirmPromptShowsDiskAndMultiAzCorrectly(t *testing.T) {
227+
func TestScale_ConfirmPromptShowsDiskCorrectly(t *testing.T) {
241228
env := testutil.NewTestEnv(t)
242229
setupScale(env, scaleEnv{
243230
cluster: baseCluster(),
244231
currentPkg: newPkg(pkgID1, "1000m", "4GiB", "50GiB"),
245-
newPkg: newMultiAzPkg(pkgID2, "2000m", "4GiB", "50GiB"),
232+
newPkg: newPkg(pkgID2, "2000m", "4GiB", "50GiB"),
246233
})
247234

248-
_, stderr, err := testutil.Exec(t, env, "cluster", "scale", "cluster-123", "--cpu", "2", "--multi-az")
235+
_, stderr, err := testutil.Exec(t, env, "cluster", "scale", "cluster-123", "--cpu", "2")
249236
require.NoError(t, err)
250237

251238
assert.Contains(t, stderr, "Disk: 50GiB")
252-
assert.Contains(t, stderr, "Multi AZ: no => yes")
253-
}
254-
255-
func TestScale_MultiAz(t *testing.T) {
256-
env := testutil.NewTestEnv(t)
257-
setupScale(env, scaleEnv{
258-
cluster: baseCluster(),
259-
currentPkg: newPkg(pkgID1, "1000m", "4GiB", "50GiB"),
260-
newPkg: newMultiAzPkg(pkgID2, "2000m", "4GiB", "50GiB"),
261-
})
262-
263-
_, _, err := testutil.Exec(t, env, "cluster", "scale", "cluster-123", "--cpu", "2", "--multi-az", "--force")
264-
require.NoError(t, err)
265-
266-
req, ok := env.Server.UpdateClusterCalls.Last()
267-
require.True(t, ok)
268-
assert.Equal(t, pkgID2, req.GetCluster().GetConfiguration().GetPackageId())
269-
assert.Equal(t, 1, env.BookingServer.ListPackagesCalls.Count())
270239
}
271240

272241
func TestScale_MissingClusterID(t *testing.T) {

0 commit comments

Comments (0)