Skip to content

Commit b8a2b13

Browse files
authored
Merge branch 'master' into master
2 parents 0d1a88b + 3a9a539 commit b8a2b13

104 files changed

Lines changed: 1847 additions & 920 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to locate content that may be hidden.

bin/experiment/experiment.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,7 @@ import (
5757
podNetworkLatency "github.com/litmuschaos/litmus-go/experiments/generic/pod-network-latency/experiment"
5858
podNetworkLoss "github.com/litmuschaos/litmus-go/experiments/generic/pod-network-loss/experiment"
5959
podNetworkPartition "github.com/litmuschaos/litmus-go/experiments/generic/pod-network-partition/experiment"
60+
podNetworkRateLimit "github.com/litmuschaos/litmus-go/experiments/generic/pod-network-rate-limit/experiment"
6061
kafkaBrokerPodFailure "github.com/litmuschaos/litmus-go/experiments/kafka/kafka-broker-pod-failure/experiment"
6162
ebsLossByID "github.com/litmuschaos/litmus-go/experiments/kube-aws/ebs-loss-by-id/experiment"
6263
ebsLossByTag "github.com/litmuschaos/litmus-go/experiments/kube-aws/ebs-loss-by-tag/experiment"
@@ -155,6 +156,8 @@ func main() {
155156
podNetworkLoss.PodNetworkLoss(ctx, clients)
156157
case "pod-network-partition":
157158
podNetworkPartition.PodNetworkPartition(ctx, clients)
159+
case "pod-network-rate-limit":
160+
podNetworkRateLimit.PodNetworkRateLimit(ctx, clients)
158161
case "pod-memory-hog":
159162
podMemoryHog.PodMemoryHog(ctx, clients)
160163
case "pod-cpu-hog":

chaoslib/litmus/container-kill/helper/container-kill.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -208,10 +208,11 @@ func stopDockerContainer(containerIDs []string, socketPath, signal, source strin
208208

209209
// getRestartCount return the restart count of target container
210210
func getRestartCount(target targetDetails, clients clients.ClientSets) (int, error) {
211-
pod, err := clients.KubeClient.CoreV1().Pods(target.Namespace).Get(context.Background(), target.Name, v1.GetOptions{})
211+
pod, err := clients.GetPod(target.Namespace, target.Name, 180, 2)
212212
if err != nil {
213213
return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: target.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", target.Name, target.Namespace), Reason: err.Error()}
214214
}
215+
215216
restartCount := 0
216217
for _, container := range pod.Status.ContainerStatuses {
217218
if container.Name == target.TargetContainer {

chaoslib/litmus/container-kill/lib/container-kill.go

Lines changed: 7 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@ import (
1616
experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/types"
1717
"github.com/litmuschaos/litmus-go/pkg/log"
1818
"github.com/litmuschaos/litmus-go/pkg/probe"
19-
"github.com/litmuschaos/litmus-go/pkg/status"
2019
"github.com/litmuschaos/litmus-go/pkg/types"
2120
"github.com/litmuschaos/litmus-go/pkg/utils/common"
2221
"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
@@ -59,7 +58,7 @@ func PrepareContainerKill(ctx context.Context, experimentsDetails *experimentTyp
5958
if experimentsDetails.ChaosServiceAccount == "" {
6059
experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients)
6160
if err != nil {
62-
return stacktrace.Propagate(err, "could not experiment service account")
61+
return stacktrace.Propagate(err, "could not get experiment service account")
6362
}
6463
}
6564

@@ -118,26 +117,8 @@ func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experiment
118117

119118
appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
120119

121-
//checking the status of the helper pods, wait till the pod comes to running state else fail the experiment
122-
log.Info("[Status]: Checking the status of the helper pods")
123-
if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
124-
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
125-
return stacktrace.Propagate(err, "could not check helper status")
126-
}
127-
128-
// Wait till the completion of the helper pod
129-
// set an upper limit for the waiting time
130-
log.Info("[Wait]: waiting till the completion of the helper pod")
131-
podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
132-
if err != nil || podStatus == "Failed" {
133-
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
134-
return common.HelperFailedError(err, appLabel, experimentsDetails.ChaosNamespace, true)
135-
}
136-
137-
//Deleting all the helper pod for container-kill chaos
138-
log.Info("[Cleanup]: Deleting all the helper pods")
139-
if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
140-
return stacktrace.Propagate(err, "could not delete helper pod(s)")
120+
if err := common.ManagerHelperLifecycle(appLabel, chaosDetails, clients, true); err != nil {
121+
return err
141122
}
142123
}
143124
return nil
@@ -170,26 +151,8 @@ func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experime
170151

171152
appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
172153

173-
//checking the status of the helper pods, wait till the pod comes to running state else fail the experiment
174-
log.Info("[Status]: Checking the status of the helper pods")
175-
if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
176-
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
177-
return stacktrace.Propagate(err, "could not check helper status")
178-
}
179-
180-
// Wait till the completion of the helper pod
181-
// set an upper limit for the waiting time
182-
log.Info("[Wait]: waiting till the completion of the helper pod")
183-
podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
184-
if err != nil || podStatus == "Failed" {
185-
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
186-
return common.HelperFailedError(err, appLabel, experimentsDetails.ChaosNamespace, true)
187-
}
188-
189-
//Deleting all the helper pod for container-kill chaos
190-
log.Info("[Cleanup]: Deleting all the helper pods")
191-
if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
192-
return stacktrace.Propagate(err, "could not delete helper pod(s)")
154+
if err := common.ManagerHelperLifecycle(appLabel, chaosDetails, clients, true); err != nil {
155+
return err
193156
}
194157

195158
return nil
@@ -262,10 +225,10 @@ func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.Ex
262225
helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
263226
}
264227

265-
_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
266-
if err != nil {
228+
if err := clients.CreatePod(experimentsDetails.ChaosNamespace, helperPod); err != nil {
267229
return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
268230
}
231+
269232
return nil
270233
}
271234

chaoslib/litmus/disk-fill/helper/disk-fill.go

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,6 @@ package helper
33
import (
44
"context"
55
"fmt"
6-
"github.com/litmuschaos/litmus-go/pkg/cerrors"
7-
"github.com/litmuschaos/litmus-go/pkg/telemetry"
8-
"github.com/palantir/stacktrace"
9-
"go.opentelemetry.io/otel"
106
"os"
117
"os/exec"
128
"os/signal"
@@ -15,6 +11,11 @@ import (
1511
"syscall"
1612
"time"
1713

14+
"github.com/litmuschaos/litmus-go/pkg/cerrors"
15+
"github.com/litmuschaos/litmus-go/pkg/telemetry"
16+
"github.com/palantir/stacktrace"
17+
"go.opentelemetry.io/otel"
18+
1819
"github.com/litmuschaos/litmus-go/pkg/clients"
1920
"github.com/litmuschaos/litmus-go/pkg/events"
2021
experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/disk-fill/types"
@@ -197,7 +198,7 @@ func fillDisk(t targetDetails, bs int) error {
197198
// getEphemeralStorageAttributes derive the ephemeral storage attributes from the target pod
198199
func getEphemeralStorageAttributes(t targetDetails, clients clients.ClientSets) (int64, error) {
199200

200-
pod, err := clients.KubeClient.CoreV1().Pods(t.Namespace).Get(context.Background(), t.Name, v1.GetOptions{})
201+
pod, err := clients.GetPod(t.Namespace, t.Name, 180, 2)
201202
if err != nil {
202203
return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: err.Error()}
203204
}
@@ -251,7 +252,7 @@ func getSizeToBeFilled(experimentsDetails *experimentTypes.ExperimentDetails, us
251252
// revertDiskFill will delete the target pod if target pod is evicted
252253
// if target pod is still running then it will delete the files, which was created during chaos execution
253254
func revertDiskFill(t targetDetails, clients clients.ClientSets) error {
254-
pod, err := clients.KubeClient.CoreV1().Pods(t.Namespace).Get(context.Background(), t.Name, v1.GetOptions{})
255+
pod, err := clients.GetPod(t.Namespace, t.Name, 180, 2)
255256
if err != nil {
256257
return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s,namespace: %s}", t.Name, t.Namespace), Reason: err.Error()}
257258
}

chaoslib/litmus/disk-fill/lib/disk-fill.go

Lines changed: 8 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@ import (
1616
experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/disk-fill/types"
1717
"github.com/litmuschaos/litmus-go/pkg/log"
1818
"github.com/litmuschaos/litmus-go/pkg/probe"
19-
"github.com/litmuschaos/litmus-go/pkg/status"
2019
"github.com/litmuschaos/litmus-go/pkg/types"
2120
"github.com/litmuschaos/litmus-go/pkg/utils/common"
2221
"github.com/litmuschaos/litmus-go/pkg/utils/exec"
@@ -64,7 +63,7 @@ func PrepareDiskFill(ctx context.Context, experimentsDetails *experimentTypes.Ex
6463
if experimentsDetails.ChaosServiceAccount == "" {
6564
experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients)
6665
if err != nil {
67-
return stacktrace.Propagate(err, "could not experiment service account")
66+
return stacktrace.Propagate(err, "could not get experiment service account")
6867
}
6968
}
7069

@@ -122,26 +121,8 @@ func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experiment
122121

123122
appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
124123

125-
//checking the status of the helper pods, wait till the pod comes to running state else fail the experiment
126-
log.Info("[Status]: Checking the status of the helper pods")
127-
if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
128-
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
129-
return stacktrace.Propagate(err, "could not check helper status")
130-
}
131-
132-
// Wait till the completion of the helper pod
133-
// set an upper limit for the waiting time
134-
log.Info("[Wait]: waiting till the completion of the helper pod")
135-
podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
136-
if err != nil || podStatus == "Failed" {
137-
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
138-
return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
139-
}
140-
141-
//Deleting all the helper pod for disk-fill chaos
142-
log.Info("[Cleanup]: Deleting the helper pod")
143-
if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
144-
return stacktrace.Propagate(err, "could not delete helper pod(s)")
124+
if err := common.ManagerHelperLifecycle(appLabel, chaosDetails, clients, true); err != nil {
125+
return err
145126
}
146127
}
147128

@@ -153,7 +134,7 @@ func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experiment
153134
func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, execCommandDetails exec.PodDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
154135
ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectDiskFillFaultInParallelMode")
155136
defer span.End()
156-
var err error
137+
157138
// run the probes during chaos
158139
if len(resultDetails.ProbeDetails) != 0 {
159140
if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
@@ -177,26 +158,8 @@ func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experime
177158

178159
appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
179160

180-
//checking the status of the helper pods, wait till the pod comes to running state else fail the experiment
181-
log.Info("[Status]: Checking the status of the helper pods")
182-
if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
183-
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
184-
return stacktrace.Propagate(err, "could not check helper status")
185-
}
186-
187-
// Wait till the completion of the helper pod
188-
// set an upper limit for the waiting time
189-
log.Info("[Wait]: waiting till the completion of the helper pod")
190-
podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
191-
if err != nil || podStatus == "Failed" {
192-
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
193-
return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
194-
}
195-
196-
//Deleting all the helper pod for disk-fill chaos
197-
log.Info("[Cleanup]: Deleting all the helper pod")
198-
if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
199-
return stacktrace.Propagate(err, "could not delete helper pod(s)")
161+
if err := common.ManagerHelperLifecycle(appLabel, chaosDetails, clients, true); err != nil {
162+
return err
200163
}
201164

202165
return nil
@@ -268,10 +231,10 @@ func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.Ex
268231
helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
269232
}
270233

271-
_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
272-
if err != nil {
234+
if err := clients.CreatePod(experimentsDetails.ChaosNamespace, helperPod); err != nil {
273235
return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
274236
}
237+
275238
return nil
276239
}
277240

chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go

Lines changed: 2 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,6 @@ import (
1414
"github.com/litmuschaos/litmus-go/pkg/events"
1515
experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/docker-service-kill/types"
1616
"github.com/litmuschaos/litmus-go/pkg/log"
17-
"github.com/litmuschaos/litmus-go/pkg/probe"
18-
"github.com/litmuschaos/litmus-go/pkg/status"
1917
"github.com/litmuschaos/litmus-go/pkg/types"
2018
"github.com/litmuschaos/litmus-go/pkg/utils/common"
2119
"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
@@ -69,40 +67,8 @@ func PrepareDockerServiceKill(ctx context.Context, experimentsDetails *experimen
6967

7068
appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID)
7169

72-
//Checking the status of helper pod
73-
log.Info("[Status]: Checking the status of the helper pod")
74-
if err = status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
75-
common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
76-
return stacktrace.Propagate(err, "could not check helper status")
77-
}
78-
79-
// run the probes during chaos
80-
if len(resultDetails.ProbeDetails) != 0 {
81-
if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
82-
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
83-
return err
84-
}
85-
}
86-
87-
// Checking for the node to be in not-ready state
88-
log.Info("[Status]: Check for the node to be in NotReady state")
89-
if err = status.CheckNodeNotReadyState(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
90-
common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
91-
return stacktrace.Propagate(err, "could not check for NOT READY state")
92-
}
93-
94-
// Wait till the completion of helper pod
95-
log.Info("[Wait]: Waiting till the completion of the helper pod")
96-
podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
97-
if err != nil || podStatus == "Failed" {
98-
common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
99-
return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false)
100-
}
101-
102-
//Deleting the helper pod
103-
log.Info("[Cleanup]: Deleting the helper pod")
104-
if err = common.DeletePod(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
105-
return stacktrace.Propagate(err, "could not delete helper pod")
70+
if err := common.ManagerHelperLifecycle(appLabel, chaosDetails, clients, true); err != nil {
71+
return err
10672
}
10773

10874
//Waiting for the ramp time after chaos injection

0 commit comments

Comments
 (0)