Skip to content

Commit

Permalink
chore(ci): rework iscsi-tools extensions test
Browse files Browse the repository at this point in the history
Rework `iscsi-tools` extensions tests based on siderolabs/extensions#577

Signed-off-by: Noel Georgi <[email protected]>
  • Loading branch information
frezbo committed Jan 20, 2025
1 parent e1efbf6 commit bb4c11b
Show file tree
Hide file tree
Showing 9 changed files with 215 additions and 139 deletions.
5 changes: 3 additions & 2 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-01-16T14:10:07Z by kres 3b3f992.
# Generated on 2025-01-20T15:13:45Z by kres 3b3f992.

name: default
concurrency:
Expand Down Expand Up @@ -2848,7 +2848,7 @@ jobs:
runs-on:
- self-hosted
- talos
if: contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi') || contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi-longhorn')
if: contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi') || contains(fromJSON(needs.default.outputs.labels), 'integration/extensions') || contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi-longhorn')
needs:
- default
steps:
Expand Down Expand Up @@ -2946,6 +2946,7 @@ jobs:
EXTRA_TEST_ARGS: -talos.csi=longhorn
GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_MEMORY_WORKERS: "3072"
QEMU_WORKERS: "3"
SHORT_INTEGRATION_TEST: "yes"
WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml'
Expand Down
3 changes: 2 additions & 1 deletion .github/workflows/integration-qemu-csi-longhorn-cron.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-01-16T11:00:37Z by kres 3b3f992.
# Generated on 2025-01-20T15:13:45Z by kres 3b3f992.

name: integration-qemu-csi-longhorn-cron
concurrency:
Expand Down Expand Up @@ -109,6 +109,7 @@ jobs:
EXTRA_TEST_ARGS: -talos.csi=longhorn
GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_MEMORY_WORKERS: "3072"
QEMU_WORKERS: "3"
SHORT_INTEGRATION_TEST: "yes"
WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml'
Expand Down
2 changes: 2 additions & 0 deletions .kres.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1369,6 +1369,7 @@ spec:
- '30 3 * * *'
triggerLabels:
- integration/qemu-csi
- integration/extensions # since iscsi is tested with longhorn
- integration/qemu-csi-longhorn
steps:
- name: download-artifacts
Expand Down Expand Up @@ -1424,6 +1425,7 @@ spec:
GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
SHORT_INTEGRATION_TEST: yes
QEMU_WORKERS: 3
QEMU_MEMORY_WORKERS: 3072
WITH_CONFIG_PATCH: "@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml"
EXTRA_TEST_ARGS: -talos.csi=longhorn
IMAGE_REGISTRY: registry.dev.siderolabs.io
Expand Down
134 changes: 0 additions & 134 deletions internal/integration/api/extensions_qemu.go
Original file line number Diff line number Diff line change
Expand Up @@ -133,140 +133,6 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsExpectedModules() {
suite.AssertExpectedModules(suite.ctx, node, expectedModulesModDep)
}

// TestExtensionsISCSI verifies the iscsi-tools extension end-to-end: the
// extension services are running, a tgtd target backed by a file LUN can be
// created, discovered and logged into via iscsid, and the attached disk
// eventually shows up as an iscsi-transport block device.
func (suite *ExtensionsSuiteQEMU) TestExtensionsISCSI() {
	// Both services are shipped by the iscsi-tools system extension.
	expectedServices := map[string]string{
		"ext-iscsid": "Running",
		"ext-tgtd":   "Running",
	}

	node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
	suite.AssertServicesRunning(suite.ctx, node, expectedServices)

	ctx := client.WithNode(suite.ctx, node)

	// Privileged pod used to drive tgtadm/iscsiadm inside the host's namespaces.
	iscsiCreatePodDef, err := suite.NewPrivilegedPod("iscsi-create")
	suite.Require().NoError(err)

	suite.Require().NoError(iscsiCreatePodDef.Create(suite.ctx, 5*time.Minute))

	defer iscsiCreatePodDef.Delete(suite.ctx) //nolint:errcheck

	// Read the node's iSCSI initiator name; it doubles as the target IQN below.
	reader, err := suite.Client.Read(ctx, "/system/iscsi/initiatorname.iscsi")
	suite.Require().NoError(err)

	defer reader.Close() //nolint:errcheck

	body, err := io.ReadAll(reader)
	suite.Require().NoError(err)

	// The file contents look like "InitiatorName=iqn...."; keep only the IQN.
	initiatorName := strings.TrimPrefix(strings.TrimSpace(string(body)), "InitiatorName=")

	// Create target id 1 — nsenter into PID 1's mount namespace so tgtadm
	// talks to the host's tgtd.
	stdout, stderr, err := iscsiCreatePodDef.Exec(
		suite.ctx,
		fmt.Sprintf("nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op new --mode target --tid 1 -T %s", initiatorName),
	)
	suite.Require().NoError(err)

	suite.Require().Equal("", stderr)
	suite.Require().Equal("", stdout)

	// Create a 100MiB zero-filled backing file inside tgtd's root filesystem
	// (reached via /proc/<pid>/root).
	stdout, stderr, err = iscsiCreatePodDef.Exec(
		suite.ctx,
		"dd if=/dev/zero of=/proc/$(pgrep tgtd)/root/var/run/tgtd/iscsi.disk bs=1M count=100",
	)
	suite.Require().NoError(err)

	// dd reports its record counts on stderr, not stdout.
	suite.Require().Contains(stderr, "100+0 records in\n100+0 records out\n")
	suite.Require().Equal("", stdout)

	// Attach the backing file to the target as LUN 1.
	stdout, stderr, err = iscsiCreatePodDef.Exec(
		suite.ctx,
		"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 -b /var/run/tgtd/iscsi.disk",
	)
	suite.Require().NoError(err)

	suite.Require().Equal("", stderr)
	suite.Require().Equal("", stdout)

	// Allow any initiator to bind to the target.
	stdout, stderr, err = iscsiCreatePodDef.Exec(
		suite.ctx,
		"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL",
	)
	suite.Require().NoError(err)

	suite.Require().Equal("", stderr)
	suite.Require().Equal("", stdout)

	// Discover the target from iscsid's mount+net namespaces; the portal is the
	// worker node itself on the default iSCSI port 3260.
	stdout, stderr, err = iscsiCreatePodDef.Exec(
		suite.ctx,
		fmt.Sprintf("nsenter --mount=/proc/$(pgrep iscsid)/ns/mnt --net=/proc/$(pgrep iscsid)/ns/net -- iscsiadm --mode discovery --type sendtargets --portal %s:3260", node),
	)
	suite.Require().NoError(err)

	suite.Require().Equal("", stderr)
	suite.Require().Equal(fmt.Sprintf("%s:3260,1 %s\n", node, initiatorName), stdout)

	// Log in to the discovered target, which should attach the LUN as a disk.
	stdout, stderr, err = iscsiCreatePodDef.Exec(
		suite.ctx,
		fmt.Sprintf("nsenter --mount=/proc/$(pgrep iscsid)/ns/mnt --net=/proc/$(pgrep iscsid)/ns/net -- iscsiadm --mode node --targetname %s --portal %s:3260 --login", initiatorName, node),
	)
	suite.Require().NoError(err)

	suite.Require().Equal("", stderr)
	suite.Require().Contains(stdout, "successful.")

	// Teardown is registered only after a successful login: log out of the
	// session, then remove the LUN and the target in reverse creation order.
	defer func() {
		stdout, stderr, err = iscsiCreatePodDef.Exec(
			suite.ctx,
			fmt.Sprintf("nsenter --mount=/proc/$(pgrep iscsid)/ns/mnt --net=/proc/$(pgrep iscsid)/ns/net -- iscsiadm --mode node --targetname %s --portal %s:3260 --logout", initiatorName, node),
		)
		suite.Require().NoError(err)

		suite.Require().Equal("", stderr)

		stdout, stderr, err = iscsiCreatePodDef.Exec(
			suite.ctx,
			"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op delete --mode logicalunit --tid 1 --lun 1",
		)
		suite.Require().NoError(err)

		suite.Require().Equal("", stderr)
		suite.Require().Equal("", stdout)

		stdout, stderr, err = iscsiCreatePodDef.Exec(
			suite.ctx,
			"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op delete --mode target --tid 1",
		)

		suite.Require().NoError(err)

		suite.Require().Equal("", stderr)
		suite.Require().Equal("", stdout)
	}()

	// The attached disk may take a moment to surface as a block resource.
	suite.Eventually(func() bool {
		return suite.iscsiTargetExists()
	}, 5*time.Second, 1*time.Second, "expected iscsi target to exist")
}

// iscsiTargetExists reports whether any block device on a worker node is
// attached over the iscsi transport.
//
// NOTE(review): the worker node is picked at random on every call, which
// assumes the check lands on the node the target was logged into — confirm
// this holds for multi-worker clusters.
func (suite *ExtensionsSuiteQEMU) iscsiTargetExists() bool {
	workerIP := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)

	nodeCtx := client.WithNode(suite.ctx, workerIP)

	disks, err := safe.ReaderListAll[*block.Disk](nodeCtx, suite.Client.COSI)
	suite.Require().NoError(err)

	found := false

	for disk := range disks.All() {
		if disk.TypedSpec().Transport == "iscsi" {
			found = true

			break
		}
	}

	return found
}

// TestExtensionsNutClient verifies nut client is working.
func (suite *ExtensionsSuiteQEMU) TestExtensionsNutClient() {
node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
Expand Down
22 changes: 21 additions & 1 deletion internal/integration/base/k8s.go
Original file line number Diff line number Diff line change
Expand Up @@ -636,6 +636,26 @@ func (k8sSuite *K8sSuite) WaitForResource(ctx context.Context, namespace, group,
return nil
}

// GetUnstructuredResource gets the unstructured resource with the given namespace, group, kind, version and name.
func (k8sSuite *K8sSuite) GetUnstructuredResource(ctx context.Context, namespace, group, kind, version, resourceName string) (*unstructured.Unstructured, error) {
	// Resolve the group/kind/version to a REST mapping so we know the
	// resource endpoint to query via the dynamic client.
	mapping, err := k8sSuite.Mapper.RESTMapping(schema.GroupKind{
		Group: group,
		Kind:  kind,
	}, version)
	if err != nil {
		// Wrap with %w so callers can inspect the underlying mapper error
		// (the original dropped err entirely here).
		return nil, fmt.Errorf("error creating mapping for resource %s/%s/%s: %w", group, kind, version, err)
	}

	dr := k8sSuite.DynamicClient.Resource(mapping.Resource).Namespace(namespace)

	result, err := dr.Get(ctx, resourceName, metav1.GetOptions{})
	if err != nil {
		// %w (not %v) keeps the error chain intact for errors.Is/errors.As.
		return nil, fmt.Errorf("error getting resource %s/%s/%s/%s: %w", group, version, kind, resourceName, err)
	}

	return result, nil
}

// RunFIOTest runs the FIO test with the given storage class and size using kubestr.
func (k8sSuite *K8sSuite) RunFIOTest(ctx context.Context, storageClasss, size string) error {
args := []string{
Expand Down Expand Up @@ -793,7 +813,7 @@ func (k8sSuite *K8sSuite) DeleteManifests(ctx context.Context, manifests []unstr
return event.Type == watch.Deleted, nil
})

k8sSuite.Require().NoError(err, "error waiting for the object to be deleted %s", obj.GetName())
k8sSuite.Require().NoError(err, "error waiting for the object to be deleted %s/%s/%s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName())

k8sSuite.T().Logf("deleted object %s/%s/%s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName())
}
Expand Down
133 changes: 132 additions & 1 deletion internal/integration/k8s/longhorn.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,27 @@
package k8s

import (
"bytes"
"context"
_ "embed"
"strings"
"testing"
"text/template"
"time"

"github.com/siderolabs/talos/internal/integration/base"
"github.com/siderolabs/talos/pkg/machinery/config/machine"
)

var (
//go:embed testdata/longhorn-iscsi-volume.yaml
longHornISCSIVolumeManifest []byte

//go:embed testdata/longhorn-volumeattachment.yaml
longHornISCSIVolumeAttachmentManifestTemplate []byte

//go:embed testdata/pod-iscsi-volume.yaml
podWithISCSIVolumeTemplate []byte
)

// LongHornSuite tests deploying Longhorn.
Expand All @@ -24,7 +41,11 @@ func (suite *LongHornSuite) SuiteName() string {
}

// TestDeploy tests deploying Longhorn and running a simple test.
//
//nolint:gocyclo
func (suite *LongHornSuite) TestDeploy() {
suite.T().Parallel()

if suite.Cluster == nil {
suite.T().Skip("without full cluster state reaching out to the node IP is not reliable")
}
Expand Down Expand Up @@ -53,7 +74,117 @@ func (suite *LongHornSuite) TestDeploy() {
suite.T().Fatalf("failed to install Longhorn chart: %v", err)
}

suite.Require().NoError(suite.RunFIOTest(ctx, "longhorn", "10G"))
suite.T().Run("fio", func(t *testing.T) {
t.Parallel()

suite.Require().NoError(suite.RunFIOTest(ctx, "longhorn", "10G"))
})

suite.T().Run("iscsi", func(t *testing.T) {
t.Parallel()

longHornISCSIVolumeManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeManifest)

defer func() {
cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cleanupCancel()

suite.DeleteManifests(cleanUpCtx, longHornISCSIVolumeManifestUnstructured)
}()

suite.ApplyManifests(ctx, longHornISCSIVolumeManifestUnstructured)

tmpl, err := template.New("longhorn-iscsi-volumeattachment").Parse(string(longHornISCSIVolumeAttachmentManifestTemplate))
suite.Require().NoError(err)

var longHornISCSIVolumeAttachmentManifest bytes.Buffer

node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)

nodeInfo, err := suite.GetK8sNodeByInternalIP(ctx, node)
if err != nil {
suite.T().Fatalf("failed to get K8s node by internal IP: %v", err)
}

if err := tmpl.Execute(&longHornISCSIVolumeAttachmentManifest, struct {
NodeID string
}{
NodeID: nodeInfo.Name,
}); err != nil {
suite.T().Fatalf("failed to render Longhorn ISCSI volume manifest: %v", err)
}

longHornISCSIVolumeAttachmentManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeAttachmentManifest.Bytes())

suite.ApplyManifests(ctx, longHornISCSIVolumeAttachmentManifestUnstructured)

if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.robustness}", "healthy"); err != nil {
suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
}

if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.state}", "attached"); err != nil {
suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
}

if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0", "{.status.currentState}", "running"); err != nil {
suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
}

unstructured, err := suite.GetUnstructuredResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0")
if err != nil {
suite.T().Fatalf("failed to get LongHorn Engine resource: %v", err)
}

var endpointData string

if status, ok := unstructured.Object["status"].(map[string]interface{}); ok {
endpointData, ok = status["endpoint"].(string)
if !ok {
suite.T().Fatalf("failed to get LongHorn Engine endpoint")
}
}

tmpl, err = template.New("pod-iscsi-volume").Parse(string(podWithISCSIVolumeTemplate))
suite.Require().NoError(err)

// endpoint is of the form `iscsi://10.244.0.5:3260/iqn.2019-10.io.longhorn:iscsi/1`
// trim the iscsi:// prefix
endpointData = strings.TrimPrefix(endpointData, "iscsi://")
// trim the /1 suffix
endpointData = strings.TrimSuffix(endpointData, "/1")

targetPortal, IQN, ok := strings.Cut(endpointData, "/")
if !ok {
suite.T().Fatalf("failed to parse endpoint data from %s", endpointData)
}

var podWithISCSIVolume bytes.Buffer

if err := tmpl.Execute(&podWithISCSIVolume, struct {
NodeName string
TargetPortal string
IQN string
}{
NodeName: nodeInfo.Name,
TargetPortal: targetPortal,
IQN: IQN,
}); err != nil {
suite.T().Fatalf("failed to render pod with ISCSI volume manifest: %v", err)
}

podWithISCSIVolumeUnstructured := suite.ParseManifests(podWithISCSIVolume.Bytes())

defer func() {
cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
defer cleanupCancel()

suite.DeleteManifests(cleanUpCtx, podWithISCSIVolumeUnstructured)
}()

suite.ApplyManifests(ctx, podWithISCSIVolumeUnstructured)

suite.Require().NoError(suite.WaitForPodToBeRunning(ctx, 3*time.Minute, "default", "iscsipd"))
})
}

func init() {
Expand Down
Loading

0 comments on commit bb4c11b

Please sign in to comment.