From d81a66398d0b89c32fffbf6ee37f5658edb6e7d5 Mon Sep 17 00:00:00 2001 From: Wenqi Qiu Date: Fri, 30 Sep 2022 09:02:48 +0800 Subject: [PATCH 1/4] unit test coverage Signed-off-by: Wenqi Qiu codecov comment after 2 builds Signed-off-by: Wenqi Qiu --- .github/workflows/go.yml | 14 ++-- .github/workflows/kind.yml | 168 ++++++++++++++++++------------------- ci/jenkins/test-mc.sh | 2 +- ci/jenkins/test-vmc.sh | 6 -- codecov.yaml | 14 +--- 5 files changed, 93 insertions(+), 111 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 7cdf3d4621c..6d2937df64b 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -138,13 +138,13 @@ jobs: run: | cd multicluster make test-integration - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: .coverage/coverage-integration.txt,multicluster/.coverage/coverage-integration.txt - flags: integration-tests - name: codecov-integration-test +# - name: Codecov +# uses: codecov/codecov-action@v3 +# with: +# token: ${{ secrets.CODECOV_TOKEN }} +# files: .coverage/coverage-integration.txt,multicluster/.coverage/coverage-integration.txt +# flags: integration-tests +# name: codecov-integration-test # golangci-lint-ubuntu and golangci-lint-macos are intentionally not merged into one job with os matrix, otherwise the # job wouldn't be expanded if it's skipped and the report of the required check would be missing. 
diff --git a/.github/workflows/kind.yml b/.github/workflows/kind.yml index 3e51e29d4a5..2ff571f8df0 100644 --- a/.github/workflows/kind.yml +++ b/.github/workflows/kind.yml @@ -98,20 +98,20 @@ jobs: ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-encap-coverage ./ci/kind/test-e2e-kind.sh --encap-mode encap --coverage - name: Tar coverage files run: tar -czf test-e2e-encap-coverage.tar.gz test-e2e-encap-coverage - - name: Upload coverage for test-e2e-encap-coverage - uses: actions/upload-artifact@v3 - with: - name: test-e2e-encap-coverage - path: test-e2e-encap-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-encap - directory: test-e2e-encap-coverage +# - name: Upload coverage for test-e2e-encap-coverage +# uses: actions/upload-artifact@v3 +# with: +# name: test-e2e-encap-coverage +# path: test-e2e-encap-coverage.tar.gz +# retention-days: 30 +# - name: Codecov +# uses: codecov/codecov-action@v3 +# with: +# token: ${{ secrets.CODECOV_TOKEN }} +# file: '*.cov.out*' +# flags: kind-e2e-tests +# name: codecov-test-e2e-encap +# directory: test-e2e-encap-coverage - name: Tar log files if: ${{ failure() }} run: tar -czf log.tar.gz log @@ -156,20 +156,20 @@ jobs: ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-encap-no-proxy-coverage ./ci/kind/test-e2e-kind.sh --encap-mode encap --feature-gates AntreaProxy=false --coverage --skip mode-irrelevant - name: Tar coverage files run: tar -czf test-e2e-encap-no-proxy-coverage.tar.gz test-e2e-encap-no-proxy-coverage - - name: Upload coverage for test-e2e-encap-no-proxy-coverage - uses: actions/upload-artifact@v3 - with: - name: test-e2e-encap-no-proxy-coverage - path: test-e2e-encap-no-proxy-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - 
name: codecov-test-e2e-encap-no-proxy - directory: test-e2e-encap-no-proxy-coverage +# - name: Upload coverage for test-e2e-encap-no-proxy-coverage +# uses: actions/upload-artifact@v3 +# with: +# name: test-e2e-encap-no-proxy-coverage +# path: test-e2e-encap-no-proxy-coverage.tar.gz +# retention-days: 30 +# - name: Codecov +# uses: codecov/codecov-action@v3 +# with: +# token: ${{ secrets.CODECOV_TOKEN }} +# file: '*.cov.out*' +# flags: kind-e2e-tests +# name: codecov-test-e2e-encap-no-proxy +# directory: test-e2e-encap-no-proxy-coverage - name: Tar log files if: ${{ failure() }} run: tar -czf log.tar.gz log @@ -215,20 +215,20 @@ jobs: ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-encap-all-features-enabled-coverage ./ci/kind/test-e2e-kind.sh --encap-mode encap --coverage --feature-gates AllAlpha=true,AllBeta=true,Multicast=false --proxy-all - name: Tar coverage files run: tar -czf test-e2e-encap-all-features-enabled-coverage.tar.gz test-e2e-encap-all-features-enabled-coverage - - name: Upload coverage for test-e2e-encap-all-features-enabled-coverage - uses: actions/upload-artifact@v3 - with: - name: test-e2e-encap-all-features-enabled-coverage - path: test-e2e-encap-all-features-enabled-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-encap-all-features-enabled - directory: test-e2e-encap-all-features-enabled-coverage +# - name: Upload coverage for test-e2e-encap-all-features-enabled-coverage +# uses: actions/upload-artifact@v3 +# with: +# name: test-e2e-encap-all-features-enabled-coverage +# path: test-e2e-encap-all-features-enabled-coverage.tar.gz +# retention-days: 30 +# - name: Codecov +# uses: codecov/codecov-action@v3 +# with: +# token: ${{ secrets.CODECOV_TOKEN }} +# file: '*.cov.out*' +# flags: kind-e2e-tests +# name: codecov-test-e2e-encap-all-features-enabled +# directory: 
test-e2e-encap-all-features-enabled-coverage - name: Tar log files if: ${{ failure() }} run: tar -czf log.tar.gz log @@ -273,20 +273,20 @@ jobs: ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-noencap-coverage ./ci/kind/test-e2e-kind.sh --encap-mode noEncap --coverage --skip mode-irrelevant - name: Tar coverage files run: tar -czf test-e2e-noencap-coverage.tar.gz test-e2e-noencap-coverage - - name: Upload coverage for test-e2e-noencap-coverage - uses: actions/upload-artifact@v3 - with: - name: test-e2e-noencap-coverage - path: test-e2e-noencap-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-noencap - directory: test-e2e-noencap-coverage +# - name: Upload coverage for test-e2e-noencap-coverage +# uses: actions/upload-artifact@v3 +# with: +# name: test-e2e-noencap-coverage +# path: test-e2e-noencap-coverage.tar.gz +# retention-days: 30 +# - name: Codecov +# uses: codecov/codecov-action@v3 +# with: +# token: ${{ secrets.CODECOV_TOKEN }} +# file: '*.cov.out*' +# flags: kind-e2e-tests +# name: codecov-test-e2e-noencap +# directory: test-e2e-noencap-coverage - name: Tar log files if: ${{ failure() }} run: tar -czf log.tar.gz log @@ -331,20 +331,20 @@ jobs: ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-hybrid-coverage ./ci/kind/test-e2e-kind.sh --encap-mode hybrid --coverage --skip mode-irrelevant - name: Tar coverage files run: tar -czf test-e2e-hybrid-coverage.tar.gz test-e2e-hybrid-coverage - - name: Upload coverage for test-e2e-hybrid-coverage - uses: actions/upload-artifact@v3 - with: - name: test-e2e-hybrid-coverage - path: test-e2e-hybrid-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-hybrid - directory: test-e2e-hybrid-coverage +# - name: 
Upload coverage for test-e2e-hybrid-coverage +# uses: actions/upload-artifact@v3 +# with: +# name: test-e2e-hybrid-coverage +# path: test-e2e-hybrid-coverage.tar.gz +# retention-days: 30 +# - name: Codecov +# uses: codecov/codecov-action@v3 +# with: +# token: ${{ secrets.CODECOV_TOKEN }} +# file: '*.cov.out*' +# flags: kind-e2e-tests +# name: codecov-test-e2e-hybrid +# directory: test-e2e-hybrid-coverage - name: Tar log files if: ${{ failure() }} run: tar -czf log.tar.gz log @@ -396,20 +396,20 @@ jobs: ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-fa-coverage ./ci/kind/test-e2e-kind.sh --encap-mode encap --coverage --flow-visibility - name: Tar coverage files run: tar -czf test-e2e-fa-coverage.tar.gz test-e2e-fa-coverage - - name: Upload coverage for test-e2e-fa-coverage - uses: actions/upload-artifact@v3 - with: - name: test-e2e-fa-coverage - path: test-e2e-fa-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-fa - directory: test-e2e-fa-coverage +# - name: Upload coverage for test-e2e-fa-coverage +# uses: actions/upload-artifact@v3 +# with: +# name: test-e2e-fa-coverage +# path: test-e2e-fa-coverage.tar.gz +# retention-days: 30 +# - name: Codecov +# uses: codecov/codecov-action@v3 +# with: +# token: ${{ secrets.CODECOV_TOKEN }} +# file: '*.cov.out*' +# flags: kind-e2e-tests +# name: codecov-test-e2e-fa +# directory: test-e2e-fa-coverage - name: Tar log files if: ${{ failure() }} run: tar -czf log.tar.gz log diff --git a/ci/jenkins/test-mc.sh b/ci/jenkins/test-mc.sh index 99d1c3ea5cc..7cd64070427 100755 --- a/ci/jenkins/test-mc.sh +++ b/ci/jenkins/test-mc.sh @@ -243,7 +243,7 @@ function run_codecov { (set -e shasum -a 256 -c codecov.SHA256SUM chmod +x codecov - ./codecov -c -t ${CODECOV_TOKEN} -F ${flag} -f ${file} -s ${dir} -C ${GIT_COMMIT} -r antrea-io/antrea + #./codecov -c -t ${CODECOV_TOKEN} -F 
${flag} -f ${file} -s ${dir} -C ${GIT_COMMIT} -r antrea-io/antrea rm -f trustedkeys.gpg codecov )} diff --git a/ci/jenkins/test-vmc.sh b/ci/jenkins/test-vmc.sh index 6837548b354..ab641801077 100755 --- a/ci/jenkins/test-vmc.sh +++ b/ci/jenkins/test-vmc.sh @@ -326,12 +326,6 @@ function run_codecov { (set -e chmod +x codecov - if [[ $remote == true ]]; then - ${SCP_WITH_UTILS_KEY} codecov jenkins@${ip}:~ - ${SSH_WITH_UTILS_KEY} -n jenkins@${ip} "cd antrea; ~/codecov -c -t ${CODECOV_TOKEN} -F ${flag} -f ${file} -C ${GIT_COMMIT} -r antrea-io/antrea" - else - ./codecov -c -t ${CODECOV_TOKEN} -F ${flag} -f ${file} -s ${dir} -C ${GIT_COMMIT} -r antrea-io/antrea - fi rm -f trustedkeys.gpg codecov )} diff --git a/codecov.yaml b/codecov.yaml index e946dd107ae..0a250b66223 100644 --- a/codecov.yaml +++ b/codecov.yaml @@ -8,7 +8,7 @@ comment: require_changes: no require_base: no require_head: no - after_n_builds: 1 + after_n_builds: 2 show_carryforward_flags: true github_checks: @@ -31,18 +31,6 @@ coverage: target: auto flags: - unit-tests - antrea-integration-tests: - target: auto - flags: - - integration-tests - antrea-e2e-tests: - target: auto - flags: - - e2e-tests - antrea-kind-e2e-tests: - target: auto - flags: - - kind-e2e-tests flag_management: default_rules: From f3d618183c0377cf3e36e542e8a069fd5a1ff0b5 Mon Sep 17 00:00:00 2001 From: Wenqi Qiu Date: Wed, 19 Oct 2022 08:58:05 +0800 Subject: [PATCH 2/4] disable carryforward Signed-off-by: Wenqi Qiu --- codecov.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/codecov.yaml b/codecov.yaml index 0a250b66223..a1fc7a490e4 100644 --- a/codecov.yaml +++ b/codecov.yaml @@ -9,7 +9,7 @@ comment: require_base: no require_head: no after_n_builds: 2 - show_carryforward_flags: true + show_carryforward_flags: false github_checks: annotations: true @@ -34,7 +34,7 @@ coverage: flag_management: default_rules: - carryforward: true + carryforward: false ignore: - "**/testing/*.go" From 
06a3fa9f423f4da4a6680eaa8089f461782541ea Mon Sep 17 00:00:00 2001 From: Wenqi Qiu Date: Mon, 24 Oct 2022 10:22:56 +0800 Subject: [PATCH 3/4] comments set false Signed-off-by: Wenqi Qiu --- codecov.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/codecov.yaml b/codecov.yaml index a1fc7a490e4..ec549fe36df 100644 --- a/codecov.yaml +++ b/codecov.yaml @@ -2,14 +2,14 @@ codecov: branch: main require_ci_to_pass: no -comment: - layout: "reach,diff,flags,tree" - behavior: default - require_changes: no - require_base: no - require_head: no - after_n_builds: 2 - show_carryforward_flags: false +comment: false +# layout: "reach,diff,flags,tree" +# behavior: default +# require_changes: no +# require_base: no +# require_head: no +# after_n_builds: 2 +# show_carryforward_flags: false github_checks: annotations: true From 391c885f43ef825e600f7216e0bbf8e9e7380215 Mon Sep 17 00:00:00 2001 From: wenyingd Date: Sun, 6 Nov 2022 23:19:20 +0800 Subject: [PATCH 4/4] Add unit test in path /pkg/agent/cniserver 1. Add unit test 2. Remove unused code 3. 
Fix issues hit by unit test Signed-off-by: wenyingd --- hack/update-codegen-dockerized.sh | 1 + pkg/agent/agent.go | 4 +- .../interface_configuration_linux.go | 175 +-- .../interface_configuration_linux_test.go | 852 +++++++++++++ .../interface_configuration_windows.go | 74 +- pkg/agent/cniserver/interfaces.go | 44 + pkg/agent/cniserver/ipam/ipam_service.go | 9 + pkg/agent/cniserver/pod_configuration.go | 22 +- .../cniserver/pod_configuration_linux_test.go | 514 ++++++++ .../cniserver/pod_configuration_windows.go | 2 +- pkg/agent/cniserver/server.go | 15 +- pkg/agent/cniserver/server_linux_test.go | 743 ++++++++++++ pkg/agent/cniserver/server_test.go | 173 +-- pkg/agent/cniserver/server_windows_test.go | 1079 +++++++++++++++++ pkg/agent/cniserver/sriov_linux.go | 45 +- pkg/agent/cniserver/testing/mock_cniserver.go | 153 +++ pkg/agent/util/netlink/netlink_linux.go | 6 + .../netlink/testing/mock_netlink_linux.go | 42 + 18 files changed, 3666 insertions(+), 287 deletions(-) create mode 100644 pkg/agent/cniserver/interface_configuration_linux_test.go create mode 100644 pkg/agent/cniserver/interfaces.go create mode 100644 pkg/agent/cniserver/pod_configuration_linux_test.go create mode 100644 pkg/agent/cniserver/server_linux_test.go create mode 100644 pkg/agent/cniserver/server_windows_test.go create mode 100644 pkg/agent/cniserver/testing/mock_cniserver.go diff --git a/hack/update-codegen-dockerized.sh b/hack/update-codegen-dockerized.sh index 38b376c8719..2e3789bc664 100755 --- a/hack/update-codegen-dockerized.sh +++ b/hack/update-codegen-dockerized.sh @@ -39,6 +39,7 @@ ANTREA_PROTO_PKG="antrea_io.antrea" function generate_mocks { # Generate mocks for testing with mockgen. 
MOCKGEN_TARGETS=( + "pkg/agent/cniserver SriovNet testing" "pkg/agent/cniserver/ipam IPAMDriver testing" "pkg/agent/flowexporter/connections ConnTrackDumper,NetFilterConnTrack testing" "pkg/agent/interfacestore InterfaceStore testing" diff --git a/pkg/agent/agent.go b/pkg/agent/agent.go index c61ec531018..cdcf78b88af 100644 --- a/pkg/agent/agent.go +++ b/pkg/agent/agent.go @@ -319,7 +319,7 @@ func (i *Initializer) initInterfaceStore() error { } case interfacestore.AntreaContainer: // The port should be for a container interface. - intf = cniserver.ParseOVSPortInterfaceConfig(port, ovsPort, true) + intf = cniserver.ParseOVSPortInterfaceConfig(port, ovsPort) case interfacestore.AntreaTrafficControl: intf = trafficcontrol.ParseTrafficControlInterfaceConfig(port, ovsPort) if err := ovsCtlClient.SetPortNoFlood(int(ovsPort.OFPort)); err != nil { @@ -354,7 +354,7 @@ func (i *Initializer) initInterfaceStore() error { antreaIFType = interfacestore.AntreaHost default: // The port should be for a container interface. 
- intf = cniserver.ParseOVSPortInterfaceConfig(port, ovsPort, true) + intf = cniserver.ParseOVSPortInterfaceConfig(port, ovsPort) antreaIFType = interfacestore.AntreaContainer } updatedExtIDs := make(map[string]interface{}) diff --git a/pkg/agent/cniserver/interface_configuration_linux.go b/pkg/agent/cniserver/interface_configuration_linux.go index d871efdf745..a09b80e08d3 100644 --- a/pkg/agent/cniserver/interface_configuration_linux.go +++ b/pkg/agent/cniserver/interface_configuration_linux.go @@ -22,7 +22,6 @@ import ( "net" "time" - "github.com/Mellanox/sriovnet" cnitypes "github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/cni/pkg/types/current" "github.com/containernetworking/plugins/pkg/ip" @@ -35,6 +34,7 @@ import ( "antrea.io/antrea/pkg/agent/util/arping" "antrea.io/antrea/pkg/agent/util/ethtool" "antrea.io/antrea/pkg/agent/util/ndp" + netlinkutil "antrea.io/antrea/pkg/agent/util/netlink" cnipb "antrea.io/antrea/pkg/apis/cni/v1beta1" "antrea.io/antrea/pkg/ovs/ovsconfig" cniip "antrea.io/antrea/third_party/containernetworking/ip" @@ -46,76 +46,80 @@ const ( netDeviceTypeVF = "vf" ) +// Declared variables for test +var ( + ipSetupVethWithName = cniip.SetupVethWithName + ipamConfigureIface = ipam.ConfigureIface + ethtoolTXHWCsumOff = ethtool.EthtoolTXHWCsumOff + renameInterface = util.RenameInterface + netInterfaceByName = net.InterfaceByName + arpingGratuitousARPOverIface = arping.GratuitousARPOverIface + ndpGratuitousNDPOverIface = ndp.GratuitousNDPOverIface + ipValidateExpectedInterfaceIPs = ip.ValidateExpectedInterfaceIPs + ipValidateExpectedRoute = ip.ValidateExpectedRoute + ipGetVethPeerIfindex = ip.GetVethPeerIfindex + getNSDevInterface = util.GetNSDevInterface + getNSPeerDevBridge = util.GetNSPeerDevBridge + nsGetNS = ns.GetNS + nsWithNetNSPath = ns.WithNetNSPath + nsIsNSorErr = ns.IsNSorErr +) + type ifConfigurator struct { ovsDatapathType ovsconfig.OVSDatapathType isOvsHardwareOffloadEnabled bool disableTXChecksumOffload 
bool + netlink netlinkutil.Interface + sriovnet SriovNet } func newInterfaceConfigurator(ovsDatapathType ovsconfig.OVSDatapathType, isOvsHardwareOffloadEnabled bool, disableTXChecksumOffload bool) (*ifConfigurator, error) { - return &ifConfigurator{ovsDatapathType: ovsDatapathType, isOvsHardwareOffloadEnabled: isOvsHardwareOffloadEnabled, disableTXChecksumOffload: disableTXChecksumOffload}, nil + configurator := &ifConfigurator{ovsDatapathType: ovsDatapathType, isOvsHardwareOffloadEnabled: isOvsHardwareOffloadEnabled, disableTXChecksumOffload: disableTXChecksumOffload, netlink: &netlink.Handle{}, sriovnet: newSriovNet()} + return configurator, nil } -func renameLink(curName, newName string) error { - link, err := netlink.LinkByName(curName) - if err != nil { - return err - } - if err := netlink.LinkSetDown(link); err != nil { - return err - } - if err := netlink.LinkSetName(link, newName); err != nil { - return err - } - if err := netlink.LinkSetUp(link); err != nil { - return err - } - - return nil -} - -func moveIfToNetns(ifname string, netns ns.NetNS) error { - vfDev, err := netlink.LinkByName(ifname) +func (ic *ifConfigurator) moveIfToNetns(ifname string, netns ns.NetNS) error { + vfDev, err := ic.netlink.LinkByName(ifname) if err != nil { return fmt.Errorf("failed to lookup VF device %v: %q", ifname, err) } - // move VF device to ns - if err = netlink.LinkSetNsFd(vfDev, int(netns.Fd())); err != nil { + // Move VF device to ns + if err = ic.netlink.LinkSetNsFd(vfDev, int(netns.Fd())); err != nil { return fmt.Errorf("failed to move VF device %+v to netns: %q", ifname, err) } return nil } -func moveVFtoContainerNS(vfNetDevice string, containerID string, containerNetNS string, containerIfaceName string, mtu int, result *current.Result) error { +func (ic *ifConfigurator) moveVFtoContainerNS(vfNetDevice string, containerID string, containerNetNS string, containerIfaceName string, mtu int, result *current.Result) error { hostIface := result.Interfaces[0] 
containerIface := result.Interfaces[1] // Move VF to Container namespace - netns, err := ns.GetNS(containerNetNS) + netns, err := nsGetNS(containerNetNS) if err != nil { return fmt.Errorf("failed to open container netns %s: %v", containerNetNS, err) } - err = moveIfToNetns(vfNetDevice, netns) + err = ic.moveIfToNetns(vfNetDevice, netns) if err != nil { return fmt.Errorf("failed to move VF %s to container netns %s: %v", vfNetDevice, containerNetNS, err) } netns.Close() - if err := ns.WithNetNSPath(containerNetNS, func(hostNS ns.NetNS) error { - err = renameLink(vfNetDevice, containerIfaceName) + if err := nsWithNetNSPath(containerNetNS, func(hostNS ns.NetNS) error { + err = renameInterface(vfNetDevice, containerIfaceName) if err != nil { return fmt.Errorf("failed to rename VF netdevice as containerIfaceName %s: %v", containerIfaceName, err) } - link, err := netlink.LinkByName(containerIfaceName) + link, err := ic.netlink.LinkByName(containerIfaceName) if err != nil { return fmt.Errorf("failed to find VF netdevice %s: %v", containerIfaceName, err) } - err = netlink.LinkSetMTU(link, mtu) + err = ic.netlink.LinkSetMTU(link, mtu) if err != nil { return fmt.Errorf("failed to set MTU for VF netdevice %s: %v", containerIfaceName, err) } - err = netlink.LinkSetUp(link) + err = ic.netlink.LinkSetUp(link) if err != nil { return fmt.Errorf("failed to set link up to VF netdevice %s: %v", containerIfaceName, err) } @@ -125,7 +129,7 @@ func moveVFtoContainerNS(vfNetDevice string, containerID string, containerNetNS klog.V(2).Infof("hostIface: %+v, containerIface: %+v", hostIface, containerIface) klog.V(2).Infof("Configuring IP address for container %s", containerID) // result.Interfaces must be set before this. 
- if err := ipam.ConfigureIface(containerIface.Name, result); err != nil { + if err := ipamConfigureIface(containerIface.Name, result); err != nil { return fmt.Errorf("failed to configure IP address for container %s: %v", containerID, err) } klog.V(2).Infof("ipam.ConfigureIface result: %+v, err: %v", result, err) @@ -155,7 +159,7 @@ func (ic *ifConfigurator) configureContainerSriovLinkOnBridge( result.Interfaces = []*current.Interface{hostIface, containerIface} // 1. get VF netdevice from PCI - vfNetdevices, err := sriovnet.GetNetDevicesFromPci(pciAddress) + vfNetdevices, err := ic.sriovnet.GetNetDevicesFromPci(pciAddress) if err != nil { return err } @@ -165,32 +169,32 @@ func (ic *ifConfigurator) configureContainerSriovLinkOnBridge( } vfNetdevice := vfNetdevices[0] // 2. get Uplink netdevice - uplink, err := sriovnet.GetUplinkRepresentor(pciAddress) + uplink, err := ic.sriovnet.GetUplinkRepresentor(pciAddress) if err != nil { return fmt.Errorf("failed to get uplink representor error: %s", err) } // 3. get VF index from PCI - vfIndex, err := sriovnet.GetVfIndexByPciAddress(pciAddress) + vfIndex, err := ic.sriovnet.GetVfIndexByPciAddress(pciAddress) if err != nil { return fmt.Errorf("failed to get VF index error: %s", err) } // 4. lookup representor - repPortName, err := sriovnet.GetVfRepresentor(uplink, vfIndex) + repPortName, err := ic.sriovnet.GetVfRepresentor(uplink, vfIndex) if err != nil { return fmt.Errorf("failed to get VF representor error: %s", err) } // 5. 
rename VF representor to hostIfaceName - if err = renameLink(repPortName, hostIfaceName); err != nil { + if err = renameInterface(repPortName, hostIfaceName); err != nil { return fmt.Errorf("failed to rename %s to %s: %v", repPortName, hostIfaceName, err) } hostIface.Name = hostIfaceName - link, err := netlink.LinkByName(hostIface.Name) + link, err := ic.netlink.LinkByName(hostIface.Name) if err != nil { return err } hostIface.Mac = link.Attrs().HardwareAddr.String() - return moveVFtoContainerNS(vfNetdevice, containerID, containerNetNS, containerIfaceName, mtu, result) + return ic.moveVFtoContainerNS(vfNetdevice, containerID, containerNetNS, containerIfaceName, mtu, result) } // configureContainerSriovLink moves the VF to the container namespace for Pod link SR-IOV interface; @@ -209,23 +213,19 @@ func (ic *ifConfigurator) configureContainerSriovLink( containerIface := ¤t.Interface{Name: containerIfaceName, Sandbox: containerNetNS} result.Interfaces = []*current.Interface{hostIface, containerIface} - if pciAddress != "" { - // Get rest of the VF information - pfName, vfID, err := getVFInfo(pciAddress) - klog.V(2).Infof("pfName and vfID of pciAddress: %v, %v, %s", pfName, vfID, pciAddress) - if err != nil { - return fmt.Errorf("failed to get VF information: %v", err) - } - } else { - return fmt.Errorf("VF PCI address is required") + // Get rest of the VF information + pfName, vfID, err := ic.getVFInfo(pciAddress) + klog.V(2).InfoS("Get pfName and vfID of pciAddress", "pfName", pfName, "vfID", vfID, "pciAddress", pciAddress) + if err != nil { + return fmt.Errorf("failed to get VF information: %v", err) } - vfIFName, err := getVFLinkName(pciAddress) + vfIFName, err := ic.getVFLinkName(pciAddress) if err != nil || vfIFName == "" { return fmt.Errorf("VF interface not found for pciAddress %s: %v", pciAddress, err) } - link, err := netlink.LinkByName(vfIFName) + link, err := ic.netlink.LinkByName(vfIFName) klog.V(2).Infof("Get link of vfIFName: %s, link: %+v", vfIFName, 
link) if err != nil { return fmt.Errorf("error getting VF link: %v", err) @@ -235,7 +235,7 @@ func (ic *ifConfigurator) configureContainerSriovLink( hostIface.Name = vfIFName klog.V(2).Infof("hostIface.Name: %s, hostIface.Mac: %s, vfIFName: %s", hostIface.Name, hostIface.Mac, vfIFName) - return moveVFtoContainerNS(vfIFName, containerID, containerNetNS, containerIfaceName, mtu, result) + return ic.moveVFtoContainerNS(vfIFName, containerID, containerNetNS, containerIfaceName, mtu, result) } // configureContainerLinkVeth creates a veth pair: one in the container netns and one in the host netns, and configures IP @@ -256,9 +256,9 @@ func (ic *ifConfigurator) configureContainerLinkVeth( result.Interfaces = []*current.Interface{hostIface, containerIface} podMAC := util.GenerateRandomMAC() - if err := ns.WithNetNSPath(containerNetNS, func(hostNS ns.NetNS) error { + if err := nsWithNetNSPath(containerNetNS, func(hostNS ns.NetNS) error { klog.V(2).Infof("Creating veth devices (%s, %s) for container %s", containerIfaceName, hostIfaceName, containerID) - hostVeth, containerVeth, err := cniip.SetupVethWithName(containerIfaceName, hostIfaceName, mtu, podMAC.String(), hostNS) + hostVeth, containerVeth, err := ipSetupVethWithName(containerIfaceName, hostIfaceName, mtu, podMAC.String(), hostNS) if err != nil { return fmt.Errorf("failed to create veth devices for container %s: %v", containerID, err) } @@ -266,14 +266,14 @@ func (ic *ifConfigurator) configureContainerLinkVeth( hostIface.Mac = hostVeth.HardwareAddr.String() // Disable TX checksum offloading when it's configured explicitly. if ic.disableTXChecksumOffload { - if err := ethtool.EthtoolTXHWCsumOff(containerVeth.Name); err != nil { + if err := ethtoolTXHWCsumOff(containerVeth.Name); err != nil { return fmt.Errorf("error when disabling TX checksum offload on container veth: %v", err) } } klog.V(2).Infof("Configuring IP address for container %s", containerID) // result.Interfaces must be set before this. 
- if err := ipam.ConfigureIface(containerIface.Name, result); err != nil { + if err := ipamConfigureIface(containerIface.Name, result); err != nil { return fmt.Errorf("failed to configure IP address for container %s: %v", containerID, err) } return nil @@ -288,7 +288,7 @@ func (ic *ifConfigurator) configureContainerLinkVeth( // asynchronously, and the gratuitous ARP could be sent out after the Openflow entries are // installed. Using another goroutine to ensure the processing of CNI ADD request is not blocked. func (ic *ifConfigurator) advertiseContainerAddr(containerNetNS string, containerIfaceName string, result *current.Result) error { - if err := ns.IsNSorErr(containerNetNS); err != nil { + if err := nsIsNSorErr(containerNetNS); err != nil { return fmt.Errorf("%s is not a valid network namespace: %v", containerNetNS, err) } if len(result.IPs) == 0 { @@ -296,8 +296,8 @@ func (ic *ifConfigurator) advertiseContainerAddr(containerNetNS string, containe return nil } // Sending Gratuitous ARP is a best-effort action and is unlikely to fail as we have ensured the netns is valid. - go ns.WithNetNSPath(containerNetNS, func(_ ns.NetNS) error { - iface, err := net.InterfaceByName(containerIfaceName) + go nsWithNetNSPath(containerNetNS, func(_ ns.NetNS) error { + iface, err := netInterfaceByName(containerIfaceName) if err != nil { klog.Errorf("Failed to find container interface %s in ns %s: %v", containerIfaceName, containerNetNS, err) return nil @@ -321,12 +321,12 @@ func (ic *ifConfigurator) advertiseContainerAddr(containerNetNS string, containe // Send gratuitous ARP/NDP to network in case of stale mappings for this IP address // (e.g. if a previous - deleted - Pod was using the same IP). 
if targetIPv4 != nil { - if err := arping.GratuitousARPOverIface(targetIPv4, iface); err != nil { + if err := arpingGratuitousARPOverIface(targetIPv4, iface); err != nil { klog.Warningf("Failed to send gratuitous ARP #%d: %v", count, err) } } if targetIPv6 != nil { - if err := ndp.GratuitousNDPOverIface(targetIPv6, iface); err != nil { + if err := ndpGratuitousNDPOverIface(targetIPv6, iface); err != nil { klog.Warningf("Failed to send gratuitous NDP #%d: %v", count, err) } } @@ -410,27 +410,28 @@ func (ic *ifConfigurator) checkContainerInterface( } // Check container interface configuration - if err := ns.WithNetNSPath(containerNetns, func(_ ns.NetNS) error { + err := nsWithNetNSPath(containerNetns, func(_ ns.NetNS) error { var errlink error // Check container link config if sriovVFDeviceID != "" { - networkInterface, errlink = validateContainerVFInterface(containerIface, sriovVFDeviceID) + networkInterface, errlink = ic.validateContainerVFInterface(containerIface, sriovVFDeviceID) } else { - networkInterface, errlink = validateContainerVethInterface(containerIface) + networkInterface, errlink = ic.validateContainerVethInterface(containerIface) } if errlink != nil { return errlink } // Check container IP config - if err := ip.ValidateExpectedInterfaceIPs(containerIface.Name, containerIPs); err != nil { + if err := ipValidateExpectedInterfaceIPs(containerIface.Name, containerIPs); err != nil { return err } // Check container route config - if err := ip.ValidateExpectedRoute(containerRoutes); err != nil { + if err := ipValidateExpectedRoute(containerRoutes); err != nil { return err } return nil - }); err != nil { + }) + if err != nil { klog.Errorf("Failed to check container %s interface configurations in netns %s: %v", containerID, containerNetns, err) return nil, err @@ -438,12 +439,12 @@ func (ic *ifConfigurator) checkContainerInterface( return networkInterface, nil } -func validateContainerVFInterface(intf *current.Interface, sriovVFDeviceID string) (netlink.Link, 
error) { - link, err := validateInterface(intf, true, netDeviceTypeVF) +func (ic *ifConfigurator) validateContainerVFInterface(intf *current.Interface, sriovVFDeviceID string) (netlink.Link, error) { + link, err := ic.validateInterface(intf, true, netDeviceTypeVF) if err != nil { return nil, err } - netdevices, _ := sriovnet.GetNetDevicesFromPci(sriovVFDeviceID) + netdevices, err := ic.sriovnet.GetNetDevicesFromPci(sriovVFDeviceID) if err != nil { return nil, fmt.Errorf("failed to find netdevice to PCI address %s: %v", sriovVFDeviceID, err) } @@ -463,15 +464,15 @@ func validateContainerVFInterface(intf *current.Interface, sriovVFDeviceID strin return link, nil } -func validateContainerVethInterface(intf *current.Interface) (*vethPair, error) { - link, err := validateInterface(intf, true, netDeviceTypeVeth) +func (ic *ifConfigurator) validateContainerVethInterface(intf *current.Interface) (*vethPair, error) { + link, err := ic.validateInterface(intf, true, netDeviceTypeVeth) if err != nil { return nil, err } veth := &vethPair{} linkName := link.Attrs().Name veth.ifIndex = link.Attrs().Index - _, veth.peerIndex, err = ip.GetVethPeerIfindex(linkName) + _, veth.peerIndex, err = ipGetVethPeerIfindex(linkName) if err != nil { return nil, fmt.Errorf("failed to get veth peer index for veth %s: %v", linkName, err) } @@ -484,15 +485,15 @@ func validateContainerVethInterface(intf *current.Interface) (*vethPair, error) } func (ic *ifConfigurator) validateVFRepInterface(sriovVFDeviceID string) (string, error) { - uplink, err := sriovnet.GetUplinkRepresentor(sriovVFDeviceID) + uplink, err := ic.sriovnet.GetUplinkRepresentor(sriovVFDeviceID) if err != nil { return "", fmt.Errorf("failed to get uplink representor for PCI Address %s", sriovVFDeviceID) } - vfIndex, err := sriovnet.GetVfIndexByPciAddress(sriovVFDeviceID) + vfIndex, err := ic.sriovnet.GetVfIndexByPciAddress(sriovVFDeviceID) if err != nil { return "", fmt.Errorf("failed to vf index for PCI Address %s", sriovVFDeviceID) 
} - return sriovnet.GetVfRepresentor(uplink, vfIndex) + return ic.sriovnet.GetVfRepresentor(uplink, vfIndex) } func (ic *ifConfigurator) validateContainerPeerInterface(interfaces []*current.Interface, containerVeth *vethPair) (*vethPair, error) { @@ -503,7 +504,7 @@ func (ic *ifConfigurator) validateContainerPeerInterface(interfaces []*current.I // Not in the default Namespace. Must be the container interface. continue } - link, err := validateInterface(hostIntf, false, netDeviceTypeVeth) + link, err := ic.validateInterface(hostIntf, false, netDeviceTypeVeth) if err != nil { klog.Errorf("Failed to validate interface %s: %v", hostIntf.Name, err) continue @@ -514,7 +515,7 @@ func (ic *ifConfigurator) validateContainerPeerInterface(interfaces []*current.I } hostVeth := &vethPair{ifIndex: link.Attrs().Index, name: link.Attrs().Name} - _, hostVeth.peerIndex, err = ip.GetVethPeerIfindex(hostVeth.name) + _, hostVeth.peerIndex, err = ipGetVethPeerIfindex(hostVeth.name) if err != nil { return nil, fmt.Errorf("failed to get veth peer index for host interface %s: %v", hostIntf.Name, err) @@ -547,7 +548,7 @@ func (ic *ifConfigurator) getInterceptedInterfaces( containerIFDev string, ) (*current.Interface, *current.Interface, error) { containerIface := &current.Interface{} - intf, err := util.GetNSDevInterface(containerNetNS, containerIFDev) + intf, err := getNSDevInterface(containerNetNS, containerIFDev) if err != nil { return nil, nil, fmt.Errorf("connectInterceptedInterface failed to get veth info: %w", err) } @@ -557,7 +558,7 @@ func (ic *ifConfigurator) getInterceptedInterfaces( // Setup dev in host ns.
hostIface := &current.Interface{} - intf, br, err := util.GetNSPeerDevBridge(containerNetNS, containerIFDev) + intf, br, err := getNSPeerDevBridge(containerNetNS, containerIFDev) if err != nil { return nil, nil, fmt.Errorf("connectInterceptedInterface failed to get veth peer info: %w", err) } @@ -569,7 +570,13 @@ func (ic *ifConfigurator) getInterceptedInterfaces( return containerIface, hostIface, nil } -func validateInterface(intf *current.Interface, inNetns bool, ifType string) (netlink.Link, error) { +// addPostInterfaceCreateHook is called only on Windows. Adding this function in this file because it is defined in the +// interface `podInterfaceConfigurator`. +func (ic *ifConfigurator) addPostInterfaceCreateHook(containerID, endpointName string, containerAccess *containerAccessArbitrator, hook postInterfaceCreateHook) error { + return nil +} + +func (ic *ifConfigurator) validateInterface(intf *current.Interface, inNetns bool, ifType string) (netlink.Link, error) { if intf.Name == "" { return nil, fmt.Errorf("interface name is missing") } @@ -582,7 +589,7 @@ func validateInterface(intf *current.Interface, inNetns bool, ifType string) (ne return nil, fmt.Errorf("interface %s is expected not in netns", intf.Name) } } - link, err := netlink.LinkByName(intf.Name) + link, err := ic.netlink.LinkByName(intf.Name) if err != nil { return nil, fmt.Errorf("failed to find link for interface %s", intf.Name) } @@ -590,8 +597,8 @@ func validateInterface(intf *current.Interface, inNetns bool, ifType string) (ne if !isVeth(link) { return nil, fmt.Errorf("interface %s is not of type veth", intf.Name) } + return link, nil } else if ifType == netDeviceTypeVF { - return link, nil } return nil, fmt.Errorf("unknown device type %s", ifType) @@ -602,6 +609,6 @@ func isVeth(link netlink.Link) bool { return isVeth } -func (ic *ifConfigurator) getOVSInterfaceType(ovsPortName string) int { +func getOVSInterfaceType(ovsPortName string) int { return defaultOVSInterfaceType } diff --git
a/pkg/agent/cniserver/interface_configuration_linux_test.go b/pkg/agent/cniserver/interface_configuration_linux_test.go new file mode 100644 index 00000000000..68b87bb068e --- /dev/null +++ b/pkg/agent/cniserver/interface_configuration_linux_test.go @@ -0,0 +1,852 @@ +//go:build linux +// +build linux + +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cniserver + +import ( + "fmt" + "net" + "sync" + "testing" + "unsafe" + + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/containernetworking/plugins/pkg/ns" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vishvananda/netlink" + + cniservertest "antrea.io/antrea/pkg/agent/cniserver/testing" + "antrea.io/antrea/pkg/agent/util" + "antrea.io/antrea/pkg/agent/util/arping" + "antrea.io/antrea/pkg/agent/util/ndp" + netlinkutil "antrea.io/antrea/pkg/agent/util/netlink" + netlinktest "antrea.io/antrea/pkg/agent/util/netlink/testing" + "antrea.io/antrea/pkg/ovs/ovsconfig" +) + +var ( + mtu = 1450 + containerVethMac, _ = net.ParseMAC(containerMAC) + hostVethMac, _ = net.ParseMAC(hostIfaceMAC) + containerIfaceName = "eth0" + podName = "pod0" + podContainerID = "abcefgh-12345678" + hostIfaceName = util.GenerateContainerInterfaceName(podName, testPodNamespace, podContainerID) + containerVeth = &netlink.Veth{ + LinkAttrs: 
netlink.LinkAttrs{ + Name: containerIfaceName, + Flags: net.FlagUp, + MTU: mtu, + HardwareAddr: containerVethMac, + Index: 1, + }, + PeerName: hostIfaceName, + } + hostVeth = &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: hostIfaceName, + Flags: net.FlagUp, + MTU: mtu, + HardwareAddr: hostVethMac, + Index: 2, + }, + PeerName: containerIfaceName, + } + validNSs = sync.Map{} + + sriovUplinkName = "uplink" + sriovVfIndex = 5 + sriovVfRepresentor = fmt.Sprintf("%s-%d", sriovUplinkName, sriovVfIndex) +) + +func newTestIfConfigurator(ovsHardwareOffloadEnabled bool, netlink netlinkutil.Interface, sriovnet SriovNet) *ifConfigurator { + return &ifConfigurator{ + ovsDatapathType: ovsconfig.OVSDatapathSystem, + isOvsHardwareOffloadEnabled: ovsHardwareOffloadEnabled, + disableTXChecksumOffload: true, + netlink: netlink, + sriovnet: sriovnet, + } +} + +type fakeNS struct { + path string + fd uintptr + setErr error + stopCh chan struct{} + waitCompleted bool +} + +func (ns *fakeNS) Do(toRun func(ns.NetNS) error) error { + defer func() { + if ns.waitCompleted { + ns.stopCh <- struct{}{} + } + }() + return toRun(ns) +} + +func (ns *fakeNS) Set() error { + return ns.setErr +} + +func (ns *fakeNS) Path() string { + return ns.path +} + +func (ns *fakeNS) Fd() uintptr { + return ns.fd +} + +func (ns *fakeNS) Close() error { + return nil +} + +func (ns *fakeNS) clear() { + if ns.waitCompleted { + <-ns.stopCh + } + validNSs.Delete(ns.path) +} + +func createNS(t *testing.T, waitForComplete bool) *fakeNS { + nsPath := generateUUID(t) + fakeNs := &fakeNS{path: nsPath, fd: uintptr(unsafe.Pointer(&nsPath)), waitCompleted: waitForComplete, stopCh: make(chan struct{})} + validNSs.Store(nsPath, fakeNs) + return fakeNs +} + +func getFakeNS(nspath string) (ns.NetNS, error) { + fakeNs, exists := validNSs.Load(nspath) + if exists { + return fakeNs.(*fakeNS), nil + } + return nil, fmt.Errorf("ns not found %s", nspath) +} + +func TestConfigureContainerLink(t *testing.T) { + controller := 
gomock.NewController(t) + defer controller.Finish() + + fakeSriovNet := cniservertest.NewMockSriovNet(controller) + fakeNetlink := netlinktest.NewMockInterface(controller) + + sriovPfName := "pf" + sriovVfNetdeviceName := "vfDevice" + vfDeviceLink := &netlink.Dummy{LinkAttrs: netlink.LinkAttrs{Index: 2, MTU: mtu, HardwareAddr: containerVethMac, Name: sriovVfNetdeviceName, Flags: net.FlagUp}} + + defer mockGetNS()() + defer mockWithNetNSPath()() + + for _, tc := range []struct { + name string + ovsHardwareOffloadEnabled bool + sriovVFDeviceID string + vfNetdevices []string + podSriovVFDeviceID string + renameIntefaceErr error + setupVethErr error + ipamConfigureIfaceErr error + ethtoolEthTXHWCsumOffErr error + expectErr error + }{ + { + name: "container-vethpair-success", + ovsHardwareOffloadEnabled: false, + }, { + name: "container-vethpair-failure", + ovsHardwareOffloadEnabled: false, + setupVethErr: fmt.Errorf("unable to setup veth pair for container"), + expectErr: fmt.Errorf("failed to create veth devices for container %s: unable to setup veth pair for container", podContainerID), + }, { + name: "container-ipam-failure", + ovsHardwareOffloadEnabled: false, + ipamConfigureIfaceErr: fmt.Errorf("unable to configure container IPAM"), + expectErr: fmt.Errorf("failed to configure IP address for container %s: unable to configure container IPAM", podContainerID), + }, { + name: "container-hwoffload-failure", + ovsHardwareOffloadEnabled: true, + ethtoolEthTXHWCsumOffErr: fmt.Errorf("unable to disable offloading"), + expectErr: fmt.Errorf("error when disabling TX checksum offload on container veth: unable to disable offloading"), + }, { + name: "br-sriov-offloading-disable", + ovsHardwareOffloadEnabled: false, + sriovVFDeviceID: "br-vf", + expectErr: fmt.Errorf("OVS is configured with hardware offload disabled, but SR-IOV VF was requested; please set hardware offload to true via antrea yaml"), + }, { + name: "br-sriov-success", + ovsHardwareOffloadEnabled: true, + 
sriovVFDeviceID: "br-vf", + vfNetdevices: []string{sriovVfNetdeviceName}, + }, { + name: "br-sriov-pciaddress-issue", + ovsHardwareOffloadEnabled: true, + sriovVFDeviceID: "br-vf", + vfNetdevices: []string{}, + expectErr: fmt.Errorf("failed to get one netdevice interface per br-vf"), + }, { + name: "br-sriov-rename-failure", + ovsHardwareOffloadEnabled: true, + sriovVFDeviceID: "br-vf", + vfNetdevices: []string{sriovVfNetdeviceName}, + renameIntefaceErr: fmt.Errorf("unable to rename netlink"), + expectErr: fmt.Errorf("failed to rename %s to %s: unable to rename netlink", sriovVfRepresentor, hostIfaceName), + }, { + name: "pod-sriov-success", + ovsHardwareOffloadEnabled: true, + podSriovVFDeviceID: "sriovPodVF", + }, + } { + t.Run(tc.name, func(t *testing.T) { + defer mockSetupVethWithName(tc.setupVethErr, 1, 2)() + defer mockRenameInterface(tc.renameIntefaceErr)() + defer mockIPAMConfigureIface(tc.ipamConfigureIfaceErr)() + defer mockEthtoolTXHWCsumOff(tc.ethtoolEthTXHWCsumOffErr)() + testIfConfigurator := newTestIfConfigurator(tc.ovsHardwareOffloadEnabled, fakeNetlink, fakeSriovNet) + containerNS := createNS(t, false) + defer containerNS.clear() + moveVFtoNS := false + if tc.sriovVFDeviceID != "" && tc.ovsHardwareOffloadEnabled { + fakeSriovNet.EXPECT().GetNetDevicesFromPci(tc.sriovVFDeviceID).Return(tc.vfNetdevices, nil).Times(1) + if len(tc.vfNetdevices) == 1 { + fakeSriovNet.EXPECT().GetUplinkRepresentor(tc.sriovVFDeviceID).Return(sriovUplinkName, nil).Times(1) + fakeSriovNet.EXPECT().GetVfIndexByPciAddress(tc.sriovVFDeviceID).Return(sriovVfIndex, nil).Times(1) + fakeSriovNet.EXPECT().GetVfRepresentor(sriovUplinkName, sriovVfIndex).Return(sriovVfRepresentor, nil).Times(1) + if tc.renameIntefaceErr == nil { + hostInterfaceLink := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{Index: 2, MTU: mtu, HardwareAddr: hostVethMac, Name: hostIfaceName, Flags: net.FlagUp}, + } + fakeNetlink.EXPECT().LinkByName(hostIfaceName).Return(hostInterfaceLink, nil).Times(1) + 
moveVFtoNS = true + } + } + } + if tc.podSriovVFDeviceID != "" { + fakeSriovNet.EXPECT().GetPfName(tc.podSriovVFDeviceID).Return(sriovPfName, nil).Times(1) + fakeSriovNet.EXPECT().GetVfid(tc.podSriovVFDeviceID, sriovPfName).Return(sriovVfIndex, nil).Times(1) + fakeSriovNet.EXPECT().GetVFLinkNames(tc.podSriovVFDeviceID).Return(sriovVfNetdeviceName, nil).Times(1) + fakeNetlink.EXPECT().LinkByName(sriovVfNetdeviceName).Return(vfDeviceLink, nil).Times(1) + moveVFtoNS = true + } + if moveVFtoNS { + fakeNetlink.EXPECT().LinkByName(sriovVfNetdeviceName).Return(vfDeviceLink, nil).Times(1) + fakeNetlink.EXPECT().LinkSetNsFd(vfDeviceLink, gomock.Any()).Return(nil).Times(1) + containerInterfaceLink := &netlink.Dummy{LinkAttrs: netlink.LinkAttrs{Index: 2, MTU: mtu, HardwareAddr: containerVethMac, Name: containerIfaceName, Flags: net.FlagUp}} + fakeNetlink.EXPECT().LinkByName(containerIfaceName).Return(containerInterfaceLink, nil).Times(1) + fakeNetlink.EXPECT().LinkSetMTU(containerInterfaceLink, gomock.Any()).Return(nil).Times(1) + fakeNetlink.EXPECT().LinkSetUp(containerInterfaceLink).Return(nil).Times(1) + } + err := testIfConfigurator.configureContainerLink(podName, testPodNamespace, podContainerID, containerNS.Path(), containerIfaceName, mtu, tc.sriovVFDeviceID, tc.podSriovVFDeviceID, ipamResult, nil) + if tc.expectErr != nil { + assert.Error(t, err) + assert.Equal(t, tc.expectErr, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestAdvertiseContainerAddr(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + interfaceID := 1 + ipv4CIDR := net.IPNet{ + IP: net.ParseIP("192.168.100.100"), + Mask: net.IPv4Mask(255, 255, 255, 0), + } + ipv4Gateway := net.ParseIP("192.168.100.1") + ipv6CIDR := net.IPNet{ + IP: net.ParseIP("fe12:ab::64:64"), + Mask: net.IPMask([]byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}), + } + ipv6Gateway := net.ParseIP("fe12:ab::64:1") + defer mockIsNSorErr()() + defer 
mockWithNetNSPath()() + testIfConfigurator := newTestIfConfigurator(false, nil, nil) + + for _, tc := range []struct { + name string + result *current.Result + runInNS bool + netInterfaceError error + advertiseIPv4 bool + advertiseIPv6 bool + ipv4ArpingErr error + ipv6NDPErr error + }{ + { + name: "result-no-ips", + result: ¤t.Result{IPs: nil}, + }, { + name: "interface-not-found", + runInNS: true, + result: ¤t.Result{IPs: []*current.IPConfig{ + {Version: "4", Interface: &interfaceID, Address: ipv4CIDR, Gateway: ipv4Gateway}, + {Version: "6", Interface: &interfaceID, Address: ipv6CIDR, Gateway: ipv6Gateway}, + }}, + netInterfaceError: fmt.Errorf("unable to find interface"), + }, { + name: "invalid-ipam-result", + runInNS: true, + result: ¤t.Result{IPs: []*current.IPConfig{ + {Version: "5", Interface: &interfaceID, Address: ipv4CIDR, Gateway: ipv4Gateway}, + }}, + }, { + name: "advertise-ipv4-only", + runInNS: true, + result: ¤t.Result{IPs: []*current.IPConfig{ + {Version: "4", Interface: &interfaceID, Address: ipv4CIDR, Gateway: ipv4Gateway}, + }}, + advertiseIPv4: true, + }, { + name: "advertise-ipv4-failure", + runInNS: true, + result: ¤t.Result{IPs: []*current.IPConfig{ + {Version: "4", Interface: &interfaceID, Address: ipv4CIDR, Gateway: ipv4Gateway}, + }}, + ipv4ArpingErr: fmt.Errorf("failed to send GARP on interface"), + advertiseIPv4: true, + }, { + name: "advertise-ipv6-only", + runInNS: true, + result: ¤t.Result{IPs: []*current.IPConfig{ + {Version: "6", Interface: &interfaceID, Address: ipv6CIDR, Gateway: ipv6Gateway}, + }}, + advertiseIPv6: true, + }, { + name: "advertise-ipv6-failure", + runInNS: true, + result: ¤t.Result{IPs: []*current.IPConfig{ + {Version: "6", Interface: &interfaceID, Address: ipv6CIDR, Gateway: ipv6Gateway}, + }}, + advertiseIPv6: true, + ipv6NDPErr: fmt.Errorf("failed to send IPv6 NDP on interface"), + }, { + name: "advertise-dualstack", + runInNS: true, + result: ¤t.Result{IPs: []*current.IPConfig{ + {Version: "4", Interface: 
&interfaceID, Address: ipv4CIDR, Gateway: ipv4Gateway}, + {Version: "6", Interface: &interfaceID, Address: ipv6CIDR, Gateway: ipv6Gateway}, + }}, + advertiseIPv4: true, + advertiseIPv6: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + defer mockGetInterfaceByName(tc.netInterfaceError, 1)() + defer func() { + arpingGratuitousARPOverIface = arping.GratuitousARPOverIface + ndpGratuitousNDPOverIface = ndp.GratuitousNDPOverIface + }() + containerNS := createNS(t, tc.runInNS) + count := 0 + if tc.advertiseIPv4 { + count += 3 + } + if tc.advertiseIPv6 { + count += 3 + } + if tc.advertiseIPv4 { + arpingGratuitousARPOverIface = func(srcIP net.IP, iface *net.Interface) error { + count -= 1 + return tc.ipv4ArpingErr + } + } + if tc.advertiseIPv6 { + ndpGratuitousNDPOverIface = func(srcIP net.IP, iface *net.Interface) error { + count -= 1 + return tc.ipv6NDPErr + } + } + err := testIfConfigurator.advertiseContainerAddr(containerNS.Path(), containerIfaceName, tc.result) + assert.NoError(t, err) + containerNS.clear() + assert.Equal(t, 0, count) + }) + } +} + +func TestCheckContainerInterface(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + containerIPs := ipamResult.IPs + containerRoutes := ipamResult.Routes + sriovVfNetdeviceName := "vfDevice" + vfDeviceLink := &netlink.Dummy{LinkAttrs: netlink.LinkAttrs{Index: 2, MTU: mtu, HardwareAddr: containerVethMac, Name: sriovVfNetdeviceName, Flags: net.FlagUp}} + + fakeSriovNet := cniservertest.NewMockSriovNet(controller) + fakeNetlink := netlinktest.NewMockInterface(controller) + + defer mockWithNetNSPath()() + testIfConfigurator := newTestIfConfigurator(false, fakeNetlink, fakeSriovNet) + for _, tc := range []struct { + name string + sriovVFDeviceID string + vfDevices []string + containerIPs []*current.IPConfig + containerIface *current.Interface + containerLink netlink.Link + peerIndex int + getPeerErr error + getNetDevice bool + getDeviceErr error + validateIPErr error + 
validateRouteErr error + expectErrStr string + }{ + { + name: "containerNS-not-equal", + containerIface: ¤t.Interface{Name: containerIfaceName, Sandbox: "not-exist", Mac: "01:02:03:04:05:06"}, + expectErrStr: "sandbox in prevResult not-exist doesn't match configured netns", + }, + { + name: "sriov-interface-unset", + containerIface: ¤t.Interface{Mac: containerMAC}, + sriovVFDeviceID: "sriovVF", + vfDevices: []string{}, + expectErrStr: "interface name is missing", + }, + { + name: "sriov-netdevice-count-issue", + containerIface: ¤t.Interface{Name: containerIfaceName, Mac: containerMAC}, + sriovVFDeviceID: "sriovVF", + getNetDevice: true, + vfDevices: []string{sriovVfNetdeviceName}, + containerLink: vfDeviceLink, + expectErrStr: "VF netdevice still in host network namespace sriovVF [vfDevice]", + }, { + name: "sriov-get-netdevice-failure", + containerIface: ¤t.Interface{Name: containerIfaceName, Mac: containerMAC}, + sriovVFDeviceID: "sriovVF", + getNetDevice: true, + vfDevices: []string{}, + containerLink: vfDeviceLink, + getDeviceErr: fmt.Errorf("unable to get VF device"), + expectErrStr: "failed to find netdevice to PCI address sriovVF: unable to get VF device", + }, { + name: "sriov-MAC-mismatch", + containerIface: ¤t.Interface{Name: containerIfaceName, Mac: hostIfaceMAC}, + sriovVFDeviceID: "sriovVF", + getNetDevice: true, + vfDevices: []string{}, + containerLink: vfDeviceLink, + expectErrStr: fmt.Sprintf("interface %s MAC %s doesn't match container MAC: %s", containerIfaceName, hostIfaceMAC, "01:02:03:04:05:06"), + }, { + name: "sriov-success", + containerIface: ¤t.Interface{Name: containerIfaceName, Mac: containerMAC}, + sriovVFDeviceID: "sriovVF", + getNetDevice: true, + vfDevices: []string{}, + containerLink: vfDeviceLink, + }, { + name: "container-link-type-incorrect", + containerIface: ¤t.Interface{Name: containerIfaceName, Mac: containerMAC}, + containerLink: vfDeviceLink, + expectErrStr: fmt.Sprintf("interface %s is not of type veth", 
containerIfaceName), + }, { + name: "container-peer-not-found", + containerIface: ¤t.Interface{Name: containerIfaceName, Mac: "01:02:03:04:05:06"}, + containerLink: containerVeth, + getPeerErr: fmt.Errorf("peer not found"), + expectErrStr: fmt.Sprintf("failed to get veth peer index for veth %s: peer not found", containerIfaceName), + }, { + name: "container-ip-not-equal", + containerIface: ¤t.Interface{Name: containerIfaceName, Mac: containerMAC}, + vfDevices: []string{}, + containerLink: containerVeth, + validateIPErr: fmt.Errorf("IP not equal"), + expectErrStr: "IP not equal", + }, { + name: "container-route-not-equal", + containerIface: ¤t.Interface{Name: containerIfaceName, Mac: containerMAC}, + vfDevices: []string{}, + containerLink: containerVeth, + validateRouteErr: fmt.Errorf("route not equal"), + expectErrStr: "route not equal", + }, { + name: "container-success", + containerIface: ¤t.Interface{Name: containerIfaceName, Mac: containerMAC}, + vfDevices: []string{}, + containerLink: containerVeth, + }, + } { + t.Run(tc.name, func(t *testing.T) { + defer mockIpValidateExpectedInterfaceIPs(tc.validateIPErr)() + defer mockIpValidateExpectedRoute(tc.validateRouteErr)() + defer mockIpGetVethPeerIfindex(tc.peerIndex, tc.getPeerErr)() + + containerNS := createNS(t, false) + defer containerNS.clear() + + if tc.containerIface.Sandbox == "" { + tc.containerIface.Sandbox = containerNS.Path() + } + + if tc.containerLink != nil { + fakeNetlink.EXPECT().LinkByName(tc.containerIface.Name).Return(tc.containerLink, nil).Times(1) + } + if tc.sriovVFDeviceID != "" && tc.getNetDevice { + fakeSriovNet.EXPECT().GetNetDevicesFromPci(tc.sriovVFDeviceID).Return(tc.vfDevices, tc.getDeviceErr).Times(1) + } + _, err := testIfConfigurator.checkContainerInterface(containerNS.Path(), podContainerID, tc.containerIface, containerIPs, containerRoutes, tc.sriovVFDeviceID) + if tc.expectErrStr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.expectErrStr) + } else { + 
assert.NoError(t, err) + } + }) + } +} + +func TestValidateVFRepInterface(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + fakeSriovNet := cniservertest.NewMockSriovNet(controller) + testIfConfigurator := newTestIfConfigurator(false, nil, fakeSriovNet) + + for _, tc := range []struct { + name string + sriovVFDeviceID string + getUplinkRepErr error + getVfIndexErr error + getVfRepErr error + expectedErr error + }{ + { + name: "get-uplink-failure", + sriovVFDeviceID: "vf1", + getUplinkRepErr: fmt.Errorf("unable to get uplink"), + expectedErr: fmt.Errorf("failed to get uplink representor for PCI Address vf1"), + }, { + name: "get-vfIndex-failure", + sriovVFDeviceID: "vf2", + getVfIndexErr: fmt.Errorf("unable to get vf index"), + expectedErr: fmt.Errorf("failed to vf index for PCI Address vf2"), + }, { + name: "get-vf-rep-failure", + sriovVFDeviceID: "vf3", + getVfRepErr: fmt.Errorf("unable to get vf rep"), + expectedErr: fmt.Errorf("unable to get vf rep"), + }, { + name: "get-vf-success", + sriovVFDeviceID: "vf4", + }, + } { + t.Run(tc.name, func(t *testing.T) { + fakeSriovNet.EXPECT().GetUplinkRepresentor(tc.sriovVFDeviceID).Return(sriovUplinkName, tc.getUplinkRepErr).Times(1) + if tc.getUplinkRepErr == nil { + fakeSriovNet.EXPECT().GetVfIndexByPciAddress(tc.sriovVFDeviceID).Return(sriovVfIndex, tc.getVfIndexErr).Times(1) + if tc.getVfIndexErr == nil { + fakeSriovNet.EXPECT().GetVfRepresentor(sriovUplinkName, sriovVfIndex).Return(sriovVfRepresentor, tc.getVfRepErr).Times(1) + } + } + vfRep, err := testIfConfigurator.validateVFRepInterface(tc.sriovVFDeviceID) + if tc.expectedErr != nil { + require.Error(t, err) + assert.Equal(t, tc.expectedErr, err) + } else { + assert.NoError(t, err) + assert.Equal(t, vfRep, sriovVfRepresentor) + } + }) + } +} + +func TestGetInterceptedInterfaces(t *testing.T) { + sandbox := "containerSandbox" + containerNS := "containerNS" + testIfConfigurator := newTestIfConfigurator(false, nil, nil) + + for 
_, tc := range []struct { + name string + hostIfaceName string + brName string + getContainerIfaceErr error + getPeerIfaceErr error + containerInterface *current.Interface + hostInterface *current.Interface + expErrStr string + }{ + { + name: "get-container-iface-failure", + hostIfaceName: "hostPort1", + getContainerIfaceErr: fmt.Errorf("unable to get container device"), + expErrStr: "connectInterceptedInterface failed to get veth info", + }, { + name: "get-peer-iface-failure", + hostIfaceName: "hostPort2", + getPeerIfaceErr: fmt.Errorf("unable to get container peer"), + expErrStr: "connectInterceptedInterface failed to get veth peer info", + }, { + name: "peer-attach-to-bridge", + hostIfaceName: "hostPort3", + brName: "br", + expErrStr: "connectInterceptedInterface: does not expect device hostPort3 attached to bridge", + }, { + name: "success", + hostIfaceName: "hostPort4", + containerInterface: ¤t.Interface{Name: containerIfaceName, Sandbox: sandbox, Mac: containerMAC}, + hostInterface: ¤t.Interface{Name: "hostPort4", Mac: hostIfaceMAC}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + containerInterface := &net.Interface{Index: 1, MTU: mtu, HardwareAddr: containerVethMac, Name: containerIfaceName, Flags: net.FlagUp} + hostInterface := &net.Interface{Index: 2, MTU: mtu, HardwareAddr: hostVethMac, Name: tc.hostIfaceName, Flags: net.FlagUp} + defer mockGetNSDevInterface(containerInterface, tc.getContainerIfaceErr)() + defer mockGetNSPeerDevBridge(hostInterface, tc.brName, tc.getPeerIfaceErr)() + containerIface, hostIface, err := testIfConfigurator.getInterceptedInterfaces(sandbox, containerNS, containerIfaceName) + if tc.expErrStr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.expErrStr) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.containerInterface, containerIface) + assert.Equal(t, tc.hostInterface, hostIface) + } + }) + } +} + +func TestValidateContainerPeerInterface(t *testing.T) { + controller := gomock.NewController(t) + 
defer controller.Finish() + + fakeNetlink := netlinktest.NewMockInterface(controller) + testIfConfigurator := newTestIfConfigurator(false, fakeNetlink, nil) + + for _, tc := range []struct { + name string + interfaces []*current.Interface + containerVeth *vethPair + hostLink netlink.Link + hostLinkErr error + peerIndex int + getPeerErr error + expError error + expHostVeth *vethPair + }{ + { + name: "host-interface-not-set", + interfaces: []*current.Interface{{Name: containerIfaceName, Sandbox: "container-sandbox", Mac: containerMAC}}, + containerVeth: &vethPair{name: containerIfaceName, ifIndex: 1}, + expError: fmt.Errorf("peer veth interface not found for container interface %s", containerIfaceName), + }, { + name: "host-link-not-found", + interfaces: []*current.Interface{{Name: containerIfaceName, Sandbox: "container-sandbox", Mac: containerMAC}, {Name: hostIfaceName, Mac: hostIfaceMAC}}, + containerVeth: &vethPair{name: containerIfaceName, ifIndex: 1}, + hostLink: hostVeth, + hostLinkErr: fmt.Errorf("unable to find host link with name %s", hostIfaceName), + expError: fmt.Errorf("peer veth interface not found for container interface %s", containerIfaceName), + }, { + name: "host-link-index-incorrect", + interfaces: []*current.Interface{{Name: containerIfaceName, Sandbox: "container-sandbox", Mac: containerMAC}, {Name: hostIfaceName, Mac: hostIfaceMAC}}, + containerVeth: &vethPair{name: containerIfaceName, ifIndex: 1, peerIndex: 3}, + hostLink: hostVeth, + expError: fmt.Errorf("peer veth interface not found for container interface %s", containerIfaceName), + }, { + name: "host-link-peer-index-incorrect", + interfaces: []*current.Interface{{Name: containerIfaceName, Sandbox: "container-sandbox", Mac: containerMAC}, {Name: hostIfaceName, Mac: hostIfaceMAC}}, + containerVeth: &vethPair{name: containerIfaceName, ifIndex: 1, peerIndex: 2}, + hostLink: hostVeth, + peerIndex: 3, + expError: fmt.Errorf("host interface %s peer index doesn't match container interface %s 
index", hostIfaceName, containerIfaceName), + }, { + name: "host-link-peer-not-found", + interfaces: []*current.Interface{{Name: containerIfaceName, Sandbox: "container-sandbox", Mac: containerMAC}, {Name: hostIfaceName, Mac: hostIfaceMAC}}, + containerVeth: &vethPair{name: containerIfaceName, ifIndex: 1, peerIndex: 2}, + hostLink: hostVeth, + peerIndex: 1, + getPeerErr: fmt.Errorf("peer link not found"), + expError: fmt.Errorf("failed to get veth peer index for host interface %s: peer link not found", hostIfaceName), + }, { + name: "host-link-MAC-incorrect", + interfaces: []*current.Interface{{Name: containerIfaceName, Sandbox: "container-sandbox", Mac: containerMAC}, {Name: hostIfaceName, Mac: "aa:bb:cc:cc:bb:aa"}}, + containerVeth: &vethPair{name: containerIfaceName, ifIndex: 1, peerIndex: 2}, + hostLink: hostVeth, + peerIndex: 1, + expError: fmt.Errorf("host interface %s MAC aa:bb:cc:cc:bb:aa doesn't match", hostIfaceName), + }, { + name: "success", + interfaces: []*current.Interface{{Name: containerIfaceName, Sandbox: "container-sandbox", Mac: containerMAC}, {Name: hostIfaceName, Mac: hostIfaceMAC}}, + containerVeth: &vethPair{name: containerIfaceName, ifIndex: 1, peerIndex: 2}, + hostLink: hostVeth, + peerIndex: 1, + expHostVeth: &vethPair{name: hostIfaceName, ifIndex: 2, peerIndex: 1}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + defer mockIpGetVethPeerIfindex(tc.peerIndex, tc.getPeerErr)() + if tc.hostLink != nil { + fakeNetlink.EXPECT().LinkByName(hostIfaceName).Return(tc.hostLink, tc.hostLinkErr).Times(1) + } + hostVeth, err := testIfConfigurator.validateContainerPeerInterface(tc.interfaces, tc.containerVeth) + if tc.expError != nil { + assert.Error(t, err) + assert.Equal(t, tc.expError, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expHostVeth, hostVeth) + } + }) + } +} + +func mockSetupVethWithName(setupVethErr error, containerIndex, hostIndex int) func() { + originalIPSetupVethWithName := ipSetupVethWithName + ipSetupVethWithName 
= func(contVethName, hostVethName string, mtu int, mac string, hostNS ns.NetNS) (net.Interface, net.Interface, error) { + if setupVethErr != nil { + return net.Interface{}, net.Interface{}, setupVethErr + } + containerInterface := net.Interface{Index: containerIndex, MTU: mtu, HardwareAddr: containerVethMac, Name: contVethName, Flags: net.FlagUp} + hostInterface := net.Interface{Index: hostIndex, MTU: mtu, HardwareAddr: hostVethMac, Name: hostVethName, Flags: net.FlagUp} + return containerInterface, hostInterface, nil + } + return func() { + ipSetupVethWithName = originalIPSetupVethWithName + } +} + +func mockRenameInterface(renameIntefaceErr error) func() { + originalRenameInterface := renameInterface + renameInterface = func(from, to string) error { + return renameIntefaceErr + } + return func() { + renameInterface = originalRenameInterface + } +} + +func mockIPAMConfigureIface(ipamConfigureIfaceErr error) func() { + originalIPAMConfigureIface := ipamConfigureIface + ipamConfigureIface = func(ifName string, res *current.Result) error { + return ipamConfigureIfaceErr + } + return func() { + ipamConfigureIface = originalIPAMConfigureIface + } +} + +func mockEthtoolTXHWCsumOff(ethtoolEthTXHWCsumOffErr error) func() { + originalEthtoolTXHWCsumOff := ethtoolTXHWCsumOff + ethtoolTXHWCsumOff = func(name string) error { + return ethtoolEthTXHWCsumOffErr + } + return func() { + ethtoolTXHWCsumOff = originalEthtoolTXHWCsumOff + } +} + +func mockGetInterfaceByName(netInterfaceError error, ifaceIndex int) func() { + originalNetInterfaceByName := netInterfaceByName + netInterfaceByName = func(name string) (*net.Interface, error) { + return &net.Interface{Index: ifaceIndex, MTU: mtu, HardwareAddr: containerVethMac, Name: name, Flags: net.FlagUp}, netInterfaceError + } + return func() { + netInterfaceByName = originalNetInterfaceByName + } +} + +func mockIpValidateExpectedInterfaceIPs(validateIPErr error) func() { + originalIpValidateExpectedInterfaceIPs := 
ipValidateExpectedInterfaceIPs + ipValidateExpectedInterfaceIPs = func(ifName string, resultIPs []*current.IPConfig) error { + return validateIPErr + } + return func() { + ipValidateExpectedInterfaceIPs = originalIpValidateExpectedInterfaceIPs + } +} + +func mockIpValidateExpectedRoute(validateRouteErr error) func() { + originalIpValidateExpectedRoute := ipValidateExpectedRoute + ipValidateExpectedRoute = func(resultRoutes []*cnitypes.Route) error { + return validateRouteErr + } + return func() { + ipValidateExpectedRoute = originalIpValidateExpectedRoute + } +} + +func mockIpGetVethPeerIfindex(peerIndex int, getPeerErr error) func() { + originalIpGetVethPeerIfindex := ipGetVethPeerIfindex + ipGetVethPeerIfindex = func(ifName string) (netlink.Link, int, error) { + return &netlink.Dummy{}, peerIndex, getPeerErr + } + return func() { + ipGetVethPeerIfindex = originalIpGetVethPeerIfindex + } +} + +func mockGetNSPeerDevBridge(hostInterface *net.Interface, brName string, getPeerIfaceErr error) func() { + originalGetNSPeerDevBridge := getNSPeerDevBridge + getNSPeerDevBridge = func(nsPath, dev string) (*net.Interface, string, error) { + return hostInterface, brName, getPeerIfaceErr + } + return func() { + getNSPeerDevBridge = originalGetNSPeerDevBridge + } +} + +func mockGetNSDevInterface(containerInterface *net.Interface, getContainerIfaceErr error) func() { + originalGetNSDevInterface := getNSDevInterface + getNSDevInterface = func(nsPath, dev string) (*net.Interface, error) { + return containerInterface, getContainerIfaceErr + } + return func() { + getNSDevInterface = originalGetNSDevInterface + } +} + +func mockGetNS() func() { + originalGetNS := nsGetNS + nsGetNS = getFakeNS + return func() { + nsGetNS = originalGetNS + } +} + +func mockWithNetNSPath() func() { + originalWithNetNSPath := nsWithNetNSPath + nsWithNetNSPath = func(nspath string, toRun func(ns.NetNS) error) error { + netNS, err := getFakeNS(nspath) + if err != nil { + return err + } + return 
netNS.Do(toRun) + } + return func() { + nsWithNetNSPath = originalWithNetNSPath + } +} + +func mockIsNSorErr() func() { + originalIsNSorErr := nsIsNSorErr + nsIsNSorErr = func(nspath string) error { + _, err := getFakeNS(nspath) + return err + } + return func() { + nsIsNSorErr = originalIsNSorErr + } +} diff --git a/pkg/agent/cniserver/interface_configuration_windows.go b/pkg/agent/cniserver/interface_configuration_windows.go index 40042db1256..a888a9e1830 100644 --- a/pkg/agent/cniserver/interface_configuration_windows.go +++ b/pkg/agent/cniserver/interface_configuration_windows.go @@ -41,7 +41,23 @@ const ( notFoundHNSEndpoint = "The endpoint was not found" ) -type postInterfaceCreateHook func() error +var ( + getHnsNetworkByNameFunc = hcsshim.GetHNSNetworkByName + listHnsEndpointFunc = hcsshim.HNSListEndpointRequest + setInterfaceMTUFunc = util.SetInterfaceMTU + hostInterfaceExistsFunc = util.HostInterfaceExists + getNetInterfaceAddrsFunc = getNetInterfaceAddrs + createHnsEndpointFunc = createHnsEndpoint + getNamespaceEndpointIDsFunc = hcn.GetNamespaceEndpointIds + hotAttachEndpointFunc = hcsshim.HotAttachEndpoint + attachEndpointInNamespaceFunc = attachEndpointInNamespace + isContainerAttachOnEndpointFunc = isContainerAttachOnEndpoint + getHcnEndpointByIDFunc = hcn.GetEndpointByID + deleteHnsEndpointFunc = deleteHnsEndpoint + removeEndpointFromNamespaceFunc = hcn.RemoveNamespaceEndpoint + getHnsEndpointByNameFunc = hcsshim.GetHNSEndpointByName + getNetInterfaceByNameFunc = net.InterfaceByName +) type ifConfigurator struct { hnsNetwork *hcsshim.HNSNetwork @@ -50,11 +66,11 @@ type ifConfigurator struct { // disableTXChecksumOffload is ignored on Windows. 
func newInterfaceConfigurator(ovsDatapathType ovsconfig.OVSDatapathType, isOvsHardwareOffloadEnabled bool, disableTXChecksumOffload bool) (*ifConfigurator, error) { - hnsNetwork, err := hcsshim.GetHNSNetworkByName(util.LocalHNSNetwork) + hnsNetwork, err := getHnsNetworkByNameFunc(util.LocalHNSNetwork) if err != nil { return nil, err } - eps, err := hcsshim.HNSListEndpointRequest() + eps, err := listHnsEndpointFunc() if err != nil { return nil, err } @@ -164,7 +180,7 @@ func (ic *ifConfigurator) configureContainerLink( // and the hcsshim call is not synchronized from the observation. return ic.addPostInterfaceCreateHook(infraContainerID, epName, containerAccess, func() error { ifaceName := util.VirtualAdapterName(epName) - if err := util.SetInterfaceMTU(ifaceName, mtu); err != nil { + if err := setInterfaceMTUFunc(ifaceName, mtu); err != nil { return fmt.Errorf("failed to configure MTU on container interface '%s': %v", ifaceName, err) } return nil @@ -187,7 +203,7 @@ func (ic *ifConfigurator) createContainerLink(endpointName string, result *curre GatewayAddress: containerIP.Gateway.String(), IPAddress: containerIP.Address.IP, } - hnsEP, err := epRequest.Create() + hnsEP, err := createHnsEndpointFunc(epRequest) if err != nil { return nil, err } @@ -207,16 +223,16 @@ func attachContainerLink(ep *hcsshim.HNSEndpoint, containerID, sandbox, containe var hcnEp *hcn.HostComputeEndpoint if isDockerContainer(sandbox) { // Docker runtime - attached, err = ep.IsAttached(containerID) + attached, err = isContainerAttachOnEndpointFunc(ep, containerID) if err != nil { return nil, err } } else { // Containerd runtime - if hcnEp, err = hcn.GetEndpointByID(ep.Id); err != nil { + if hcnEp, err = getHcnEndpointByIDFunc(ep.Id); err != nil { return nil, err } - attachedEpIds, err := hcn.GetNamespaceEndpointIds(sandbox) + attachedEpIds, err := getNamespaceEndpointIDsFunc(sandbox) if err != nil { return nil, err } @@ -233,12 +249,12 @@ func attachContainerLink(ep *hcsshim.HNSEndpoint, 
containerID, sandbox, containe } else { if hcnEp == nil { // Docker runtime - if err := hcsshim.HotAttachEndpoint(containerID, ep.Id); err != nil { + if err := hotAttachEndpointFunc(containerID, ep.Id); err != nil { return nil, err } } else { // Containerd runtime - if err := hcnEp.NamespaceAttach(sandbox); err != nil { + if err := attachEndpointInNamespaceFunc(hcnEp, sandbox); err != nil { return nil, err } } @@ -251,6 +267,14 @@ func attachContainerLink(ep *hcsshim.HNSEndpoint, containerID, sandbox, containe return containerIface, nil } +func isContainerAttachOnEndpoint(endpoint *hcsshim.HNSEndpoint, containerID string) (bool, error) { + return endpoint.IsAttached(containerID) +} + +func attachEndpointInNamespace(hcnEp *hcn.HostComputeEndpoint, sandbox string) error { + return hcnEp.NamespaceAttach(sandbox) +} + // advertiseContainerAddr returns immediately as the address is advertised automatically after it is configured on an // network interface on Windows. func (ic *ifConfigurator) advertiseContainerAddr(containerNetNS string, containerIfaceName string, result *current.Result) error { @@ -273,16 +297,16 @@ func (ic *ifConfigurator) removeHNSEndpoint(endpoint *hcsshim.HNSEndpoint, conta deleteCh := make(chan error) // Remove HNSEndpoint. 
go func() { - hcnEndpoint, _ := hcn.GetEndpointByID(endpoint.Id) + hcnEndpoint, _ := getHcnEndpointByIDFunc(endpoint.Id) if hcnEndpoint != nil && isValidHostNamespace(hcnEndpoint.HostComputeNamespace) { - err := hcn.RemoveNamespaceEndpoint(hcnEndpoint.HostComputeNamespace, hcnEndpoint.Id) + err := removeEndpointFromNamespaceFunc(hcnEndpoint.HostComputeNamespace, hcnEndpoint.Id) if err != nil { klog.Errorf("Failed to remove HostComputeEndpoint %s from HostComputeNameSpace %s: %v", hcnEndpoint.Name, hcnEndpoint.HostComputeNamespace, err) deleteCh <- err return } } - _, err := endpoint.Delete() + _, err := deleteHnsEndpointFunc(endpoint) if err != nil && strings.Contains(err.Error(), notFoundHNSEndpoint) { err = nil } @@ -311,6 +335,10 @@ func (ic *ifConfigurator) removeHNSEndpoint(endpoint *hcsshim.HNSEndpoint, conta return nil } +func deleteHnsEndpoint(endpoint *hcsshim.HNSEndpoint) (*hcsshim.HNSEndpoint, error) { + return endpoint.Delete() +} + // isValidHostNamespace checks if the hostNamespace is valid or not. When using Docker runtime, the hostNamespace // is not set, and Windows HCN should use a default value "00000000-0000-0000-0000-000000000000". An error returns // when removing HostComputeEndpoint in this namespace. This field is set with a valid value when containerd is used. 
@@ -347,7 +375,7 @@ func (ic *ifConfigurator) checkContainerInterface( } hnsEP := strings.Split(containerIface.Name, "_")[0] containerIfaceName := util.VirtualAdapterName(hnsEP) - intf, err := net.InterfaceByName(containerIfaceName) + intf, err := getNetInterfaceByNameFunc(containerIfaceName) if err != nil { klog.Errorf("Failed to get container %s interface: %v", containerID, err) return nil, err @@ -381,9 +409,13 @@ func (ic *ifConfigurator) checkContainerInterface( return contVeth, nil } +func getNetInterfaceAddrs(intf *net.Interface) ([]net.Addr, error) { + return intf.Addrs() +} + // validateExpectedInterfaceIPs checks if the vNIC for the container has configured with correct IP address. func validateExpectedInterfaceIPs(containerIPConfig *current.IPConfig, intf *net.Interface) error { - addrs, err := intf.Addrs() + addrs, err := getNetInterfaceAddrsFunc(intf) if err != nil { return err } @@ -416,7 +448,7 @@ func (ic *ifConfigurator) validateContainerPeerInterface(interfaces []*current.I return nil, fmt.Errorf("Host interface name %s doesn't match configured name %s", hostIntf.Name, expectedContainerIfname) } - ep, err := hcsshim.GetHNSEndpointByName(hostIntf.Name) + ep, err := getHnsEndpointByNameFunc(hostIntf.Name) if err != nil { klog.Errorf("Failed to get HNSEndpoint %s: %v", hostIntf.Name, err) return nil, err @@ -448,9 +480,9 @@ func (ic *ifConfigurator) getInterceptedInterfaces( } // getOVSInterfaceType returns "internal". Windows uses internal OVS interface for container vNIC. 
-func (ic *ifConfigurator) getOVSInterfaceType(ovsPortName string) int { +func getOVSInterfaceType(ovsPortName string) int { ifaceName := fmt.Sprintf("vEthernet (%s)", ovsPortName) - if !util.HostInterfaceExists(ifaceName) { + if !hostInterfaceExistsFunc(ifaceName) { return defaultOVSInterfaceType } return internalOVSInterfaceType @@ -479,7 +511,7 @@ func (ic *ifConfigurator) addPostInterfaceCreateHook(containerID, endpointName s klog.InfoS("Detected HNSEndpoint change, exit current goroutine", "HNSEndpoint", endpointName) return true, nil } - if !util.HostInterfaceExists(ifaceName) { + if !hostInterfaceExistsFunc(ifaceName) { klog.InfoS("Waiting for interface to be created", "interface", ifaceName) return false, nil } @@ -499,3 +531,7 @@ func (ic *ifConfigurator) addPostInterfaceCreateHook(containerID, endpointName s }() return nil } + +func createHnsEndpoint(epRequest *hcsshim.HNSEndpoint) (*hcsshim.HNSEndpoint, error) { + return epRequest.Create() +} diff --git a/pkg/agent/cniserver/interfaces.go b/pkg/agent/cniserver/interfaces.go new file mode 100644 index 00000000000..054587e2846 --- /dev/null +++ b/pkg/agent/cniserver/interfaces.go @@ -0,0 +1,44 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cniserver + +import ( + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/current" +) + +type postInterfaceCreateHook func() error + +// podInterfaceConfigurator is for testing. +type podInterfaceConfigurator interface { + configureContainerLink(podName string, podNamespace string, containerID string, containerNetNS string, containerIfaceName string, mtu int, brSriovVFDeviceID string, podSriovVFDeviceID string, result *current.Result, containerAccess *containerAccessArbitrator) error + removeContainerLink(containerID, hostInterfaceName string) error + advertiseContainerAddr(containerNetNS string, containerIfaceName string, result *current.Result) error + validateVFRepInterface(sriovVFDeviceID string) (string, error) + validateContainerPeerInterface(interfaces []*current.Interface, containerVeth *vethPair) (*vethPair, error) + getInterceptedInterfaces(sandbox string, containerNetNS string, containerIFDev string) (*current.Interface, *current.Interface, error) + checkContainerInterface(containerNetns, containerID string, containerIface *current.Interface, containerIPs []*current.IPConfig, containerRoutes []*cnitypes.Route, sriovVFDeviceID string) (interface{}, error) + addPostInterfaceCreateHook(containerID, endpointName string, containerAccess *containerAccessArbitrator, hook postInterfaceCreateHook) error +} + +type SriovNet interface { + GetNetDevicesFromPci(pciAddress string) ([]string, error) + GetUplinkRepresentor(pciAddress string) (string, error) + GetVfIndexByPciAddress(vfPciAddress string) (int, error) + GetVfRepresentor(uplink string, vfIndex int) (string, error) + GetPfName(vf string) (string, error) + GetVfid(addr string, pfName string) (int, error) + GetVFLinkNames(pciAddr string) (string, error) +} diff --git a/pkg/agent/cniserver/ipam/ipam_service.go b/pkg/agent/cniserver/ipam/ipam_service.go index 209305545f3..b6ba6d2c8e3 100644 --- a/pkg/agent/cniserver/ipam/ipam_service.go +++ 
b/pkg/agent/cniserver/ipam/ipam_service.go @@ -168,3 +168,12 @@ func getAntreaIPAMDriver() *AntreaIPAM { } return drivers[0].(*AntreaIPAM) } + +// The following functions are only for testing. +func ResetIPAMDriver(ipamType string, driver IPAMDriver) { + ipamDrivers[ipamType] = []IPAMDriver{driver} +} + +func AddIPAMResult(key string, result *IPAMResult) { + ipamResults.Store(key, result) +} diff --git a/pkg/agent/cniserver/pod_configuration.go b/pkg/agent/cniserver/pod_configuration.go index c50b248608b..0613b16b109 100644 --- a/pkg/agent/cniserver/pod_configuration.go +++ b/pkg/agent/cniserver/pod_configuration.go @@ -58,13 +58,17 @@ const ( internalOVSInterfaceType ) +var ( + getNSPath = util.GetNSPath +) + type podConfigurator struct { ovsBridgeClient ovsconfig.OVSBridgeClient ofClient openflow.Client routeClient route.Interface ifaceStore interfacestore.InterfaceStore gatewayMAC net.HardwareAddr - ifConfigurator *ifConfigurator + ifConfigurator podInterfaceConfigurator // podUpdateNotifier is used for notifying updates of local Pods to other components which may benefit from this // information, i.e. NetworkPolicyController, EgressController. podUpdateNotifier channel.Notifier @@ -157,9 +161,7 @@ func getContainerIPsString(ips []net.IP) string { // external_ids, initializes and returns an InterfaceConfig struct. // nill will be returned, if the OVS port does not have external IDs or it is // not created for a Pod interface. -// If "checkMac" param is set as true the ovsExternalIDMAC of portData should be -// a valid MAC string, otherwise it will print error. 
-func ParseOVSPortInterfaceConfig(portData *ovsconfig.OVSPortData, portConfig *interfacestore.OVSPortConfig, checkMac bool) *interfacestore.InterfaceConfig { +func ParseOVSPortInterfaceConfig(portData *ovsconfig.OVSPortData, portConfig *interfacestore.OVSPortConfig) *interfacestore.InterfaceConfig { if portData.ExternalIDs == nil { klog.V(2).Infof("OVS port %s has no external_ids", portData.Name) return nil @@ -177,7 +179,7 @@ func ParseOVSPortInterfaceConfig(portData *ovsconfig.OVSPortData, portConfig *in } containerMAC, err := net.ParseMAC(portData.ExternalIDs[ovsExternalIDMAC]) - if err != nil && checkMac { + if err != nil { klog.Errorf("Failed to parse MAC address from OVS external config %s: %v", portData.ExternalIDs[ovsExternalIDMAC], err) } @@ -244,7 +246,6 @@ func (pc *podConfigurator) configureInterfaces( if containerConfig, err = pc.connectInterfaceToOVS(podName, podNameSpace, containerID, hostIface, containerIface, result.IPs, result.VLANID, containerAccess); err != nil { return fmt.Errorf("failed to connect to ovs for container %s: %v", containerID, err) } - success = true defer func() { if !success { _ = pc.disconnectInterfaceFromOVS(containerConfig) @@ -269,7 +270,7 @@ func (pc *podConfigurator) configureInterfaces( func (pc *podConfigurator) createOVSPort(ovsPortName string, ovsAttachInfo map[string]interface{}, vlanID uint16) (string, error) { var portUUID string var err error - switch pc.ifConfigurator.getOVSInterfaceType(ovsPortName) { + switch getOVSInterfaceType(ovsPortName) { case internalOVSInterfaceType: portUUID, err = pc.ovsBridgeClient.CreateInternalPort(ovsPortName, 0, "", ovsAttachInfo) default: @@ -487,12 +488,13 @@ func (pc *podConfigurator) connectInterfaceToOVSCommon(ovsPortName string, conta }() // GetOFPort will wait for up to 1 second for OVSDB to report the OFPort number. 
- ofPort, err := pc.ovsBridgeClient.GetOFPort(ovsPortName, false) + var ofPort int32 + ofPort, err = pc.ovsBridgeClient.GetOFPort(ovsPortName, false) if err != nil { return fmt.Errorf("failed to get of_port of OVS port %s: %v", ovsPortName, err) } klog.V(2).Infof("Setting up Openflow entries for container %s", containerID) - if err := pc.ofClient.InstallPodFlows(ovsPortName, containerConfig.IPs, containerConfig.MAC, uint32(ofPort), containerConfig.VLANID, nil); err != nil { + if err = pc.ofClient.InstallPodFlows(ovsPortName, containerConfig.IPs, containerConfig.MAC, uint32(ofPort), containerConfig.VLANID, nil); err != nil { return fmt.Errorf("failed to add Openflow entries for container %s: %v", containerID, err) } containerConfig.OVSPortConfig = &interfacestore.OVSPortConfig{PortUUID: portUUID, OFPort: ofPort} @@ -549,7 +551,7 @@ func (pc *podConfigurator) connectInterceptedInterface( containerIPs []*current.IPConfig, containerAccess *containerAccessArbitrator, ) error { - sandbox, err := util.GetNSPath(containerNetNS) + sandbox, err := getNSPath(containerNetNS) if err != nil { return err } diff --git a/pkg/agent/cniserver/pod_configuration_linux_test.go b/pkg/agent/cniserver/pod_configuration_linux_test.go new file mode 100644 index 00000000000..ba8adb4da7b --- /dev/null +++ b/pkg/agent/cniserver/pod_configuration_linux_test.go @@ -0,0 +1,514 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cniserver + +import ( + "fmt" + "net" + "testing" + + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "antrea.io/antrea/pkg/agent/interfacestore" + openflowtest "antrea.io/antrea/pkg/agent/openflow/testing" + routetest "antrea.io/antrea/pkg/agent/route/testing" + "antrea.io/antrea/pkg/agent/util" + "antrea.io/antrea/pkg/ovs/ovsconfig" + ovsconfigtest "antrea.io/antrea/pkg/ovs/ovsconfig/testing" + "antrea.io/antrea/pkg/util/channel" +) + +var ( + containerMAC = "01:02:03:04:05:06" + hostIfaceMAC = "06:05:04:03:02:01" + containerIP = net.ParseIP("10.1.2.100") +) + +type fakeInterfaceConfigurator struct { + configureContainerLinkError error + removeContainerLinkError error + advertiseContainerAddrError error + ovsInterfaceTypeMapping map[string]int + validateVFRepInterfaceError error + validateContainerPeerInterfaceError error + containerVethPair *vethPair + containerMAC string + hostIfaceMAC string + hostIfaceName string + getInterceptedInterfacesError error + checkContainerInterfaceError error + containerVFLink interface{} +} + +func (c *fakeInterfaceConfigurator) configureContainerLink(podName string, podNamespace string, containerID string, containerNetNS string, containerIfaceName string, mtu int, brSriovVFDeviceID string, podSriovVFDeviceID string, result *current.Result, containerAccess *containerAccessArbitrator) error { + if c.configureContainerLinkError != nil { + return c.configureContainerLinkError + } + hostIface := ¤t.Interface{} + containerIface := ¤t.Interface{Name: containerIfaceName, Sandbox: containerNetNS} + if podSriovVFDeviceID != "" { + hostIface.Name = containerIfaceName + } else { + hostIface.Name = c.hostIfaceName + hostIface.Mac = hostIfaceMAC + containerIface.Mac = containerMAC + } + result.Interfaces = []*current.Interface{hostIface, containerIface} 
+ return nil +} + +func (c *fakeInterfaceConfigurator) removeContainerLink(containerID, hostInterfaceName string) error { + if c.removeContainerLinkError != nil { + return c.removeContainerLinkError + } + c.hostIfaceName = "" + return nil +} + +func (c *fakeInterfaceConfigurator) advertiseContainerAddr(containerNetNS string, containerIfaceName string, result *current.Result) error { + return c.advertiseContainerAddrError +} + +func (c *fakeInterfaceConfigurator) validateVFRepInterface(sriovVFDeviceID string) (string, error) { + return c.hostIfaceName, c.validateVFRepInterfaceError +} + +func (c *fakeInterfaceConfigurator) validateContainerPeerInterface(interfaces []*current.Interface, containerVeth *vethPair) (*vethPair, error) { + return containerVeth, c.validateContainerPeerInterfaceError +} + +func (c *fakeInterfaceConfigurator) getInterceptedInterfaces(sandbox string, containerNetNS string, containerIFDev string) (*current.Interface, *current.Interface, error) { + containerIface := ¤t.Interface{ + Name: containerIFDev, + Sandbox: sandbox, + Mac: c.containerMAC, + } + hostIface := ¤t.Interface{ + Name: c.hostIfaceName, + Mac: c.hostIfaceMAC, + } + return containerIface, hostIface, c.getInterceptedInterfacesError +} + +func (c *fakeInterfaceConfigurator) addPostInterfaceCreateHook(containerID, endpointName string, containerAccess *containerAccessArbitrator, hook postInterfaceCreateHook) error { + return nil +} + +func (c *fakeInterfaceConfigurator) checkContainerInterface(containerNetns, containerID string, containerIface *current.Interface, containerIPs []*current.IPConfig, containerRoutes []*cnitypes.Route, sriovVFDeviceID string) (interface{}, error) { + if c.checkContainerInterfaceError != nil { + return nil, c.checkContainerInterfaceError + } + if sriovVFDeviceID != "" { + return c.containerVFLink, nil + } + return c.containerVethPair, nil +} + +func newTestInterfaceConfigurator() *fakeInterfaceConfigurator { + return &fakeInterfaceConfigurator{ + 
containerMAC: "01:02:03:04:05:06", + hostIfaceMAC: hostIfaceMAC, + } +} + +func TestConnectInterceptedInterface(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + defer func() { + getNSPath = util.GetNSPath + restoreSecondaryIPAM() + }() + testPodName := "test-pod" + podNamespace := testPodNamespace + hostInterfaceName := util.GenerateContainerInterfaceName(testPodName, testPodNamespace, testPodInfraContainerID) + containerID := generateUUID(t) + containerNetNS := "container-ns" + containerDev := "eth0" + + createIfaceCreator := func(getInterceptedInterfacesError error) *fakeInterfaceConfigurator { + testIfaceConfigurator := newTestInterfaceConfigurator() + testIfaceConfigurator.hostIfaceName = hostInterfaceName + if getInterceptedInterfacesError != nil { + testIfaceConfigurator.getInterceptedInterfacesError = getInterceptedInterfacesError + } + return testIfaceConfigurator + } + for _, tc := range []struct { + name string + getInterfaceErr error + getNSPathErr error + migratedRoute bool + migrateRouteErr error + connectedOVS bool + createOVSPortErr error + getOFPortErr error + installPodFlowErr error + expectedErr bool + }{ + { + name: "error-get-net-ns", + getNSPathErr: fmt.Errorf("failed to open netns"), + expectedErr: true, + }, + { + name: "error-get-intercepted-interfaces", + getInterfaceErr: fmt.Errorf("unable to get intercepted interfaces"), + expectedErr: true, + }, + { + name: "error-migrate-route", + migratedRoute: true, + migrateRouteErr: fmt.Errorf("unable to get host interface"), + expectedErr: true, + }, + { + name: "error-ovs-create-port", + migratedRoute: true, + connectedOVS: true, + createOVSPortErr: ovsconfig.NewTransactionError(fmt.Errorf("unable to create OVS port"), true), + expectedErr: true, + }, + { + name: "error-ovs-get-ofport", + migratedRoute: true, + connectedOVS: true, + getOFPortErr: ovsconfig.NewTransactionError(fmt.Errorf("timeout to get OpenFlow port"), true), + expectedErr: true, + }, + { + 
name: "error-ovs-install-flows", + migratedRoute: true, + connectedOVS: true, + installPodFlowErr: fmt.Errorf("failed to install Pod OpenFlow"), + expectedErr: true, + }, + { + name: "success", + migratedRoute: true, + connectedOVS: true, + expectedErr: false, + }, + } { + t.Run(tc.name, func(t *testing.T) { + podConfigurator := createPodConfigurator(controller, createIfaceCreator(tc.getInterfaceErr)) + getNSPath = func(netnsName string) (string, error) { + return netnsName, tc.getNSPathErr + } + if tc.migratedRoute { + routeMock.EXPECT().MigrateRoutesToGw(hostInterfaceName).Return(tc.migrateRouteErr) + } + ovsPortID := generateUUID(t) + if tc.connectedOVS { + mockOVSBridgeClient.EXPECT().CreatePort(hostInterfaceName, gomock.Any(), gomock.Any()).Return(ovsPortID, tc.createOVSPortErr).Times(1) + if tc.createOVSPortErr == nil { + mockOVSBridgeClient.EXPECT().GetOFPort(hostInterfaceName, false).Return(int32(100), tc.getOFPortErr).Times(1) + if tc.getOFPortErr == nil { + mockOFClient.EXPECT().InstallPodFlows(hostInterfaceName, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(tc.installPodFlowErr).Times(1) + } + if tc.getOFPortErr != nil || tc.installPodFlowErr != nil { + mockOVSBridgeClient.EXPECT().DeletePort(ovsPortID).Times(1) + } + } + } + err := podConfigurator.connectInterceptedInterface(podName, podNamespace, containerID, containerNetNS, containerDev, ipamResult.IPs, nil) + if tc.expectedErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + containerConfig, exists := ifaceStore.GetContainerInterface(containerID) + assert.True(t, exists) + assert.Equal(t, containerID, containerConfig.ContainerID) + assert.Equal(t, ovsPortID, containerConfig.OVSPortConfig.PortUUID) + } + }) + } +} + +func TestCreateOVSPort(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + containerID := generateUUID(t) + podName := "p0" + podNameSpace := testPodNamespace + + for _, tc := range []struct { + name string 
+ portName string + portType int + vlanID uint16 + createPortCount int + createAccessPortCount int + }{ + { + name: "create-general-port", + portName: "p1", + portType: defaultOVSInterfaceType, + vlanID: 0, + createPortCount: 1, + }, { + name: "create-access-port", + portName: "p3", + portType: defaultOVSInterfaceType, + vlanID: 10, + createAccessPortCount: 1, + }, + } { + t.Run(tc.name, func(t *testing.T) { + testIfaceConfigurator := &fakeInterfaceConfigurator{ovsInterfaceTypeMapping: map[string]int{tc.portName: tc.portType}} + podConfigurator := createPodConfigurator(controller, testIfaceConfigurator) + containerConfig := buildContainerConfig(tc.portName, containerID, podName, podNameSpace, ¤t.Interface{Mac: "01:02:03:04:05:06"}, ipamResult.IPs, tc.vlanID) + attachInfo := BuildOVSPortExternalIDs(containerConfig) + if tc.createPortCount > 0 { + mockOVSBridgeClient.EXPECT().CreatePort(tc.portName, tc.portName, attachInfo).Times(tc.createPortCount).Return(generateUUID(t), nil) + } + if tc.createAccessPortCount > 0 { + mockOVSBridgeClient.EXPECT().CreateAccessPort(tc.portName, tc.portName, attachInfo, tc.vlanID).Times(tc.createAccessPortCount).Return(generateUUID(t), nil) + } + _, err := podConfigurator.createOVSPort(tc.portName, attachInfo, tc.vlanID) + assert.NoError(t, err) + }) + } +} + +func TestParseOVSPortInterfaceConfig(t *testing.T) { + containerID := generateUUID(t) + portUUID := generateUUID(t) + ofPort := int32(1) + containerIPs := "1.1.1.2,aabb:1122::101:102" + parsedIPs := []net.IP{net.ParseIP("1.1.1.2"), net.ParseIP("aabb:1122::101:102")} + containerMACStr := "11:22:33:44:55:66" + containerMAC, _ := net.ParseMAC(containerMACStr) + podName := "pod0" + portName := "p0" + for _, tc := range []struct { + name string + portData *ovsconfig.OVSPortData + portConfig *interfacestore.OVSPortConfig + ifaceConfig *interfacestore.InterfaceConfig + }{ + { + name: "no-externalIDs", + portData: &ovsconfig.OVSPortData{ + Name: portName, + }, + portConfig: 
&interfacestore.OVSPortConfig{ + PortUUID: portUUID, + OFPort: ofPort, + }, + }, + { + name: "no-containerID", + portData: &ovsconfig.OVSPortData{ + Name: portName, + ExternalIDs: map[string]string{ + ovsExternalIDIP: containerIPs, + ovsExternalIDMAC: containerMACStr, + ovsExternalIDPodName: podName, + ovsExternalIDPodNamespace: testPodNamespace, + }, + }, + portConfig: &interfacestore.OVSPortConfig{ + PortUUID: portUUID, + OFPort: ofPort, + }, + }, + { + name: "invalid-MAC", + portData: &ovsconfig.OVSPortData{ + Name: portName, + ExternalIDs: map[string]string{ + ovsExternalIDContainerID: containerID, + ovsExternalIDIP: containerIPs, + ovsExternalIDMAC: "1:2:3:4:5:6", + ovsExternalIDPodName: podName, + ovsExternalIDPodNamespace: testPodNamespace, + }, + }, + portConfig: &interfacestore.OVSPortConfig{ + PortUUID: portUUID, + OFPort: ofPort, + }, + ifaceConfig: &interfacestore.InterfaceConfig{ + Type: interfacestore.ContainerInterface, + InterfaceName: portName, + IPs: parsedIPs, + OVSPortConfig: &interfacestore.OVSPortConfig{ + PortUUID: portUUID, + OFPort: ofPort, + }, + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{ + ContainerID: containerID, + PodName: podName, + PodNamespace: testPodNamespace, + }, + }, + }, + { + name: "valid-configuration", + portData: &ovsconfig.OVSPortData{ + Name: portName, + ExternalIDs: map[string]string{ + ovsExternalIDContainerID: containerID, + ovsExternalIDIP: containerIPs, + ovsExternalIDMAC: containerMACStr, + ovsExternalIDPodName: podName, + ovsExternalIDPodNamespace: testPodNamespace, + }, + }, + portConfig: &interfacestore.OVSPortConfig{ + PortUUID: portUUID, + OFPort: ofPort, + }, + ifaceConfig: &interfacestore.InterfaceConfig{ + Type: interfacestore.ContainerInterface, + InterfaceName: portName, + IPs: parsedIPs, + MAC: containerMAC, + OVSPortConfig: &interfacestore.OVSPortConfig{ + PortUUID: portUUID, + OFPort: ofPort, + }, + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{ + 
ContainerID: containerID, + PodName: podName, + PodNamespace: testPodNamespace, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + iface := ParseOVSPortInterfaceConfig(tc.portData, tc.portConfig) + assert.Equal(t, tc.ifaceConfig, iface) + }) + } +} + +func TestCheckHostInterface(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + hostIfaceName := "port1" + containerID := generateUUID(t) + containerIntf := ¤t.Interface{Name: ifname, Sandbox: netns, Mac: "01:02:03:04:05:06"} + interfaces := []*current.Interface{containerIntf, {Name: hostIfaceName}} + containeIPs := ipamResult.IPs + ifaceMAC, _ := net.ParseMAC("01:02:03:04:05:06") + containerInterface := interfacestore.NewContainerInterface(hostIfaceName, containerID, "pod1", testPodNamespace, ifaceMAC, []net.IP{containerIP}, 1) + containerInterface.OVSPortConfig = &interfacestore.OVSPortConfig{ + PortUUID: generateUUID(t), + OFPort: int32(10), + } + + for _, tc := range []struct { + name string + vfRepIfaceError error + containerPeerErr error + containerIfKind interface{} + sriovVFDeviceID string + expectedErr error + }{ + { + name: "vf-validation-failure", + vfRepIfaceError: fmt.Errorf("fail to validate VF representor"), + sriovVFDeviceID: "vf1", + expectedErr: fmt.Errorf("fail to validate VF representor"), + }, { + name: "vf-validation-success", + sriovVFDeviceID: "vf1", + }, { + name: "vethpair-validation-failure", + containerPeerErr: fmt.Errorf("fail to validate container peer"), + containerIfKind: &vethPair{name: hostIfaceName, ifIndex: 10, peerIndex: 20}, + expectedErr: fmt.Errorf("fail to validate container peer"), + }, { + name: "vethpair-validation-success", + containerIfKind: &vethPair{name: hostIfaceName, ifIndex: 10, peerIndex: 20}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + fakeIfaceConfigrator := newTestInterfaceConfigurator() + fakeIfaceConfigrator.hostIfaceName = hostIfaceName + fakeIfaceConfigrator.validateVFRepInterfaceError = tc.vfRepIfaceError 
+ fakeIfaceConfigrator.validateContainerPeerInterfaceError = tc.containerPeerErr + configurator := createPodConfigurator(controller, fakeIfaceConfigrator) + configurator.ifaceStore.AddInterface(containerInterface) + err := configurator.checkHostInterface(containerID, containerIntf, tc.containerIfKind, containeIPs, interfaces, tc.sriovVFDeviceID) + if tc.expectedErr != nil { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestConfigureSriovSecondaryInterface(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + containerID := generateUUID(t) + containerNS := "containerNS" + + for _, tc := range []struct { + name string + podSriovVFDeviceID string + configureLinkErr error + advertiseErr error + expectedErr error + }{ + { + name: "sriov-vf-not-set", + expectedErr: fmt.Errorf("error getting the Pod SR-IOV VF device ID"), + }, { + name: "configure-link-failure", + podSriovVFDeviceID: "vf1", + configureLinkErr: fmt.Errorf("unable to create sriov VF link"), + expectedErr: fmt.Errorf("unable to create sriov VF link"), + }, { + name: "advertise-failure", + podSriovVFDeviceID: "vf2", + advertiseErr: fmt.Errorf("unable to advertise on the sriov link"), + expectedErr: fmt.Errorf("failed to advertise IP address for container %s: unable to advertise on the sriov link", containerID), + }, { + name: "success", + podSriovVFDeviceID: "vf3", + }, + } { + t.Run(tc.name, func(t *testing.T) { + ifaceConfigurator := newTestInterfaceConfigurator() + ifaceConfigurator.configureContainerLinkError = tc.configureLinkErr + ifaceConfigurator.advertiseContainerAddrError = tc.advertiseErr + podConfigurator := createPodConfigurator(controller, ifaceConfigurator) + err := podConfigurator.ConfigureSriovSecondaryInterface(podName, testPodNamespace, containerID, containerNS, containerIfaceName, mtu, tc.podSriovVFDeviceID, ¤t.Result{}) + assert.Equal(t, tc.expectedErr, err) + }) + } +} + +func createPodConfigurator(controller 
*gomock.Controller, testIfaceConfigurator *fakeInterfaceConfigurator) *podConfigurator { + gwMAC, _ := net.ParseMAC("00:00:11:11:11:11") + mockOVSBridgeClient = ovsconfigtest.NewMockOVSBridgeClient(controller) + mockOFClient = openflowtest.NewMockClient(controller) + ifaceStore = interfacestore.NewInterfaceStore() + routeMock = routetest.NewMockInterface(controller) + configurator, _ := newPodConfigurator(mockOVSBridgeClient, mockOFClient, routeMock, ifaceStore, gwMAC, "system", false, channel.NewSubscribableChannel("PodUpdate", 100), nil, false) + configurator.ifConfigurator = testIfaceConfigurator + return configurator +} diff --git a/pkg/agent/cniserver/pod_configuration_windows.go b/pkg/agent/cniserver/pod_configuration_windows.go index 230a27d71e3..c1d566a7e57 100644 --- a/pkg/agent/cniserver/pod_configuration_windows.go +++ b/pkg/agent/cniserver/pod_configuration_windows.go @@ -86,7 +86,7 @@ func (pc *podConfigurator) connectInterfaceToOVS( // If one day Containerd runtime changes the behavior and container interface can be created when attaching // HNSEndpoint/HostComputeEndpoint, the current implementation will still work. It will choose the synchronized // way to create OVS port. 
- if util.HostInterfaceExists(hostIfAlias) { + if hostInterfaceExistsFunc(hostIfAlias) { return containerConfig, pc.connectInterfaceToOVSCommon(ovsPortName, containerConfig) } klog.V(2).Infof("Adding OVS port %s for container %s", ovsPortName, containerID) diff --git a/pkg/agent/cniserver/server.go b/pkg/agent/cniserver/server.go index 8eb43d72af8..d28692f1840 100644 --- a/pkg/agent/cniserver/server.go +++ b/pkg/agent/cniserver/server.go @@ -389,9 +389,16 @@ func (s *CNIServer) GetPodConfigurator() *podConfigurator { return s.podConfigurator } +// Declared variables for testing +var ( + ipamSecondaryNetworkAdd = ipam.SecondaryNetworkAdd + ipamSecondaryNetworkDel = ipam.SecondaryNetworkDel + ipamSecondaryNetworkCheck = ipam.SecondaryNetworkCheck +) + // Antrea IPAM for secondary network. func (s *CNIServer) ipamAdd(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) { - ipamResult, err := ipam.SecondaryNetworkAdd(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.NetworkConfig) + ipamResult, err := ipamSecondaryNetworkAdd(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.NetworkConfig) if err != nil { return s.ipamFailureResponse(err), nil } @@ -400,14 +407,14 @@ func (s *CNIServer) ipamAdd(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) } func (s *CNIServer) ipamDel(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) { - if err := ipam.SecondaryNetworkDel(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.NetworkConfig); err != nil { + if err := ipamSecondaryNetworkDel(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.NetworkConfig); err != nil { return s.ipamFailureResponse(err), nil } return &cnipb.CniCmdResponse{CniResult: []byte("")}, nil } func (s *CNIServer) ipamCheck(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) { - if err := ipam.SecondaryNetworkCheck(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.NetworkConfig); err != nil { + if err := ipamSecondaryNetworkCheck(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.NetworkConfig); err != 
nil { return s.ipamFailureResponse(err), nil } // CNI CHECK is not implemented for secondary network IPAM, and so the func will always @@ -473,7 +480,7 @@ func (s *CNIServer) CmdAdd(ctx context.Context, request *cnipb.CniCmdRequest) (* var ipamResult *ipam.IPAMResult var err error // Only allocate IP when handling CNI request from infra container. - // On windows platform, CNI plugin is called for all containers in a Pod. + // On Windows platform, CNI plugin is called for all containers in a Pod. if !isInfraContainer { if ipamResult, _ = ipam.GetIPFromCache(infraContainer); ipamResult == nil { return nil, fmt.Errorf("allocated IP address not found") diff --git a/pkg/agent/cniserver/server_linux_test.go b/pkg/agent/cniserver/server_linux_test.go new file mode 100644 index 00000000000..50018155f4c --- /dev/null +++ b/pkg/agent/cniserver/server_linux_test.go @@ -0,0 +1,743 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cniserver + +import ( + "context" + "errors" + "fmt" + "net" + "testing" + + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/golang/mock/gomock" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + fakeclientset "k8s.io/client-go/kubernetes/fake" + + "antrea.io/antrea/pkg/agent/cniserver/ipam" + ipamtest "antrea.io/antrea/pkg/agent/cniserver/ipam/testing" + cniservertest "antrea.io/antrea/pkg/agent/cniserver/testing" + types "antrea.io/antrea/pkg/agent/cniserver/types" + "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/interfacestore" + openflowtest "antrea.io/antrea/pkg/agent/openflow/testing" + routetest "antrea.io/antrea/pkg/agent/route/testing" + "antrea.io/antrea/pkg/agent/secondarynetwork/cnipodcache" + "antrea.io/antrea/pkg/agent/util" + cnipb "antrea.io/antrea/pkg/apis/cni/v1beta1" + "antrea.io/antrea/pkg/ovs/ovsconfig" + ovsconfigtest "antrea.io/antrea/pkg/ovs/ovsconfig/testing" + "antrea.io/antrea/pkg/util/channel" +) + +func TestValidatePrevResult(t *testing.T) { + cniServer := newCNIServer(t) + cniVersion := "0.4.0" + networkCfg := generateNetworkConfiguration("", cniVersion, "", testIpamType) + k8sPodArgs := &types.K8sArgs{} + cnitypes.LoadArgs(args, k8sPodArgs) + networkCfg.PrevResult = nil + ipamResult := ipamtest.GenerateIPAMResult(cniVersion, ips, routes, dns) + networkCfg.RawPrevResult, _ = translateRawPrevResult(ipamResult, cniVersion) + + prevResult, _ := cniServer.parsePrevResultFromRequest(networkCfg) + containerIface := ¤t.Interface{Name: ifname, Sandbox: netns} + containerID := uuid.New().String() + hostIfaceName := util.GenerateContainerInterfaceName(testPodNameA, testPodNamespace, containerID) + hostIface := ¤t.Interface{Name: hostIfaceName} + prevResult.Interfaces = 
[]*current.Interface{hostIface, containerIface} + + baseCNIConfig := func() *CNIConfig { + cniConfig := &CNIConfig{NetworkConfig: networkCfg, CniCmdArgs: &cnipb.CniCmdArgs{Args: args}} + cniConfig.ContainerId = containerID + return cniConfig + } + + t.Run("Invalid container interface veth", func(t *testing.T) { + cniConfig := baseCNIConfig() + cniConfig.Ifname = "invalid_iface" // invalid + sriovVFDeviceID := "" + response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) + checkErrorResponse( + t, response, cnipb.ErrorCode_INVALID_NETWORK_CONFIG, + "prevResult does not match network configuration", + ) + }) + + t.Run("Invalid container interface SR-IOV VF", func(t *testing.T) { + cniConfig := baseCNIConfig() + cniConfig.Ifname = "invalid_iface" // invalid + sriovVFDeviceID := "0000:03:00.6" + response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) + checkErrorResponse( + t, response, cnipb.ErrorCode_INVALID_NETWORK_CONFIG, + "prevResult does not match network configuration", + ) + }) + + t.Run("Interface check failure veth", func(t *testing.T) { + cniConfig := baseCNIConfig() + cniConfig.Ifname = ifname + cniConfig.Netns = "invalid_netns" + sriovVFDeviceID := "" + cniServer.podConfigurator, _ = newPodConfigurator(nil, nil, nil, nil, nil, "", false, channel.NewSubscribableChannel("PodUpdate", 100), nil, false) + response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) + checkErrorResponse(t, response, cnipb.ErrorCode_CHECK_INTERFACE_FAILURE, "") + }) + + t.Run("Interface check failure SR-IOV VF", func(t *testing.T) { + cniConfig := baseCNIConfig() + cniConfig.Ifname = ifname + cniConfig.Netns = "invalid_netns" + sriovVFDeviceID := "0000:03:00.6" + prevResult.Interfaces = []*current.Interface{hostIface, containerIface} + cniServer.podConfigurator, _ = newPodConfigurator(nil, nil, nil, nil, nil, "", true, channel.NewSubscribableChannel("PodUpdate", 100), nil, 
false) + response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) + checkErrorResponse(t, response, cnipb.ErrorCode_CHECK_INTERFACE_FAILURE, "") + }) +} + +func TestRemoveInterface(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + mockOVSBridgeClient = ovsconfigtest.NewMockOVSBridgeClient(controller) + mockOFClient = openflowtest.NewMockClient(controller) + ifaceStore = interfacestore.NewInterfaceStore() + routeMock = routetest.NewMockInterface(controller) + gwMAC, _ := net.ParseMAC("00:00:11:11:11:11") + podConfigurator, err := newPodConfigurator(mockOVSBridgeClient, mockOFClient, routeMock, ifaceStore, gwMAC, "system", false, channel.NewSubscribableChannel("PodUpdate", 100), nil, false) + require.Nil(t, err, "No error expected in podConfigurator constructor") + + containerMAC, _ := net.ParseMAC("aa:bb:cc:dd:ee:ff") + containerIP := net.ParseIP("1.1.1.1") + + var cniConfig *CNIConfig + var containerID string + var podName string + var hostIfaceName string + var fakePortUUID string + var containerConfig *interfacestore.InterfaceConfig + + setup := func(name string) { + containerID = uuid.New().String() + podName = name + hostIfaceName = util.GenerateContainerInterfaceName(podName, testPodNamespace, containerID) + fakePortUUID = uuid.New().String() + + netcfg := generateNetworkConfiguration("", supportedCNIVersion, "", testIpamType) + cniConfig = &CNIConfig{NetworkConfig: netcfg, CniCmdArgs: &cnipb.CniCmdArgs{}} + cniConfig.Ifname = "eth0" + cniConfig.ContainerId = containerID + cniConfig.Netns = "" + + containerConfig = interfacestore.NewContainerInterface( + hostIfaceName, + containerID, + podName, + testPodNamespace, + containerMAC, + []net.IP{containerIP}, + 0) + containerConfig.OVSPortConfig = &interfacestore.OVSPortConfig{PortUUID: fakePortUUID, OFPort: 0} + } + + t.Run("Successful removal", func(t *testing.T) { + setup("test1") + ifaceStore.AddInterface(containerConfig) + + 
mockOFClient.EXPECT().UninstallPodFlows(hostIfaceName).Return(nil) + mockOVSBridgeClient.EXPECT().DeletePort(fakePortUUID).Return(nil) + routeMock.EXPECT().DeleteLocalAntreaFlexibleIPAMPodRule([]net.IP{containerIP}).Return(nil).Times(1) + + err := podConfigurator.removeInterfaces(containerID) + require.Nil(t, err, "Failed to remove interface") + _, found := ifaceStore.GetContainerInterface(containerID) + assert.False(t, found, "Interface should not be in the local cache anymore") + }) + + t.Run("Error in OVS port delete", func(t *testing.T) { + setup("test2") + ifaceStore.AddInterface(containerConfig) + + mockOVSBridgeClient.EXPECT().DeletePort(fakePortUUID).Return(ovsconfig.NewTransactionError(fmt.Errorf("error while deleting OVS port"), true)) + mockOFClient.EXPECT().UninstallPodFlows(hostIfaceName).Return(nil) + + err := podConfigurator.removeInterfaces(containerID) + require.NotNil(t, err, "Expected interface remove to fail") + _, found := ifaceStore.GetContainerInterface(containerID) + assert.True(t, found, "Interface should still be in local cache because of port deletion failure") + }) + + t.Run("Error in Pod flows delete", func(t *testing.T) { + setup("test3") + ifaceStore.AddInterface(containerConfig) + + mockOFClient.EXPECT().UninstallPodFlows(hostIfaceName).Return(fmt.Errorf("failed to delete openflow entry")) + + err := podConfigurator.removeInterfaces(containerID) + require.NotNil(t, err, "Expected interface remove to fail") + _, found := ifaceStore.GetContainerInterface(containerID) + assert.True(t, found, "Interface should still be in local cache because of flow deletion failure") + }) +} + +func mockCNIServer(t *testing.T, controller *gomock.Controller, ipamDriver ipam.IPAMDriver, ipamType string, enableSecondaryNetworkIPAM, isChaining, secondaryNetworkEnabled bool) *CNIServer { + mockOVSBridgeClient = ovsconfigtest.NewMockOVSBridgeClient(controller) + mockOFClient = openflowtest.NewMockClient(controller) + ifaceStore = 
interfacestore.NewInterfaceStore() + routeMock = routetest.NewMockInterface(controller) + ipam.ResetIPAMDriver(ipamType, ipamDriver) + cniServer := newCNIServer(t) + cniServer.routeClient = routeMock + _, nodePodCIDRv4, _ := net.ParseCIDR("192.168.1.0/24") + gwMAC, _ := net.ParseMAC("00:00:11:11:11:11") + gateway := &config.GatewayConfig{Name: "", IPv4: gwIPv4, MAC: gwMAC} + cniServer.nodeConfig = &config.NodeConfig{Name: "node1", PodIPv4CIDR: nodePodCIDRv4, GatewayConfig: gateway} + cniServer.podConfigurator, _ = newPodConfigurator(mockOVSBridgeClient, mockOFClient, routeMock, ifaceStore, gwMAC, "system", false, channel.NewSubscribableChannel("PodUpdate", 100), nil, false) + cniServer.enableSecondaryNetworkIPAM = enableSecondaryNetworkIPAM + cniServer.isChaining = isChaining + cniServer.secondaryNetworkEnabled = secondaryNetworkEnabled + if secondaryNetworkEnabled { + cniServer.podConfigurator.podInfoStore = cnipodcache.NewCNIPodInfoStore() + } + return cniServer +} + +func prepareSetup(t *testing.T, name string, cniType string, result *current.Result, ipamType string, withPreviousResult bool) (*cnipb.CniCmdRequest, string) { + networkCfg := generateNetworkConfiguration("", supportedCNIVersion, cniType, ipamType) + networkCfg.DNS = cnitypes.DNS{ + Nameservers: dns, + } + if withPreviousResult { + networkCfg.RawPrevResult, _ = translateRawPrevResult(result, supportedCNIVersion) + } + podArgs := cniservertest.GenerateCNIArgs(name, testPodNamespace, testPodInfraContainerID) + requestMsg, _ := newRequest(podArgs, networkCfg, "", t) + return requestMsg, util.GenerateContainerInterfaceName(name, testPodNamespace, testPodInfraContainerID) +} + +func TestCmdAdd(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + ipamMock := ipamtest.NewMockIPAMDriver(controller) + ctx := context.TODO() + + getNSPath = func(netnsName string) (string, error) { + return netnsName, nil + } + defer func() { + getNSPath = util.GetNSPath + restoreSecondaryIPAM() 
+ }() + + for _, tc := range []struct { + name string + podName string + ipamType string + ipamAdd bool + ipamError error + cniType string + enableSecondaryNetworkIPAM bool + isChaining bool + secondaryNetworkEnabled bool + connectOVS bool + migrateRoute bool + addLocalIPAMRoute bool + addLocalIPAMRouteError error + containerIfaceExist bool + response *cnipb.CniCmdResponse + }{ + { + name: "secondary-IPAM", + podName: "pod0", + ipamType: ipam.AntreaIPAMType, + cniType: "cniType", + enableSecondaryNetworkIPAM: true, + isChaining: false, + ipamAdd: true, + response: resultToResponse(ipamResult), + }, { + name: "secondary-IPAM-failure", + podName: "pod1", + ipamType: ipam.AntreaIPAMType, + cniType: "cniType", + enableSecondaryNetworkIPAM: true, + isChaining: false, + ipamAdd: true, + ipamError: fmt.Errorf("failed to parse static addresses in the IPAM config"), + response: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_IPAM_FAILURE, + Message: "failed to parse static addresses in the IPAM config", + }, + }, + }, { + name: "chaining", + podName: "pod2", + ipamType: "test-cni-ipam", + enableSecondaryNetworkIPAM: false, + isChaining: true, + connectOVS: true, + migrateRoute: true, + containerIfaceExist: true, + }, { + name: "add-general-cni", + podName: "pod3", + ipamType: "test-cni-ipam", + ipamAdd: true, + enableSecondaryNetworkIPAM: false, + isChaining: false, + connectOVS: true, + addLocalIPAMRoute: true, + containerIfaceExist: true, + }, { + name: "add-general-cni-failure", + podName: "pod3", + ipamType: "test-cni-ipam", + ipamAdd: true, + enableSecondaryNetworkIPAM: false, + isChaining: false, + connectOVS: true, + addLocalIPAMRoute: true, + addLocalIPAMRouteError: fmt.Errorf("failed to configure route"), + containerIfaceExist: false, + }, { + name: "add-secondary-network", + podName: "pod4", + ipamType: "test-cni-ipam", + ipamAdd: true, + enableSecondaryNetworkIPAM: false, + secondaryNetworkEnabled: true, + isChaining: false, + connectOVS: 
true, + addLocalIPAMRoute: true, + containerIfaceExist: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + cniserver := mockCNIServer(t, controller, ipamMock, tc.ipamType, tc.enableSecondaryNetworkIPAM, tc.isChaining, tc.secondaryNetworkEnabled) + testIfaceConfigurator := newTestInterfaceConfigurator() + requestMsg, hostInterfaceName := prepareSetup(t, tc.podName, tc.cniType, ipamResult, tc.ipamType, true) + testIfaceConfigurator.hostIfaceName = hostInterfaceName + cniserver.podConfigurator.ifConfigurator = testIfaceConfigurator + if tc.ipamAdd { + if tc.enableSecondaryNetworkIPAM { + mockIPAMResult := ipamResult + if tc.ipamError != nil { + mockIPAMResult = nil + } + ipamSecondaryNetworkAdd = func(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) (*current.Result, error) { + return mockIPAMResult, tc.ipamError + } + } else { + ipamMock.EXPECT().Add(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, &ipam.IPAMResult{Result: *ipamResult}, tc.ipamError).Times(1) + } + } + if tc.migrateRoute { + routeMock.EXPECT().MigrateRoutesToGw(hostInterfaceName).Return(nil).Times(1) + } + if tc.addLocalIPAMRoute { + routeMock.EXPECT().AddLocalAntreaFlexibleIPAMPodRule(gomock.Any()).Return(tc.addLocalIPAMRouteError).Times(1) + } + ovsPortID := generateUUID(t) + if tc.connectOVS { + mockOVSBridgeClient.EXPECT().CreatePort(hostInterfaceName, gomock.Any(), gomock.Any()).Return(ovsPortID, nil).Times(1) + mockOVSBridgeClient.EXPECT().GetOFPort(hostInterfaceName, false).Return(int32(100), nil).Times(1) + mockOFClient.EXPECT().InstallPodFlows(hostInterfaceName, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + } + if tc.addLocalIPAMRouteError != nil { + mockOFClient.EXPECT().UninstallPodFlows(hostInterfaceName).Return(nil).Times(1) + mockOVSBridgeClient.EXPECT().DeletePort(ovsPortID).Return(nil).Times(1) + ipamMock.EXPECT().Del(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + } + resp, 
err := cniserver.CmdAdd(ctx, requestMsg) + assert.NoError(t, err) + assert.NotNil(t, resp) + containerID := requestMsg.CniArgs.ContainerId + _, exists := ifaceStore.GetContainerInterface(containerID) + assert.Equal(t, exists, tc.containerIfaceExist) + if tc.response != nil { + assert.Equal(t, tc.response, resp) + } else if tc.addLocalIPAMRouteError != nil { + assert.Equal(t, cnipb.ErrorCode_CONFIG_INTERFACE_FAILURE, resp.Error.Code) + assert.Equal(t, "", testIfaceConfigurator.hostIfaceName) + } else if !tc.isChaining { + // The response with chaining mode uses the previous result provided in the CmdAdd request, so + // it is not checked in the test. + cniResult := *ipamResult + cniResult.Interfaces = []*current.Interface{ + {Name: hostInterfaceName, Mac: hostIfaceMAC, Sandbox: ""}, + {Name: "eth0", Mac: containerMAC, Sandbox: cniserver.hostNetNsPath(requestMsg.CniArgs.Netns)}, + } + successResponse := resultToResponse(&cniResult) + assert.Equal(t, successResponse, resp) + } + if tc.secondaryNetworkEnabled { + cniConfigInfo := cniserver.podConfigurator.podInfoStore.GetCNIConfigInfoByContainerID(tc.podName, testPodNamespace, containerID) + assert.NotNil(t, cniConfigInfo) + } + }) + } +} + +func TestCmdDel(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + defer restoreSecondaryIPAM() + + ipamMock := ipamtest.NewMockIPAMDriver(controller) + ovsPortID := generateUUID(t) + ovsPort := int32(100) + ctx := context.TODO() + + for _, tc := range []struct { + name string + podName string + ipamType string + ipamDel bool + ipamError error + cniType string + enableSecondaryNetworkIPAM bool + isChaining bool + secondaryNetworkEnabled bool + disconnectOVS bool + migrateRoute bool + delLocalIPAMRoute bool + delLocalIPAMRouteError error + response *cnipb.CniCmdResponse + }{ + { + name: "secondary-IPAM", + podName: "pod1", + ipamType: ipam.AntreaIPAMType, + cniType: "cniType", + ipamDel: true, + enableSecondaryNetworkIPAM: true, + isChaining: 
false, + response: emptyResponse, + }, + { + name: "secondary-IPAM-failure", + podName: "pod1", + ipamType: ipam.AntreaIPAMType, + cniType: "cniType", + ipamDel: true, + ipamError: fmt.Errorf("failed to delete secondary IPAM response"), + enableSecondaryNetworkIPAM: true, + isChaining: false, + response: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_IPAM_FAILURE, + Message: "failed to delete secondary IPAM response", + }, + }, + }, + { + name: "chaining", + podName: "pod2", + ipamType: "test-delete", + enableSecondaryNetworkIPAM: false, + isChaining: true, + migrateRoute: true, + disconnectOVS: true, + }, + { + name: "del-general-cni", + podName: "pod3", + ipamType: "test-delete", + ipamDel: true, + enableSecondaryNetworkIPAM: false, + isChaining: false, + disconnectOVS: true, + delLocalIPAMRoute: true, + }, + { + name: "del-general-cni-failure", + podName: "pod3", + ipamType: "test-delete", + ipamDel: true, + enableSecondaryNetworkIPAM: false, + isChaining: false, + disconnectOVS: true, + delLocalIPAMRoute: true, + delLocalIPAMRouteError: fmt.Errorf("unable to delete flexible IPAM rule"), + }, + { + name: "del-secondary-network", + podName: "pod4", + ipamType: "test-delete", + ipamDel: true, + enableSecondaryNetworkIPAM: false, + secondaryNetworkEnabled: true, + isChaining: false, + disconnectOVS: true, + delLocalIPAMRoute: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + cniserver := mockCNIServer(t, controller, ipamMock, tc.ipamType, tc.enableSecondaryNetworkIPAM, tc.isChaining, tc.secondaryNetworkEnabled) + requestMsg, hostInterfaceName := prepareSetup(t, tc.podName, tc.cniType, ipamResult, tc.ipamType, true) + containerID := requestMsg.CniArgs.ContainerId + containerIfaceConfig := interfacestore.NewContainerInterface(hostInterfaceName, containerID, tc.podName, testPodNamespace, containerVethMac, []net.IP{net.ParseIP("10.1.2.100")}, 0) + containerIfaceConfig.OVSPortConfig = &interfacestore.OVSPortConfig{PortUUID: ovsPortID, 
OFPort: ovsPort} + ifaceStore.AddInterface(containerIfaceConfig) + testIfaceConfigurator := newTestInterfaceConfigurator() + testIfaceConfigurator.hostIfaceName = hostInterfaceName + cniserver.podConfigurator.ifConfigurator = testIfaceConfigurator + if tc.secondaryNetworkEnabled { + cniInfo := &cnipodcache.CNIConfigInfo{CNIVersion: supportedCNIVersion, PodName: tc.podName, PodNameSpace: testPodNamespace, + ContainerID: containerID, ContainerNetNS: netns, PodCNIDeleted: false, + MTU: 1450} + cniserver.podConfigurator.podInfoStore.AddCNIConfigInfo(cniInfo) + } + if tc.ipamDel { + if tc.enableSecondaryNetworkIPAM { + ipamSecondaryNetworkDel = func(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) error { + return tc.ipamError + } + } else { + ipamMock.EXPECT().Del(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, tc.ipamError).Times(1) + } + } + if tc.disconnectOVS { + mockOVSBridgeClient.EXPECT().DeletePort(ovsPortID).Return(nil).Times(1) + mockOFClient.EXPECT().UninstallPodFlows(hostInterfaceName).Return(nil).Times(1) + } + if tc.migrateRoute { + routeMock.EXPECT().UnMigrateRoutesFromGw(gomock.Any(), "").Return(nil).Times(1) + } + if tc.delLocalIPAMRoute { + routeMock.EXPECT().DeleteLocalAntreaFlexibleIPAMPodRule(gomock.Any()).Return(tc.delLocalIPAMRouteError).Times(1) + } + resp, err := cniserver.CmdDel(ctx, requestMsg) + assert.NoError(t, err) + if tc.response != nil { + assert.Equal(t, tc.response, resp) + } else { + if tc.delLocalIPAMRouteError != nil { + assert.Equal(t, cnipb.ErrorCode_CONFIG_INTERFACE_FAILURE, resp.Error.Code) + } else { + assert.Equal(t, emptyResponse, resp) + } + } + if tc.secondaryNetworkEnabled { + cniConfigInfo := cniserver.podConfigurator.podInfoStore.GetCNIConfigInfoByContainerID(tc.podName, testPodNamespace, containerID) + assert.NotNil(t, cniConfigInfo) + assert.True(t, cniConfigInfo.PodCNIDeleted) + } + }) + } +} + +func TestCmdCheck(t *testing.T) { + controller := gomock.NewController(t) + 
defer controller.Finish() + defer restoreSecondaryIPAM() + + ipamMock := ipamtest.NewMockIPAMDriver(controller) + ovsPortID := generateUUID(t) + ovsPort := int32(100) + ctx := context.TODO() + + prepareRequest := func(name string, cniType string, ipamType string, withPreviousResult bool) (*cnipb.CniCmdRequest, string) { + hostInterfaceName := util.GenerateContainerInterfaceName(name, testPodNamespace, testPodInfraContainerID) + networkCfg := generateNetworkConfiguration("", supportedCNIVersion, cniType, ipamType) + if withPreviousResult { + prevResult := ipamResult + prevResult.Interfaces = []*current.Interface{ + {Name: hostInterfaceName}, + {Name: "eth0", Sandbox: netns, Mac: "01:02:03:04:05:06"}, + } + networkCfg.RawPrevResult, _ = translateRawPrevResult(prevResult, supportedCNIVersion) + } + podArgs := cniservertest.GenerateCNIArgs(name, testPodNamespace, testPodInfraContainerID) + requestMsg, containerID := newRequest(podArgs, networkCfg, "", t) + containerIfaceConfig := interfacestore.NewContainerInterface(hostInterfaceName, containerID, name, testPodNamespace, containerVethMac, []net.IP{net.ParseIP("10.1.2.100")}, 0) + containerIfaceConfig.OVSPortConfig = &interfacestore.OVSPortConfig{PortUUID: ovsPortID, OFPort: ovsPort} + ifaceStore.AddInterface(containerIfaceConfig) + return requestMsg, hostInterfaceName + } + t.Run("secondary-IPAM", func(t *testing.T) { + ipamType := ipam.AntreaIPAMType + cniserver := mockCNIServer(t, controller, ipamMock, ipamType, true, false, false) + requestMsg, _ := prepareRequest("pod0", "cniType", ipamType, false) + ipamSecondaryNetworkCheck = func(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) error { + return nil + } + resp, err := cniserver.CmdCheck(ctx, requestMsg) + assert.NoError(t, err) + assert.Equal(t, emptyResponse, resp) + }) + t.Run("secondary-IPAM-failure", func(t *testing.T) { + ipamType := ipam.AntreaIPAMType + cniserver := mockCNIServer(t, controller, ipamMock, ipamType, 
true, false, false) + requestMsg, _ := prepareRequest("pod0", "cniType", ipamType, false) + ipamSecondaryNetworkCheck = func(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) error { + return errors.New("failed to check secondary IPAM response") + } + resp, err := cniserver.CmdCheck(ctx, requestMsg) + assert.NoError(t, err) + expResponse := &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_IPAM_FAILURE, + Message: "failed to check secondary IPAM response", + }, + } + assert.Equal(t, expResponse, resp) + }) + t.Run("chaining", func(t *testing.T) { + ipamType := "test-check" + cniserver := mockCNIServer(t, controller, ipamMock, ipamType, false, true, false) + requestMsg, hostInterfaceName := prepareRequest("pod1", "", ipamType, true) + testIfaceConfigurator := newTestInterfaceConfigurator() + testIfaceConfigurator.hostIfaceName = hostInterfaceName + cniserver.podConfigurator.ifConfigurator = testIfaceConfigurator + resp, err := cniserver.CmdCheck(ctx, requestMsg) + assert.NoError(t, err) + assert.Equal(t, emptyResponse, resp) + }) + t.Run("check-general-cni", func(t *testing.T) { + ipamType := "test-check" + cniserver := mockCNIServer(t, controller, ipamMock, ipamType, false, false, false) + requestMsg, hostInterfaceName := prepareRequest("pod2", "", ipamType, true) + testIfaceConfigurator := newTestInterfaceConfigurator() + testIfaceConfigurator.hostIfaceName = hostInterfaceName + testIfaceConfigurator.containerVethPair = &vethPair{ + name: "eth0", + ifIndex: 20, + peerIndex: 40, + } + cniserver.podConfigurator.ifConfigurator = testIfaceConfigurator + ipamMock.EXPECT().Check(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + resp, err := cniserver.CmdCheck(ctx, requestMsg) + assert.NoError(t, err) + assert.Equal(t, emptyResponse, resp) + }) +} + +func TestReconcile(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + mockOVSBridgeClient = 
ovsconfigtest.NewMockOVSBridgeClient(controller) + mockOFClient = openflowtest.NewMockClient(controller) + ifaceStore = interfacestore.NewInterfaceStore() + routeMock = routetest.NewMockInterface(controller) + nodeName := "node1" + cniServer := newCNIServer(t) + cniServer.routeClient = routeMock + gwMAC, _ := net.ParseMAC("00:00:11:11:11:11") + cniServer.podConfigurator, _ = newPodConfigurator(mockOVSBridgeClient, mockOFClient, routeMock, ifaceStore, gwMAC, "system", false, channel.NewSubscribableChannel("PodUpdate", 100), nil, false) + cniServer.podConfigurator.ifConfigurator = newTestInterfaceConfigurator() + cniServer.nodeConfig = &config.NodeConfig{ + Name: nodeName, + } + pods := []runtime.Object{ + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "p1", + Namespace: testPodNamespace, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "p2", + Namespace: testPodNamespace, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + HostNetwork: true, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "p4", + Namespace: testPodNamespace, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + }, + }, + } + containerIfaces := map[string]*interfacestore.InterfaceConfig{ + "iface1": { + InterfaceName: "iface1", + Type: interfacestore.ContainerInterface, + OVSPortConfig: &interfacestore.OVSPortConfig{ + PortUUID: generateUUID(t), + OFPort: int32(3), + }, + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{ + PodName: "p1", + PodNamespace: testPodNamespace, + ContainerID: generateUUID(t), + }, + }, + "iface3": { + InterfaceName: "iface3", + Type: interfacestore.ContainerInterface, + OVSPortConfig: &interfacestore.OVSPortConfig{ + PortUUID: generateUUID(t), + OFPort: int32(4), + }, + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{ + PodName: "p3", + PodNamespace: testPodNamespace, + ContainerID: generateUUID(t), + }, + }, + "iface4": { + InterfaceName: "iface4", + Type: 
interfacestore.ContainerInterface, + OVSPortConfig: &interfacestore.OVSPortConfig{ + PortUUID: generateUUID(t), + OFPort: int32(-1), + }, + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{ + PodName: "p4", + PodNamespace: testPodNamespace, + ContainerID: generateUUID(t), + }, + }, + } + kubeClient := fakeclientset.NewSimpleClientset(pods...) + cniServer.kubeClient = kubeClient + for _, containerIface := range containerIfaces { + ifaceStore.AddInterface(containerIface) + } + mockOFClient.EXPECT().InstallPodFlows("iface1", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1) + iface := containerIfaces["iface3"] + mockOFClient.EXPECT().UninstallPodFlows("iface3").Return(nil).Times(1) + mockOVSBridgeClient.EXPECT().DeletePort(iface.PortUUID).Return(nil).Times(1) + routeMock.EXPECT().DeleteLocalAntreaFlexibleIPAMPodRule(gomock.Any()).Return(nil).Times(1) + err := cniServer.reconcile() + assert.NoError(t, err) + _, exists := ifaceStore.GetInterfaceByName("iface3") + assert.False(t, exists) +} + +func restoreSecondaryIPAM() { + ipamSecondaryNetworkAdd = ipam.SecondaryNetworkAdd + ipamSecondaryNetworkDel = ipam.SecondaryNetworkDel + ipamSecondaryNetworkCheck = ipam.SecondaryNetworkCheck +} diff --git a/pkg/agent/cniserver/server_test.go b/pkg/agent/cniserver/server_test.go index c2d8dea5e45..969cb1d96b4 100644 --- a/pkg/agent/cniserver/server_test.go +++ b/pkg/agent/cniserver/server_test.go @@ -12,10 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build linux -// +build linux - -// TODO: fix the CNI ADD test for Windows. 
+//go:build linux || windows +// +build linux windows package cniserver @@ -47,7 +45,6 @@ import ( "antrea.io/antrea/pkg/cni" "antrea.io/antrea/pkg/ovs/ovsconfig" ovsconfigtest "antrea.io/antrea/pkg/ovs/ovsconfig/testing" - "antrea.io/antrea/pkg/util/channel" ) const ( @@ -68,10 +65,18 @@ var ( routes = []string{"10.0.0.0/8,10.1.2.1", "0.0.0.0/0,10.1.2.1"} dns = []string{"192.168.100.1"} ips = []string{"10.1.2.100/24,10.1.2.1,4"} + ipamResult = ipamtest.GenerateIPAMResult(supportedCNIVersion, []string{"10.1.2.100/24,10.1.2.1,4"}, []string{"10.0.0.0/8,10.1.2.1", "0.0.0.0/0,10.1.2.1"}, []string{"192.168.100.1"}) args = cniservertest.GenerateCNIArgs(testPodNameA, testPodNamespace, testPodInfraContainerID) testNodeConfig *config.NodeConfig gwIPv4 net.IP gwIPv6 net.IP + + mockOVSBridgeClient *ovsconfigtest.MockOVSBridgeClient + routeMock *routetest.MockInterface + mockOFClient *openflowtest.MockClient + ifaceStore interfacestore.InterfaceStore + + emptyResponse = &cnipb.CniCmdResponse{CniResult: []byte("")} ) func TestLoadNetConfig(t *testing.T) { @@ -440,78 +445,6 @@ func TestValidateRequestMessage(t *testing.T) { } } -func TestValidatePrevResult(t *testing.T) { - cniServer := newCNIServer(t) - cniVersion := "0.4.0" - networkCfg := generateNetworkConfiguration("", cniVersion, "", testIpamType) - k8sPodArgs := &types.K8sArgs{} - cnitypes.LoadArgs(args, k8sPodArgs) - networkCfg.PrevResult = nil - ips := []string{"10.1.2.100/24,10.1.2.1,4"} - routes := []string{"10.0.0.0/8,10.1.2.1", "0.0.0.0/0,10.1.2.1"} - dns := []string{"192.168.100.1"} - ipamResult := ipamtest.GenerateIPAMResult(cniVersion, ips, routes, dns) - networkCfg.RawPrevResult, _ = translateRawPrevResult(ipamResult, cniVersion) - - prevResult, _ := cniServer.parsePrevResultFromRequest(networkCfg) - containerIface := ¤t.Interface{Name: ifname, Sandbox: netns} - containerID := uuid.New().String() - hostIfaceName := util.GenerateContainerInterfaceName(testPodNameA, testPodNamespace, containerID) - hostIface := 
¤t.Interface{Name: hostIfaceName} - - baseCNIConfig := func() *CNIConfig { - cniConfig := &CNIConfig{NetworkConfig: networkCfg, CniCmdArgs: &cnipb.CniCmdArgs{Args: args}} - cniConfig.ContainerId = containerID - return cniConfig - } - - t.Run("Invalid container interface veth", func(t *testing.T) { - cniConfig := baseCNIConfig() - cniConfig.Ifname = "invalid_iface" // invalid - sriovVFDeviceID := "" - prevResult.Interfaces = []*current.Interface{hostIface, containerIface} - response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) - checkErrorResponse( - t, response, cnipb.ErrorCode_INVALID_NETWORK_CONFIG, - "prevResult does not match network configuration", - ) - }) - - t.Run("Invalid container interface SR-IOV VF", func(t *testing.T) { - cniConfig := baseCNIConfig() - cniConfig.Ifname = "invalid_iface" // invalid - sriovVFDeviceID := "0000:03:00.6" - prevResult.Interfaces = []*current.Interface{hostIface, containerIface} - response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) - checkErrorResponse( - t, response, cnipb.ErrorCode_INVALID_NETWORK_CONFIG, - "prevResult does not match network configuration", - ) - }) - - t.Run("Interface check failure veth", func(t *testing.T) { - cniConfig := baseCNIConfig() - cniConfig.Ifname = ifname - cniConfig.Netns = "invalid_netns" - sriovVFDeviceID := "" - prevResult.Interfaces = []*current.Interface{hostIface, containerIface} - cniServer.podConfigurator, _ = newPodConfigurator(nil, nil, nil, nil, nil, "", false, channel.NewSubscribableChannel("PodUpdate", 100), nil, false) - response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) - checkErrorResponse(t, response, cnipb.ErrorCode_CHECK_INTERFACE_FAILURE, "") - }) - - t.Run("Interface check failure SR-IOV VF", func(t *testing.T) { - cniConfig := baseCNIConfig() - cniConfig.Ifname = ifname - cniConfig.Netns = "invalid_netns" - sriovVFDeviceID := "0000:03:00.6" - 
prevResult.Interfaces = []*current.Interface{hostIface, containerIface} - cniServer.podConfigurator, _ = newPodConfigurator(nil, nil, nil, nil, nil, "", true, channel.NewSubscribableChannel("PodUpdate", 100), nil, false) - response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) - checkErrorResponse(t, response, cnipb.ErrorCode_CHECK_INTERFACE_FAILURE, "") - }) -} - func TestParsePrevResultFromRequest(t *testing.T) { cniServer := newCNIServer(t) @@ -618,90 +551,6 @@ func TestValidateOVSInterface(t *testing.T) { assert.Nil(t, err, "Failed to validate OVS port configuration") } -func TestRemoveInterface(t *testing.T) { - controller := gomock.NewController(t) - defer controller.Finish() - mockOVSBridgeClient := ovsconfigtest.NewMockOVSBridgeClient(controller) - mockOFClient := openflowtest.NewMockClient(controller) - ifaceStore := interfacestore.NewInterfaceStore() - routeMock := routetest.NewMockInterface(controller) - gwMAC, _ := net.ParseMAC("00:00:11:11:11:11") - podConfigurator, err := newPodConfigurator(mockOVSBridgeClient, mockOFClient, routeMock, ifaceStore, gwMAC, "system", false, channel.NewSubscribableChannel("PodUpdate", 100), nil, false) - require.Nil(t, err, "No error expected in podConfigurator constructor") - - containerMAC, _ := net.ParseMAC("aa:bb:cc:dd:ee:ff") - containerIP := net.ParseIP("1.1.1.1") - - var cniConfig *CNIConfig - var containerID string - var podName string - var hostIfaceName string - var fakePortUUID string - var containerConfig *interfacestore.InterfaceConfig - - setup := func(name string) { - containerID = uuid.New().String() - podName = name - hostIfaceName = util.GenerateContainerInterfaceName(podName, testPodNamespace, containerID) - fakePortUUID = uuid.New().String() - - netcfg := generateNetworkConfiguration("", supportedCNIVersion, "", testIpamType) - cniConfig = &CNIConfig{NetworkConfig: netcfg, CniCmdArgs: &cnipb.CniCmdArgs{}} - cniConfig.Ifname = "eth0" - cniConfig.ContainerId = 
containerID - cniConfig.Netns = "" - - containerConfig = interfacestore.NewContainerInterface( - hostIfaceName, - containerID, - podName, - testPodNamespace, - containerMAC, - []net.IP{containerIP}, - 0) - containerConfig.OVSPortConfig = &interfacestore.OVSPortConfig{PortUUID: fakePortUUID, OFPort: 0} - } - - t.Run("Successful remove", func(t *testing.T) { - setup("test1") - ifaceStore.AddInterface(containerConfig) - - mockOFClient.EXPECT().UninstallPodFlows(hostIfaceName).Return(nil) - mockOVSBridgeClient.EXPECT().DeletePort(fakePortUUID).Return(nil) - routeMock.EXPECT().DeleteLocalAntreaFlexibleIPAMPodRule([]net.IP{containerIP}).Return(nil).Times(1) - - err := podConfigurator.removeInterfaces(containerID) - require.Nil(t, err, "Failed to remove interface") - _, found := ifaceStore.GetContainerInterface(containerID) - assert.False(t, found, "Interface should not be in the local cache anymore") - }) - - t.Run("Error in OVS port delete", func(t *testing.T) { - setup("test2") - ifaceStore.AddInterface(containerConfig) - - mockOVSBridgeClient.EXPECT().DeletePort(fakePortUUID).Return(ovsconfig.NewTransactionError(fmt.Errorf("error while deleting OVS port"), true)) - mockOFClient.EXPECT().UninstallPodFlows(hostIfaceName).Return(nil) - - err := podConfigurator.removeInterfaces(containerID) - require.NotNil(t, err, "Expected interface remove to fail") - _, found := ifaceStore.GetContainerInterface(containerID) - assert.True(t, found, "Interface should still be in local cache because of port deletion failure") - }) - - t.Run("Error in Pod flows delete", func(t *testing.T) { - setup("test3") - ifaceStore.AddInterface(containerConfig) - - mockOFClient.EXPECT().UninstallPodFlows(hostIfaceName).Return(fmt.Errorf("failed to delete openflow entry")) - - err := podConfigurator.removeInterfaces(containerID) - require.NotNil(t, err, "Expected interface remove to fail") - _, found := ifaceStore.GetContainerInterface(containerID) - assert.True(t, found, "Interface should still be in 
local cache because of flow deletion failure") - }) -} - func TestBuildOVSPortExternalIDs(t *testing.T) { containerID := uuid.New().String() containerMAC, _ := net.ParseMAC("aa:bb:cc:dd:ee:ff") @@ -736,7 +585,7 @@ func TestBuildOVSPortExternalIDs(t *testing.T) { PortUUID: "12345678", OFPort: int32(1), } - ifaceConfig := ParseOVSPortInterfaceConfig(mockPort, portConfig, true) + ifaceConfig := ParseOVSPortInterfaceConfig(mockPort, portConfig) assert.Equal(t, len(containerIPs), len(ifaceConfig.IPs)) for _, ip1 := range containerIPs { existed := false diff --git a/pkg/agent/cniserver/server_windows_test.go b/pkg/agent/cniserver/server_windows_test.go new file mode 100644 index 00000000000..88c64a29123 --- /dev/null +++ b/pkg/agent/cniserver/server_windows_test.go @@ -0,0 +1,1079 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cniserver + +import ( + "context" + "fmt" + "net" + "sync" + "testing" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/hcn" + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + fakeclientset "k8s.io/client-go/kubernetes/fake" + + "antrea.io/antrea/pkg/agent/cniserver/ipam" + ipamtest "antrea.io/antrea/pkg/agent/cniserver/ipam/testing" + cniservertest "antrea.io/antrea/pkg/agent/cniserver/testing" + "antrea.io/antrea/pkg/agent/cniserver/types" + "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/interfacestore" + openflowtest "antrea.io/antrea/pkg/agent/openflow/testing" + routetest "antrea.io/antrea/pkg/agent/route/testing" + agenttypes "antrea.io/antrea/pkg/agent/types" + "antrea.io/antrea/pkg/agent/util" + cnipb "antrea.io/antrea/pkg/apis/cni/v1beta1" + ovsconfigtest "antrea.io/antrea/pkg/ovs/ovsconfig/testing" + "antrea.io/antrea/pkg/util/channel" +) + +var ( + containerMACStr = "23:34:56:23:22:45" + dnsSearches = []string{"a.b.c.d"} +) + +func TestUpdateResultDNSConfig(t *testing.T) { + for _, tc := range []struct { + name string + cniConfig *CNIConfig + expDNS cnitypes.DNS + }{ + { + name: "only-dns", + cniConfig: &CNIConfig{ + NetworkConfig: &types.NetworkConfig{ + DNS: cnitypes.DNS{ + Nameservers: []string{"8.8.8.8", "8.8.4.4"}, + Search: []string{"a.b.c"}, + }}, + }, + expDNS: cnitypes.DNS{ + Nameservers: []string{"8.8.8.8", "8.8.4.4"}, + Search: []string{"a.b.c"}, + }, + }, { + name: "only-runtime-nameservers", + cniConfig: &CNIConfig{ + NetworkConfig: &types.NetworkConfig{ + RuntimeConfig: types.RuntimeConfig{ + DNS: types.RuntimeDNS{ + Nameservers: []string{"1.1.1.1"}, + }, + }, + }, + }, + expDNS: cnitypes.DNS{ + 
Nameservers: []string{"1.1.1.1"}, + }, + }, { + name: "only-runtime-search", + cniConfig: &CNIConfig{ + NetworkConfig: &types.NetworkConfig{ + RuntimeConfig: types.RuntimeConfig{ + DNS: types.RuntimeDNS{ + Search: []string{"c.b.a"}, + }, + }, + }, + }, + expDNS: cnitypes.DNS{ + Search: []string{"c.b.a"}, + }, + }, { + name: "replace-by-runtime-config", + cniConfig: &CNIConfig{ + NetworkConfig: &types.NetworkConfig{ + DNS: cnitypes.DNS{ + Nameservers: []string{"8.8.8.8", "8.8.4.4"}, + Search: []string{"a.b.c"}, + }, + RuntimeConfig: types.RuntimeConfig{ + DNS: types.RuntimeDNS{ + Nameservers: []string{"1.1.1.1"}, + Search: []string{"c.b.a"}, + }, + }, + }, + }, + expDNS: cnitypes.DNS{ + Nameservers: []string{"1.1.1.1"}, + Search: []string{"c.b.a"}, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + result := ¤t.Result{} + updateResultDNSConfig(result, tc.cniConfig) + assert.Equal(t, tc.expDNS, result.DNS) + }) + } +} + +func TestGetInfraContainer(t *testing.T) { + for _, tc := range []struct { + cniConfig *CNIConfig + expInfraContainerID string + expDockerContainer bool + }{ + { + cniConfig: &CNIConfig{CniCmdArgs: &cnipb.CniCmdArgs{ContainerId: "a78fcd2a-ea86-4e36-b3c0-467c81423567", Netns: "none"}}, + expInfraContainerID: "a78fcd2a-ea86-4e36-b3c0-467c81423567", + expDockerContainer: true, + }, + { + cniConfig: &CNIConfig{CniCmdArgs: &cnipb.CniCmdArgs{ContainerId: "a78fcd2a-ea86-4e36-b3c0-467c81423567", Netns: "container:14f294ed-4a06-444b-8198-eb44b4c26962"}}, + expInfraContainerID: "14f294ed-4a06-444b-8198-eb44b4c26962", + expDockerContainer: true, + }, + { + cniConfig: &CNIConfig{CniCmdArgs: &cnipb.CniCmdArgs{ContainerId: "a78fcd2a-ea86-4e36-b3c0-467c81423567", Netns: "14f294ed-4a06-444b-8198-eb44b4c26962"}}, + expInfraContainerID: "a78fcd2a-ea86-4e36-b3c0-467c81423567", + expDockerContainer: false, + }, + } { + infraContainer := tc.cniConfig.getInfraContainer() + assert.Equal(t, tc.expInfraContainerID, infraContainer) + assert.Equal(t, 
tc.expDockerContainer, isDockerContainer(tc.cniConfig.Netns)) + } +} + +var hostIfaces = sync.Map{} + +func testHostInterfaceExists(ifaceName string) bool { + _, exists := hostIfaces.Load(ifaceName) + return exists +} + +type hnsTestUtil struct { + endpointID string + hostIfaceName string + existingHnsEndpoints []hcsshim.HNSEndpoint + hnsEndpoint *hcsshim.HNSEndpoint + hcnEndpoint *hcn.HostComputeEndpoint + isDocker bool + isAttached bool + hnsEndpointCreatErr error + endpointAttachErr error +} + +func newHnsTestUtil(endpointID string, existingHnsEndpoints []hcsshim.HNSEndpoint, isDocker, isAttached bool, hnsEndpointCreatErr, endpointAttachErr error) *hnsTestUtil { + return &hnsTestUtil{ + endpointID: endpointID, + existingHnsEndpoints: existingHnsEndpoints, + isDocker: isDocker, + isAttached: isAttached, + hnsEndpointCreatErr: hnsEndpointCreatErr, + endpointAttachErr: endpointAttachErr, + } +} + +func (t *hnsTestUtil) listHnsEndpointFunc() ([]hcsshim.HNSEndpoint, error) { + return t.existingHnsEndpoints, nil +} + +func (t *hnsTestUtil) createHnsEndpoint(request *hcsshim.HNSEndpoint) (*hcsshim.HNSEndpoint, error) { + request.Id = t.endpointID + request.MacAddress = containerMACStr + t.hnsEndpoint = request + t.hcnEndpoint = &hcn.HostComputeEndpoint{ + Id: request.Id, + Name: request.Name, + HostComputeNetwork: request.VirtualNetworkName, + HostComputeNamespace: "00000000-0000-0000-0000-000000000000", + } + t.hostIfaceName = fmt.Sprintf("vEthernet (%s)", request.Name) + return request, t.hnsEndpointCreatErr +} + +func (t *hnsTestUtil) getNamespaceEndpointIDs(namespace string) ([]string, error) { + if t.isAttached { + t.addHostInterface() + return []string{t.endpointID}, nil + } + return []string{}, nil +} + +func (t *hnsTestUtil) hotAttachEndpoint(containerID string, epID string) error { + if t.endpointAttachErr == nil { + hostIfaces.Store(t.hostIfaceName, false) + } + return t.endpointAttachErr +} + +func (t *hnsTestUtil) isContainerAttachOnEndpoint(ep 
*hcsshim.HNSEndpoint, containerID string) (bool, error) { + return t.isAttached, nil +} + +func (t *hnsTestUtil) getHcnEndpointByID(epID string) (*hcn.HostComputeEndpoint, error) { + return t.hcnEndpoint, nil +} + +func (t *hnsTestUtil) deleteHnsEndpoint(endpoint *hcsshim.HNSEndpoint) (*hcsshim.HNSEndpoint, error) { + return t.hnsEndpoint, nil +} + +func (t *hnsTestUtil) attachEndpointInNamespace(ep *hcn.HostComputeEndpoint, namespace string) error { + t.hcnEndpoint.HostComputeNamespace = namespace + if t.endpointAttachErr == nil { + t.addHostInterface() + } + return t.endpointAttachErr +} + +func (t *hnsTestUtil) removeEndpointFromNamespace(namespace string, epID string) error { + return nil +} + +func (t *hnsTestUtil) setFunctions() { + listHnsEndpointFunc = t.listHnsEndpointFunc + createHnsEndpointFunc = t.createHnsEndpoint + getNamespaceEndpointIDsFunc = t.getNamespaceEndpointIDs + hotAttachEndpointFunc = t.hotAttachEndpoint + attachEndpointInNamespaceFunc = t.attachEndpointInNamespace + isContainerAttachOnEndpointFunc = t.isContainerAttachOnEndpoint + getHcnEndpointByIDFunc = t.getHcnEndpointByID + deleteHnsEndpointFunc = t.deleteHnsEndpoint + removeEndpointFromNamespaceFunc = t.removeEndpointFromNamespace +} + +func (t *hnsTestUtil) restore() { + listHnsEndpointFunc = hcsshim.HNSListEndpointRequest + createHnsEndpointFunc = createHnsEndpoint + getNamespaceEndpointIDsFunc = hcn.GetNamespaceEndpointIds + hotAttachEndpointFunc = hcsshim.HotAttachEndpoint + attachEndpointInNamespaceFunc = attachEndpointInNamespace + isContainerAttachOnEndpointFunc = isContainerAttachOnEndpoint + getHcnEndpointByIDFunc = hcn.GetEndpointByID + deleteHnsEndpointFunc = deleteHnsEndpoint + removeEndpointFromNamespaceFunc = hcn.RemoveNamespaceEndpoint +} + +func (t *hnsTestUtil) addHostInterface() { + if _, exists := hostIfaces.Load(t.hostIfaceName); exists { + return + } + go func() { + select { + case <-time.After(time.Millisecond * 650): + hostIfaces.Store(t.hostIfaceName, false) + 
} + }() +} + +func mockCNIServer(t *testing.T, controller *gomock.Controller, podUpdateNotifier *channel.SubscribableChannel) *CNIServer { + mockOVSBridgeClient = ovsconfigtest.NewMockOVSBridgeClient(controller) + mockOFClient = openflowtest.NewMockClient(controller) + routeMock = routetest.NewMockInterface(controller) + ifaceStore = interfacestore.NewInterfaceStore() + cniServer := newCNIServer(t) + cniServer.routeClient = routeMock + _, nodePodCIDRv4, _ := net.ParseCIDR("192.168.1.0/24") + gwMAC, _ := net.ParseMAC("00:00:11:11:11:11") + gateway := &config.GatewayConfig{Name: "", IPv4: gwIPv4, MAC: gwMAC} + cniServer.nodeConfig = &config.NodeConfig{Name: "node1", PodIPv4CIDR: nodePodCIDRv4, GatewayConfig: gateway} + cniServer.podConfigurator, _ = newPodConfigurator(mockOVSBridgeClient, mockOFClient, routeMock, ifaceStore, gwMAC, "system", false, podUpdateNotifier, nil, false) + return cniServer +} + +func prepareSetup(t *testing.T, ipamType string, name string, containerID, infraContainerID, netns string, prevResult *current.Result) (*cnipb.CniCmdRequest, string) { + networkCfg := generateNetworkConfiguration("", supportedCNIVersion, "", ipamType) + networkCfg.RuntimeConfig = types.RuntimeConfig{ + DNS: types.RuntimeDNS{ + Nameservers: dns, + Search: dnsSearches, + }, + } + if prevResult != nil { + networkCfg.RawPrevResult, _ = translateRawPrevResult(prevResult, supportedCNIVersion) + } + podArgs := cniservertest.GenerateCNIArgs(name, testPodNamespace, containerID) + requestMsg, _ := newRequest(podArgs, networkCfg, "", t) + requestMsg.CniArgs.ContainerId = containerID + requestMsg.CniArgs.Netns = netns + hostIfaceName := util.GenerateContainerInterfaceName(name, testPodNamespace, infraContainerID) + return requestMsg, hostIfaceName +} + +func TestCmdAdd(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + ipamType := "windows-test" + ipamMock := ipamtest.NewMockIPAMDriver(controller) + ipam.ResetIPAMDriver(ipamType, ipamMock) + 
oriIPAMResult := &ipam.IPAMResult{Result: *ipamResult} + ctx := context.TODO() + + dockerInfraContainer := "261a1970-5b6c-11ed-8caf-000c294e5d03" + dockerWorkContainer := "261e579a-5b6c-11ed-8caf-000c294e5d03" + unknownInfraContainer := generateUUID(t) + containerdInfraContainer := generateUUID(t) + + defer mockHostInterfaceExists()() + defer mockGetHnsNetworkByName()() + defer mockSetInterfaceMTU(nil)() + + for _, tc := range []struct { + name string + podName string + containerID string + infraContainerID string + netns string + ipamAdd bool + ipamDel bool + ipamError error + oriIPAMResult *ipam.IPAMResult + hnsEndpointCreateErr error + endpointAttachErr error + ifaceExist bool + isAttached bool + existingHnsEndpoints []hcsshim.HNSEndpoint + endpointExists bool + connectOVS bool + containerIfaceExist bool + errResponse *cnipb.CniCmdResponse + expectedErr error + }{ + { + name: "docker-infra-create-failure", + podName: "pod0", + containerID: dockerInfraContainer, + infraContainerID: dockerInfraContainer, + netns: "none", + ipamAdd: true, + ipamDel: true, + hnsEndpointCreateErr: fmt.Errorf("unable to create HnsEndpoint"), + errResponse: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_CONFIG_INTERFACE_FAILURE, + Message: "unable to create HnsEndpoint", + }, + }, + }, { + name: "docker-infra-attach-failure", + podName: "pod1", + containerID: dockerInfraContainer, + infraContainerID: dockerInfraContainer, + netns: "none", + ipamAdd: true, + ipamDel: true, + endpointAttachErr: fmt.Errorf("unable to attach HnsEndpoint"), + errResponse: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_CONFIG_INTERFACE_FAILURE, + Message: "failed to configure container IP: unable to attach HnsEndpoint", + }, + }, + }, { + name: "docker-infra-success", + podName: "pod2", + containerID: dockerInfraContainer, + infraContainerID: dockerInfraContainer, + netns: "none", + ipamAdd: true, + connectOVS: true, + containerIfaceExist: true, + }, { + name: 
"docker-workload-allocate-ip-failure", + podName: "pod3", + containerID: dockerWorkContainer, + infraContainerID: unknownInfraContainer, + netns: fmt.Sprintf("container:%s", unknownInfraContainer), + expectedErr: fmt.Errorf("allocated IP address not found"), + }, { + name: "docker-workload-no-endpoint", + podName: "pod4", + containerID: dockerWorkContainer, + infraContainerID: dockerInfraContainer, + netns: fmt.Sprintf("container:%s", dockerInfraContainer), + oriIPAMResult: oriIPAMResult, + errResponse: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_CONFIG_INTERFACE_FAILURE, + Message: "failed to find HNSEndpoint: pod4-6631b7", + }, + }, + }, { + name: "docker-workload-attach-failure", + podName: "pod5", + containerID: dockerWorkContainer, + infraContainerID: dockerInfraContainer, + netns: fmt.Sprintf("container:%s", dockerInfraContainer), + oriIPAMResult: oriIPAMResult, + endpointAttachErr: fmt.Errorf("unable to attach HnsEndpoint"), + endpointExists: true, + errResponse: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_CONFIG_INTERFACE_FAILURE, + Message: "failed to configure container IP: unable to attach HnsEndpoint", + }, + }, + }, { + name: "docker-workload-success", + podName: "pod6", + containerID: dockerWorkContainer, + infraContainerID: dockerInfraContainer, + netns: fmt.Sprintf("container:%s", dockerInfraContainer), + oriIPAMResult: oriIPAMResult, + endpointExists: true, + }, { + name: "docker-workload-already-attached", + podName: "pod7", + containerID: dockerWorkContainer, + infraContainerID: dockerInfraContainer, + netns: fmt.Sprintf("container:%s", dockerInfraContainer), + isAttached: true, + endpointExists: true, + oriIPAMResult: oriIPAMResult, + }, { + name: "containerd-success", + podName: "pod8", + containerID: containerdInfraContainer, + infraContainerID: containerdInfraContainer, + netns: generateUUID(t), + ipamAdd: true, + connectOVS: true, + containerIfaceExist: true, + }, { + name: 
"containerd-already-attached", + podName: "pod9", + containerID: containerdInfraContainer, + infraContainerID: containerdInfraContainer, + netns: generateUUID(t), + oriIPAMResult: oriIPAMResult, + connectOVS: true, + containerIfaceExist: true, + isAttached: true, + }, { + name: "containerd-attach-failure", + podName: "pod10", + containerID: containerdInfraContainer, + infraContainerID: containerdInfraContainer, + netns: generateUUID(t), + ipamDel: true, + oriIPAMResult: oriIPAMResult, + endpointAttachErr: fmt.Errorf("unable to attach HnsEndpoint"), + errResponse: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_CONFIG_INTERFACE_FAILURE, + Message: "failed to configure container IP: unable to attach HnsEndpoint", + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + isDocker := isDockerContainer(tc.netns) + testUtil := newHnsTestUtil(generateUUID(t), tc.existingHnsEndpoints, isDocker, tc.isAttached, tc.hnsEndpointCreateErr, tc.endpointAttachErr) + testUtil.setFunctions() + defer testUtil.restore() + waiter := newAsyncWaiter(tc.podName, tc.infraContainerID) + server := mockCNIServer(t, controller, waiter.notifier) + requestMsg, ovsPortName := prepareSetup(t, ipamType, tc.podName, tc.containerID, tc.infraContainerID, tc.netns, nil) + if tc.endpointExists { + server.podConfigurator.ifConfigurator.(*ifConfigurator).addEndpoint(getHnsEndpoint(generateUUID(t), ovsPortName)) + } + if tc.oriIPAMResult != nil { + ipam.AddIPAMResult(tc.infraContainerID, tc.oriIPAMResult) + } + if tc.ipamAdd { + ipamMock.EXPECT().Add(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, oriIPAMResult, tc.ipamError).Times(1) + } + if tc.ipamDel { + ipamMock.EXPECT().Del(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + } + ovsPortID := generateUUID(t) + if tc.connectOVS { + if isDocker { + mockOVSBridgeClient.EXPECT().CreateInternalPort(ovsPortName, int32(0), gomock.Any(), gomock.Any()).Return(ovsPortID, nil).Times(1) + 
mockOVSBridgeClient.EXPECT().GetOFPort(ovsPortName, false).Return(int32(100), nil).Times(1) + } else { + mockOVSBridgeClient.EXPECT().CreatePort(ovsPortName, ovsPortName, gomock.Any()).Return(ovsPortID, nil).Times(1) + mockOVSBridgeClient.EXPECT().SetInterfaceType(ovsPortName, "internal").Return(nil).Times(1) + mockOVSBridgeClient.EXPECT().GetOFPort(ovsPortName, true).Return(int32(100), nil).Times(1) + } + mockOFClient.EXPECT().InstallPodFlows(ovsPortName, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + routeMock.EXPECT().AddLocalAntreaFlexibleIPAMPodRule(gomock.Any()).Return(nil).Times(1) + } + resp, err := server.CmdAdd(ctx, requestMsg) + assert.Equal(t, tc.expectedErr, err) + if tc.errResponse != nil { + assert.Equal(t, tc.errResponse, resp) + } else if tc.expectedErr == nil { + cniResult := ¤t.Result{ + CNIVersion: supportedCNIVersion, + IPs: oriIPAMResult.IPs, + Routes: oriIPAMResult.Routes, + DNS: cnitypes.DNS{ + Nameservers: dns, + Search: dnsSearches, + }, + Interfaces: []*current.Interface{ + {Name: ovsPortName, Mac: containerMACStr, Sandbox: ""}, + {Name: "eth0", Mac: containerMACStr, Sandbox: tc.netns}, + }, + } + successResponse := resultToResponse(cniResult) + assert.Equal(t, successResponse, resp) + } + containerID := requestMsg.CniArgs.ContainerId + _, exists := ifaceStore.GetContainerInterface(containerID) + assert.Equal(t, exists, tc.containerIfaceExist) + if tc.connectOVS { + waiter.wait() + // Wait for the completion of async function "setInterfaceMTUFunc", otherwise it may lead to the + // race condition failure. 
+ wait.PollImmediate(time.Millisecond*10, time.Second, func() (done bool, err error) { + mtuSet, exist := hostIfaces.Load(ovsPortName) + if !exist { + return false, nil + } + return mtuSet.(bool), nil + }) + } + waiter.close() + }) + } +} + +func TestCmdDel(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + ipamType := "windows-test" + ipamMock := ipamtest.NewMockIPAMDriver(controller) + ipam.ResetIPAMDriver(ipamType, ipamMock) + ctx := context.TODO() + + containerID := "261a1970-5b6c-11ed-8caf-000c294e5d03" + containerMAC, _ := net.ParseMAC("11:22:33:44:33:22") + + defer mockHostInterfaceExists()() + defer mockGetHnsNetworkByName()() + defer mockSetInterfaceMTU(nil)() + + for _, tc := range []struct { + name string + podName string + containerID string + netns string + ipamDel bool + ipamError error + endpointExists bool + disconnectOVS bool + ifaceExists bool + errResponse *cnipb.CniCmdResponse + }{ + { + name: "docker-infra-success", + podName: "pod0", + containerID: containerID, + netns: "none", + ipamDel: true, + disconnectOVS: true, + endpointExists: true, + ifaceExists: true, + }, { + name: "interface-not-exist", + podName: "pod1", + containerID: containerID, + netns: "none", + ipamDel: true, + }, { + name: "ipam-delete-failure", + podName: "pod2", + containerID: containerID, + netns: "none", + ipamDel: true, + ipamError: fmt.Errorf("unable to delete IP"), + errResponse: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_IPAM_FAILURE, + Message: "unable to delete IP", + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + isDocker := isDockerContainer(tc.netns) + requestMsg, ovsPortName := prepareSetup(t, ipamType, tc.podName, tc.containerID, tc.containerID, tc.netns, nil) + hnsEndpoint := getHnsEndpoint(generateUUID(t), ovsPortName) + var existingHnsEndpoints []hcsshim.HNSEndpoint + if tc.endpointExists { + existingHnsEndpoints = append(existingHnsEndpoints, *hnsEndpoint) + } + testUtil := 
newHnsTestUtil(hnsEndpoint.Id, existingHnsEndpoints, isDocker, true, nil, nil) + testUtil.setFunctions() + defer testUtil.restore() + waiter := newAsyncWaiter(tc.podName, tc.containerID) + server := mockCNIServer(t, controller, waiter.notifier) + ovsPortID := generateUUID(t) + if tc.endpointExists { + server.podConfigurator.ifConfigurator.(*ifConfigurator).addEndpoint(hnsEndpoint) + } + if tc.ifaceExists { + containerIface := interfacestore.NewContainerInterface(ovsPortName, tc.containerID, tc.podName, testPodNamespace, containerMAC, []net.IP{net.ParseIP("10.1.2.100")}, 0) + containerIface.OVSPortConfig = &interfacestore.OVSPortConfig{ + OFPort: 100, + PortUUID: ovsPortID, + } + ifaceStore.AddInterface(containerIface) + } + if tc.ipamDel { + ipamMock.EXPECT().Del(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, tc.ipamError).Times(1) + } + if tc.disconnectOVS { + mockOVSBridgeClient.EXPECT().DeletePort(ovsPortID).Return(nil).Times(1) + mockOFClient.EXPECT().UninstallPodFlows(ovsPortName).Return(nil).Times(1) + routeMock.EXPECT().DeleteLocalAntreaFlexibleIPAMPodRule(gomock.Any()).Return(nil).Times(1) + } + resp, err := server.CmdDel(ctx, requestMsg) + assert.NoError(t, err) + if tc.errResponse != nil { + assert.Equal(t, tc.errResponse, resp) + } else { + assert.Equal(t, emptyResponse, resp) + } + _, exists := ifaceStore.GetContainerInterface(tc.containerID) + assert.False(t, exists) + if tc.endpointExists { + _, exists = server.podConfigurator.ifConfigurator.(*ifConfigurator).getEndpoint(ovsPortName) + assert.False(t, exists) + } + if tc.disconnectOVS { + waiter.wait() + } + waiter.close() + }) + } +} + +func TestCmdCheck(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + ipamType := "windows-test" + ipamMock := ipamtest.NewMockIPAMDriver(controller) + ipam.ResetIPAMDriver(ipamType, ipamMock) + ctx := context.TODO() + + containerID := "261a1970-5b6c-11ed-8caf-000c294e5d03" + mac, _ := net.ParseMAC("11:22:33:44:33:22") + 
containerIP, containerIPNet, _ := net.ParseCIDR("10.1.2.100/24") + containerIPNet.IP = containerIP + + defer mockHostInterfaceExists()() + defer mockGetHnsNetworkByName()() + defer mockSetInterfaceMTU(nil)() + defer mockListHnsEndpoint(nil, nil)() + defer mockGetNetInterfaceAddrs(containerIPNet, nil)() + defer mockGetHnsEndpointByName(generateUUID(t), mac)() + + wrapperIPAMResult := func(ipamResult current.Result, interfaces []*current.Interface) *current.Result { + result := ipamResult + index := 1 + result.IPs[0].Interface = &index + result.Interfaces = interfaces + return &result + } + wrapperContainerInterface := func(ifaceName, containerID, podName, ovsPortID string, mac net.HardwareAddr, containerIP net.IP) *interfacestore.InterfaceConfig { + containerIface := interfacestore.NewContainerInterface(ifaceName, containerID, podName, testPodNamespace, mac, []net.IP{containerIP}, 0) + containerIface.OVSPortConfig = &interfacestore.OVSPortConfig{ + PortUUID: ovsPortID, + OFPort: 10, + } + return containerIface + } + + for _, tc := range []struct { + name string + podName string + containerID string + netns string + existingIface *interfacestore.InterfaceConfig + prevResult *current.Result + netInterface *net.Interface + getNetInterfaceErr error + errResponse *cnipb.CniCmdResponse + }{ + { + name: "check-success", + podName: "pod0", + netns: "none", + containerID: containerID, + prevResult: wrapperIPAMResult(*ipamResult, []*current.Interface{ + {Name: "pod0-6631b7", Mac: "11:22:33:44:33:22", Sandbox: ""}, + {Name: "pod0-6631b7_eth0", Mac: "11:22:33:44:33:22", Sandbox: "none"}, + }), + existingIface: wrapperContainerInterface("pod0-6631b7", containerID, "pod0", generateUUID(t), mac, containerIP), + netInterface: &net.Interface{ + Name: "vEthernet (pod0-6631b7)", + HardwareAddr: mac, + Index: 4, + Flags: net.FlagUp, + }, + }, { + name: "pod-namespace-mismatch", + podName: "pod1", + netns: "none", + containerID: containerID, + prevResult: wrapperIPAMResult(*ipamResult, 
[]*current.Interface{ + {Name: "pod1-6631b7", Mac: "11:22:33:44:33:22", Sandbox: ""}, + {Name: "pod1-6631b7_eth0", Mac: "11:22:33:44:33:22", Sandbox: "invalid-namespace"}, + }), + existingIface: wrapperContainerInterface("pod1-6631b7", containerID, "pod1", generateUUID(t), mac, containerIP), + netInterface: &net.Interface{ + Name: "vEthernet (pod1-6631b7)", + HardwareAddr: mac, + Index: 4, + Flags: net.FlagUp, + }, + errResponse: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_CHECK_INTERFACE_FAILURE, + Message: "sandbox in prevResult invalid-namespace doesn't match configured netns: none", + }, + }, + }, { + name: "container-host-names-mismatch", + podName: "pod2", + netns: "none", + containerID: containerID, + prevResult: wrapperIPAMResult(*ipamResult, []*current.Interface{ + {Name: "pod2-6631b7", Mac: "11:22:33:44:33:22", Sandbox: ""}, + {Name: "eth0", Mac: "11:22:33:44:33:22", Sandbox: "none"}, + }), + existingIface: wrapperContainerInterface("pod2-6631b7", containerID, "pod2", generateUUID(t), mac, containerIP), + netInterface: &net.Interface{ + Name: "vEthernet (pod2-6631b7)", + HardwareAddr: mac, + Index: 4, + Flags: net.FlagUp, + }, + errResponse: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: cnipb.ErrorCode_CHECK_INTERFACE_FAILURE, + Message: "unable to get net Interface with name vEthernet (eth0)", + }, + }, + }, { + name: "container-host-MAC-mismatch", + podName: "pod3", + netns: "none", + containerID: containerID, + prevResult: wrapperIPAMResult(*ipamResult, []*current.Interface{ + {Name: "pod3-6631b7", Mac: "11:22:33:44:33:22", Sandbox: ""}, + {Name: "pod3-6631b7_eth0", Mac: "11:22:33:44:33:33", Sandbox: "none"}, + }), + existingIface: wrapperContainerInterface("pod3-6631b7", containerID, "pod3", generateUUID(t), mac, containerIP), + netInterface: &net.Interface{ + Name: "vEthernet (pod3-6631b7)", + HardwareAddr: mac, + Index: 4, + Flags: net.FlagUp, + }, + errResponse: &cnipb.CniCmdResponse{ + Error: &cnipb.Error{ + Code: 
cnipb.ErrorCode_CHECK_INTERFACE_FAILURE, + Message: "container MAC in prevResult 11:22:33:44:33:33 doesn't match configured address: 11:22:33:44:33:22", + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + defer mockGetNetInterfaceByName(tc.netInterface)() + cniserver := mockCNIServer(t, controller, channel.NewSubscribableChannel("podUpdate", 100)) + requestMsg, _ := prepareSetup(t, ipamType, tc.podName, tc.containerID, tc.containerID, tc.netns, tc.prevResult) + ipamMock.EXPECT().Check(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + ifaceStore.AddInterface(tc.existingIface) + resp, err := cniserver.CmdCheck(ctx, requestMsg) + assert.NoError(t, err) + if tc.errResponse != nil { + assert.Equal(t, tc.errResponse, resp) + } else { + assert.Equal(t, emptyResponse, resp) + } + }) + } +} + +type asyncWaiter struct { + podName string + containerID string + waitCh chan struct{} + stopCh chan struct{} + notifier *channel.SubscribableChannel +} + +func (w *asyncWaiter) notify(e interface{}) { + podUpdate := e.(agenttypes.PodUpdate) + if podUpdate.PodName == w.podName && podUpdate.ContainerID == w.containerID { + w.waitCh <- struct{}{} + } +} + +func (w *asyncWaiter) wait() { + <-w.waitCh +} + +func (w *asyncWaiter) close() { + close(w.waitCh) + close(w.stopCh) +} + +func newAsyncWaiter(podName, containerID string) *asyncWaiter { + waiter := &asyncWaiter{ + podName: podName, + containerID: containerID, + waitCh: make(chan struct{}), + stopCh: make(chan struct{}), + notifier: channel.NewSubscribableChannel("PodUpdate", 100), + } + waiter.notifier.Subscribe(waiter.notify) + go waiter.notifier.Run(waiter.stopCh) + return waiter +} + +func TestReconcile(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + mockOVSBridgeClient = ovsconfigtest.NewMockOVSBridgeClient(controller) + mockOFClient = openflowtest.NewMockClient(controller) + ifaceStore = interfacestore.NewInterfaceStore() + routeMock = 
routetest.NewMockInterface(controller) + nodeName := "node1" + defer mockHostInterfaceExists()() + defer mockGetHnsNetworkByName()() + missingEndpoint := getHnsEndpoint(generateUUID(t), "iface4") + testUtil := newHnsTestUtil(missingEndpoint.Id, []hcsshim.HNSEndpoint{*missingEndpoint}, false, true, nil, nil) + testUtil.createHnsEndpoint(missingEndpoint) + testUtil.setFunctions() + defer testUtil.restore() + + cniServer := newCNIServer(t) + cniServer.routeClient = routeMock + gwMAC, _ := net.ParseMAC("00:00:11:11:11:11") + pods := []runtime.Object{ + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "p1", + Namespace: testPodNamespace, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "p2", + Namespace: testPodNamespace, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + HostNetwork: true, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "p4", + Namespace: testPodNamespace, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + }, + }, + } + kubeClient := fakeclientset.NewSimpleClientset(pods...) 
+ cniServer.kubeClient = kubeClient + containerIfaces := map[string]*interfacestore.InterfaceConfig{ + "iface1": { + InterfaceName: "iface1", + Type: interfacestore.ContainerInterface, + OVSPortConfig: &interfacestore.OVSPortConfig{ + PortUUID: generateUUID(t), + OFPort: int32(3), + }, + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{ + PodName: "p1", + PodNamespace: testPodNamespace, + ContainerID: generateUUID(t), + }, + }, + "iface3": { + InterfaceName: "iface3", + Type: interfacestore.ContainerInterface, + OVSPortConfig: &interfacestore.OVSPortConfig{ + PortUUID: generateUUID(t), + OFPort: int32(4), + }, + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{ + PodName: "p3", + PodNamespace: testPodNamespace, + ContainerID: generateUUID(t), + }, + }, + "iface4": { + InterfaceName: "iface4", + Type: interfacestore.ContainerInterface, + OVSPortConfig: &interfacestore.OVSPortConfig{ + PortUUID: generateUUID(t), + OFPort: int32(-1), + }, + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{ + PodName: "p4", + PodNamespace: testPodNamespace, + ContainerID: generateUUID(t), + }, + }, + } + for _, containerIface := range containerIfaces { + ifaceStore.AddInterface(containerIface) + } + pod4IfaceName := "iface4" + pod4Iface := containerIfaces["iface4"] + waiter := newAsyncWaiter(pod4Iface.PodName, pod4Iface.ContainerID) + cniServer.podConfigurator, _ = newPodConfigurator(mockOVSBridgeClient, mockOFClient, routeMock, ifaceStore, gwMAC, "system", false, waiter.notifier, nil, false) + cniServer.nodeConfig = &config.NodeConfig{Name: nodeName} + + // Re-install Pod1 flows + mockOFClient.EXPECT().InstallPodFlows("iface1", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1) + // Uninstall Pod3 flows which is deleted. 
+ iface := containerIfaces["iface3"] + mockOFClient.EXPECT().UninstallPodFlows("iface3").Return(nil).Times(1) + mockOVSBridgeClient.EXPECT().DeletePort(iface.PortUUID).Return(nil).Times(1) + routeMock.EXPECT().DeleteLocalAntreaFlexibleIPAMPodRule(gomock.Any()).Return(nil).Times(1) + // Re-connect to Pod4 + hostIfaces.Store(fmt.Sprintf("vEthernet (%s)", pod4IfaceName), true) + mockOVSBridgeClient.EXPECT().SetInterfaceType(pod4IfaceName, "internal").Return(nil).Times(1) + mockOVSBridgeClient.EXPECT().GetOFPort(pod4IfaceName, true).Return(int32(5), nil).Times(1) + mockOFClient.EXPECT().InstallPodFlows(pod4IfaceName, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1) + err := cniServer.reconcile() + assert.NoError(t, err) + _, exists := ifaceStore.GetInterfaceByName("iface3") + assert.False(t, exists) + waiter.wait() + waiter.close() +} + +func getHnsEndpoint(id, name string) *hcsshim.HNSEndpoint { + return &hcsshim.HNSEndpoint{ + Id: id, + Name: name, + VirtualNetworkName: util.LocalHNSNetwork, + IPAddress: net.ParseIP("10.1.2.100"), + MacAddress: containerMACStr, + GatewayAddress: "10.1.2.1", + PrefixLength: 24, + } +} + +func getFakeHnsNetworkByName(network string) (*hcsshim.HNSNetwork, error) { + return &hcsshim.HNSNetwork{ + Name: network, + Id: "8B692948-6FB3-4127-ABC7-BE9D12BE1E84", + Type: util.HNSNetworkType, + NetworkAdapterName: "Ethernet0", + SourceMac: "00:50:56:b1:29:a2", + Subnets: []hcsshim.Subnet{ + { + AddressPrefix: "10.1.2.0/24", + GatewayAddress: "10.1.2.1", + }, + }, + DNSSuffix: "test.antrea", + DNSServerList: "92.168.100.1", + ManagementIP: "10.10.10.10/28", + }, nil +} + +func mockHostInterfaceExists() func() { + originalHostInterfaceExistsFunc := hostInterfaceExistsFunc + hostInterfaceExistsFunc = testHostInterfaceExists + return func() { + hostInterfaceExistsFunc = originalHostInterfaceExistsFunc + } +} + +func mockGetHnsNetworkByName() func() { + originalGetHnsNetworkByName := getHnsNetworkByNameFunc + 
getHnsNetworkByNameFunc = getFakeHnsNetworkByName + return func() { + getHnsNetworkByNameFunc = originalGetHnsNetworkByName + } +} +func mockGetNetInterfaceByName(netInterface *net.Interface) func() { + originalGetNetInterfaceByName := getNetInterfaceByNameFunc + getNetInterfaceByNameFunc = func(name string) (*net.Interface, error) { + if netInterface.Name == name { + return netInterface, nil + } + return nil, fmt.Errorf("unable to get net Interface with name %s", name) + } + return func() { + getNetInterfaceByNameFunc = originalGetNetInterfaceByName + } +} + +func mockGetHnsEndpointByName(uuid string, mac net.HardwareAddr) func() { + originalGetHnsEndpointByName := getHnsEndpointByNameFunc + getHnsEndpointByNameFunc = func(endpointName string) (*hcsshim.HNSEndpoint, error) { + endpoint := getHnsEndpoint(uuid, endpointName) + endpoint.MacAddress = mac.String() + return endpoint, nil + } + return func() { + getHnsEndpointByNameFunc = originalGetHnsEndpointByName + } +} + +func mockGetNetInterfaceAddrs(containerIPNet *net.IPNet, err error) func() { + originalGetNetInterfaceAddrs := getNetInterfaceAddrsFunc + getNetInterfaceAddrsFunc = func(intf *net.Interface) ([]net.Addr, error) { + return []net.Addr{containerIPNet}, nil + } + return func() { + getNetInterfaceAddrsFunc = originalGetNetInterfaceAddrs + } +} + +func mockListHnsEndpoint(endpoints []hcsshim.HNSEndpoint, listError error) func() { + originalListHnsEndpoint := listHnsEndpointFunc + listHnsEndpointFunc = func() ([]hcsshim.HNSEndpoint, error) { + return endpoints, listError + } + return func() { + listHnsEndpointFunc = originalListHnsEndpoint + } +} + +func mockSetInterfaceMTU(setMTUError error) func() { + originalSetInterfaceMTU := setInterfaceMTUFunc + setInterfaceMTUFunc = func(ifaceName string, mtu int) error { + if setMTUError == nil { + hostIfaces.Store(ifaceName, true) + } + return setMTUError + } + return func() { + setInterfaceMTUFunc = originalSetInterfaceMTU + } +} diff --git 
a/pkg/agent/cniserver/sriov_linux.go b/pkg/agent/cniserver/sriov_linux.go index 22469c4f184..bf2c185ed1d 100644 --- a/pkg/agent/cniserver/sriov_linux.go +++ b/pkg/agent/cniserver/sriov_linux.go @@ -18,19 +18,20 @@ package cniserver import ( + "github.com/Mellanox/sriovnet" sriovcniutils "github.com/k8snetworkplumbingwg/sriov-cni/pkg/utils" ) // getVFInfo takes in a VF's PCI device ID and returns its PF and VF ID. -func getVFInfo(vfPCI string) (string, int, error) { +func (ic *ifConfigurator) getVFInfo(vfPCI string) (string, int, error) { var vfID int - pf, err := sriovcniutils.GetPfName(vfPCI) + pf, err := ic.sriovnet.GetPfName(vfPCI) if err != nil { return "", vfID, err } - vfID, err = sriovcniutils.GetVfid(vfPCI, pf) + vfID, err = ic.sriovnet.GetVfid(vfPCI, pf) if err != nil { return "", vfID, err } @@ -39,6 +40,40 @@ func getVFInfo(vfPCI string) (string, int, error) { } // getVFLinkName returns a VF's network interface name given its PCI address. -func getVFLinkName(pciAddress string) (string, error) { - return sriovcniutils.GetVFLinkNames(pciAddress) +func (ic *ifConfigurator) getVFLinkName(pciAddress string) (string, error) { + return ic.sriovnet.GetVFLinkNames(pciAddress) +} + +type sriovNet struct{} + +func (n *sriovNet) GetNetDevicesFromPci(pciAddress string) ([]string, error) { + return sriovnet.GetNetDevicesFromPci(pciAddress) +} + +func (n *sriovNet) GetUplinkRepresentor(pciAddress string) (string, error) { + return sriovnet.GetUplinkRepresentor(pciAddress) +} + +func (n *sriovNet) GetVfIndexByPciAddress(vfPciAddress string) (int, error) { + return sriovnet.GetVfIndexByPciAddress(vfPciAddress) +} + +func (n *sriovNet) GetVfRepresentor(uplink string, vfIndex int) (string, error) { + return sriovnet.GetVfRepresentor(uplink, vfIndex) +} + +func (n *sriovNet) GetPfName(vf string) (string, error) { + return sriovcniutils.GetPfName(vf) +} + +func (n *sriovNet) GetVfid(addr string, pfName string) (int, error) { + return sriovcniutils.GetVfid(addr, pfName) +} + 
+func (n *sriovNet) GetVFLinkNames(pciAddr string) (string, error) { + return sriovcniutils.GetVFLinkNames(pciAddr) +} + +func newSriovNet() *sriovNet { + return &sriovNet{} } diff --git a/pkg/agent/cniserver/testing/mock_cniserver.go b/pkg/agent/cniserver/testing/mock_cniserver.go new file mode 100644 index 00000000000..3ed4bcc78ac --- /dev/null +++ b/pkg/agent/cniserver/testing/mock_cniserver.go @@ -0,0 +1,153 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: antrea.io/antrea/pkg/agent/cniserver (interfaces: SriovNet) + +// Package testing is a generated GoMock package. 
+package testing + +import ( + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockSriovNet is a mock of SriovNet interface +type MockSriovNet struct { + ctrl *gomock.Controller + recorder *MockSriovNetMockRecorder +} + +// MockSriovNetMockRecorder is the mock recorder for MockSriovNet +type MockSriovNetMockRecorder struct { + mock *MockSriovNet +} + +// NewMockSriovNet creates a new mock instance +func NewMockSriovNet(ctrl *gomock.Controller) *MockSriovNet { + mock := &MockSriovNet{ctrl: ctrl} + mock.recorder = &MockSriovNetMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockSriovNet) EXPECT() *MockSriovNetMockRecorder { + return m.recorder +} + +// GetNetDevicesFromPci mocks base method +func (m *MockSriovNet) GetNetDevicesFromPci(arg0 string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetDevicesFromPci", arg0) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetDevicesFromPci indicates an expected call of GetNetDevicesFromPci +func (mr *MockSriovNetMockRecorder) GetNetDevicesFromPci(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetDevicesFromPci", reflect.TypeOf((*MockSriovNet)(nil).GetNetDevicesFromPci), arg0) +} + +// GetPfName mocks base method +func (m *MockSriovNet) GetPfName(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPfName", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPfName indicates an expected call of GetPfName +func (mr *MockSriovNetMockRecorder) GetPfName(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPfName", reflect.TypeOf((*MockSriovNet)(nil).GetPfName), arg0) +} + +// GetUplinkRepresentor mocks base method +func (m *MockSriovNet) GetUplinkRepresentor(arg0 
string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUplinkRepresentor", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUplinkRepresentor indicates an expected call of GetUplinkRepresentor +func (mr *MockSriovNetMockRecorder) GetUplinkRepresentor(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUplinkRepresentor", reflect.TypeOf((*MockSriovNet)(nil).GetUplinkRepresentor), arg0) +} + +// GetVFLinkNames mocks base method +func (m *MockSriovNet) GetVFLinkNames(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVFLinkNames", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVFLinkNames indicates an expected call of GetVFLinkNames +func (mr *MockSriovNetMockRecorder) GetVFLinkNames(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVFLinkNames", reflect.TypeOf((*MockSriovNet)(nil).GetVFLinkNames), arg0) +} + +// GetVfIndexByPciAddress mocks base method +func (m *MockSriovNet) GetVfIndexByPciAddress(arg0 string) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVfIndexByPciAddress", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVfIndexByPciAddress indicates an expected call of GetVfIndexByPciAddress +func (mr *MockSriovNetMockRecorder) GetVfIndexByPciAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVfIndexByPciAddress", reflect.TypeOf((*MockSriovNet)(nil).GetVfIndexByPciAddress), arg0) +} + +// GetVfRepresentor mocks base method +func (m *MockSriovNet) GetVfRepresentor(arg0 string, arg1 int) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVfRepresentor", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} 
+ +// GetVfRepresentor indicates an expected call of GetVfRepresentor +func (mr *MockSriovNetMockRecorder) GetVfRepresentor(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVfRepresentor", reflect.TypeOf((*MockSriovNet)(nil).GetVfRepresentor), arg0, arg1) +} + +// GetVfid mocks base method +func (m *MockSriovNet) GetVfid(arg0, arg1 string) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVfid", arg0, arg1) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVfid indicates an expected call of GetVfid +func (mr *MockSriovNetMockRecorder) GetVfid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVfid", reflect.TypeOf((*MockSriovNet)(nil).GetVfid), arg0, arg1) +} diff --git a/pkg/agent/util/netlink/netlink_linux.go b/pkg/agent/util/netlink/netlink_linux.go index fda905e9afa..9db8047fda1 100644 --- a/pkg/agent/util/netlink/netlink_linux.go +++ b/pkg/agent/util/netlink/netlink_linux.go @@ -39,4 +39,10 @@ type Interface interface { NeighDel(neigh *netlink.Neigh) error LinkByName(name string) (netlink.Link, error) + + LinkSetNsFd(link netlink.Link, fd int) error + + LinkSetMTU(link netlink.Link, mtu int) error + + LinkSetUp(link netlink.Link) error } diff --git a/pkg/agent/util/netlink/testing/mock_netlink_linux.go b/pkg/agent/util/netlink/testing/mock_netlink_linux.go index 51ffbed8ac8..7931125e2dc 100644 --- a/pkg/agent/util/netlink/testing/mock_netlink_linux.go +++ b/pkg/agent/util/netlink/testing/mock_netlink_linux.go @@ -106,6 +106,48 @@ func (mr *MockInterfaceMockRecorder) LinkByName(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LinkByName", reflect.TypeOf((*MockInterface)(nil).LinkByName), arg0) } +// LinkSetMTU mocks base method +func (m *MockInterface) LinkSetMTU(arg0 netlink.Link, arg1 int) error { + m.ctrl.T.Helper() + ret := 
m.ctrl.Call(m, "LinkSetMTU", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// LinkSetMTU indicates an expected call of LinkSetMTU +func (mr *MockInterfaceMockRecorder) LinkSetMTU(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LinkSetMTU", reflect.TypeOf((*MockInterface)(nil).LinkSetMTU), arg0, arg1) +} + +// LinkSetNsFd mocks base method +func (m *MockInterface) LinkSetNsFd(arg0 netlink.Link, arg1 int) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LinkSetNsFd", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// LinkSetNsFd indicates an expected call of LinkSetNsFd +func (mr *MockInterfaceMockRecorder) LinkSetNsFd(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LinkSetNsFd", reflect.TypeOf((*MockInterface)(nil).LinkSetNsFd), arg0, arg1) +} + +// LinkSetUp mocks base method +func (m *MockInterface) LinkSetUp(arg0 netlink.Link) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LinkSetUp", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// LinkSetUp indicates an expected call of LinkSetUp +func (mr *MockInterfaceMockRecorder) LinkSetUp(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LinkSetUp", reflect.TypeOf((*MockInterface)(nil).LinkSetUp), arg0) +} + // NeighDel mocks base method func (m *MockInterface) NeighDel(arg0 *netlink.Neigh) error { m.ctrl.T.Helper()