Deploy MacVLAN Network with RDMA Shared Device

Note

You can automate the configuration of this use case with NVIDIA Kubernetes Launch Kit. For more details, see Configuration Assistance with Kubernetes Launch Kit.

Step 1: Create a NicClusterPolicy with an RDMA shared device plugin

# NicClusterPolicy: tells the NVIDIA Network Operator to deploy the RDMA
# shared device plugin, the nv-ipam IPAM component, and the secondary-network
# stack (CNI plugins + Multus) on the cluster.
apiVersion: mellanox.com/v1alpha1
kind: NicClusterPolicy
metadata:
  name: nic-cluster-policy
spec:
  rdmaSharedDevicePlugin:
    image: k8s-rdma-shared-dev-plugin
    # NOTE(review): nvstaging is a staging registry and the version is a
    # beta tag -- confirm repository/version before production rollout.
    repository: nvcr.io/nvstaging/mellanox
    version: network-operator-v26.4.0-beta.7
    # Plugin configuration is an embedded JSON string (literal block scalar);
    # do not place YAML comments inside it.
    # - "resourceName" is consumed by pods as "rdma/rdma_shared_device_a"
    #   (see the test pod's resources in Step 4).
    # - "rdmaHcaMax": presumably the number of pods that may share the RDMA
    #   device concurrently -- confirm against the plugin documentation.
    # - "ifNames" selects the host interface; it must match the
    #   MacvlanNetwork "master" interface (Step 3 also uses ens1f0).
    config: |
      {
        "configList": [
          {
            "resourceName": "rdma_shared_device_a",
            "rdmaHcaMax": 63,
            "selectors": {
              "ifNames": ["ens1f0"]
            }
          }
        ]
      }
  # nv-ipam provides IP address management for the secondary network; the
  # IPPool created in Step 2 is served by this component.
  nvIpam:
    image: nvidia-k8s-ipam
    repository: nvcr.io/nvstaging/mellanox
    version: network-operator-v26.4.0-beta.7
    imagePullSecrets: []
    enableWebhook: false
  # Secondary-network stack: CNI plugin binaries plus Multus, which attaches
  # additional networks to pods via the k8s.v1.cni.cncf.io/networks annotation.
  secondaryNetwork:
    cniPlugins:
      image: plugins
      repository: nvcr.io/nvstaging/mellanox
      version: network-operator-v26.4.0-beta.7
    multus:
      image: multus-cni
      repository: nvcr.io/nvstaging/mellanox
      version: network-operator-v26.4.0-beta.7
kubectl apply -f nicclusterpolicy.yaml

Step 2: Create an IPPool resource for nv-ipam

# IPPool consumed by nv-ipam; referenced by name ("macvlan-pool") from the
# MacvlanNetwork ipam configuration in Step 3.
apiVersion: nv-ipam.nvidia.com/v1alpha1
kind: IPPool
metadata:
  name: macvlan-pool
  # NOTE(review): pool is created in the operator's namespace -- presumably
  # where nv-ipam watches for IPPool objects; confirm for your installation.
  namespace: nvidia-network-operator
spec:
  subnet: 192.168.4.0/24
  # perNodeBlockSize: presumably the number of IPs from the subnet reserved
  # per node -- confirm against the nv-ipam documentation.
  perNodeBlockSize: 50
  # Gateway must lie inside the subnet above.
  gateway: 192.168.4.1
kubectl apply -f ippool.yaml

Step 3: Create MacvlanNetwork

# MacvlanNetwork: the operator renders this into a NetworkAttachmentDefinition
# that pods reference via the k8s.v1.cni.cncf.io/networks annotation (Step 4).
apiVersion: mellanox.com/v1alpha1
kind: MacvlanNetwork
metadata:
  name: macvlan-network
spec:
  # Namespace in which the resulting network attachment is created; the test
  # pod in Step 4 runs in "default".
  networkNamespace: "default"
  # Host interface used as the macvlan parent; must match the "ifNames"
  # selector in the RDMA shared device plugin config (Step 1 uses ens1f0).
  master: "ens1f0"
  mode: "bridge"
  mtu: 1500
  # IPAM config is an embedded JSON string; "poolName" references the IPPool
  # created in Step 2. Do not place YAML comments inside the block scalar.
  ipam: |
    {
      "type": "nv-ipam",
      "poolName": "macvlan-pool"
    }
kubectl apply -f macvlannetwork.yaml

Step 4: Deploy test workload

# Test pod: attaches the macvlan secondary network and requests one RDMA
# shared device resource so RDMA tooling works inside the container.
apiVersion: v1
kind: Pod
metadata:
  name: macvlan-test-pod
  annotations:
    # Multus attaches the network named "macvlan-network" -- matches the
    # MacvlanNetwork created in Step 3.
    k8s.v1.cni.cncf.io/networks: macvlan-network
spec:
  containers:
  - name: test-container
    image: mellanox/rping-test
    # Keep the container alive so it can be exec'd into for verification.
    command: ["/bin/bash", "-c", "sleep infinity"]
    securityContext:
      capabilities:
        # NOTE(review): IPC_LOCK is typically required for RDMA memory
        # pinning -- confirm against the device-plugin documentation.
        add: ["IPC_LOCK"]
    resources:
      # Resource name = "rdma/" + the plugin's "resourceName" from Step 1.
      # requests == limits, as required for extended resources.
      requests:
        rdma/rdma_shared_device_a: 1
      limits:
        rdma/rdma_shared_device_a: 1
kubectl apply -f pod.yaml

Verify the deployment — check that the macvlan interface was attached and that the RDMA device is visible inside the pod:

kubectl exec -it macvlan-test-pod -- ip addr show
kubectl exec -it macvlan-test-pod -- ibv_devinfo

Complete Configuration

# NicClusterPolicy: deploys the RDMA shared device plugin, nv-ipam, and the
# secondary-network stack (CNI plugins + Multus).
apiVersion: mellanox.com/v1alpha1
kind: NicClusterPolicy
metadata:
  name: nic-cluster-policy
spec:
  rdmaSharedDevicePlugin:
    image: k8s-rdma-shared-dev-plugin
    # NOTE(review): staging registry and beta tag -- confirm for production.
    repository: nvcr.io/nvstaging/mellanox
    version: network-operator-v26.4.0-beta.7
    # Embedded JSON config (block scalar -- no YAML comments inside):
    # "resourceName" is consumed by pods as "rdma/rdma_shared_device_a";
    # "ifNames" must match the MacvlanNetwork "master" interface below.
    config: |
      {
        "configList": [
          {
            "resourceName": "rdma_shared_device_a",
            "rdmaHcaMax": 63,
            "selectors": {
              "ifNames": ["ens1f0"]
            }
          }
        ]
      }
  # IPAM component serving the IPPool defined below.
  nvIpam:
    image: nvidia-k8s-ipam
    repository: nvcr.io/nvstaging/mellanox
    version: network-operator-v26.4.0-beta.7
    imagePullSecrets: []
    enableWebhook: false
  # CNI binaries plus Multus for attaching secondary networks to pods.
  secondaryNetwork:
    cniPlugins:
      image: plugins
      repository: nvcr.io/nvstaging/mellanox
      version: network-operator-v26.4.0-beta.7
    multus:
      image: multus-cni
      repository: nvcr.io/nvstaging/mellanox
      version: network-operator-v26.4.0-beta.7
---
# IPPool for nv-ipam; referenced by name from the MacvlanNetwork ipam config.
apiVersion: nv-ipam.nvidia.com/v1alpha1
kind: IPPool
metadata:
  name: macvlan-pool
  namespace: nvidia-network-operator
spec:
  subnet: 192.168.4.0/24
  # perNodeBlockSize: presumably IPs reserved per node -- confirm in docs.
  perNodeBlockSize: 50
  # Gateway must lie inside the subnet above.
  gateway: 192.168.4.1
---
# MacvlanNetwork: rendered into a NetworkAttachmentDefinition that pods
# reference via the k8s.v1.cni.cncf.io/networks annotation.
apiVersion: mellanox.com/v1alpha1
kind: MacvlanNetwork
metadata:
  name: macvlan-network
spec:
  networkNamespace: "default"
  # Parent interface; must match the plugin's "ifNames" selector above.
  master: "ens1f0"
  mode: "bridge"
  mtu: 1500
  # Embedded JSON IPAM config (no YAML comments inside the block scalar);
  # "poolName" references the IPPool defined above.
  ipam: |
    {
      "type": "nv-ipam",
      "poolName": "macvlan-pool"
    }
---
# Test pod: attaches the macvlan network and requests one RDMA shared device.
apiVersion: v1
kind: Pod
metadata:
  name: macvlan-test-pod
  annotations:
    # Multus attaches the MacvlanNetwork named "macvlan-network".
    k8s.v1.cni.cncf.io/networks: macvlan-network
spec:
  containers:
  - name: test-container
    image: mellanox/rping-test
    # Keep the container alive for interactive verification.
    command: ["/bin/bash", "-c", "sleep infinity"]
    securityContext:
      capabilities:
        # NOTE(review): IPC_LOCK is typically needed for RDMA memory
        # pinning -- confirm against the device-plugin documentation.
        add: ["IPC_LOCK"]
    resources:
      # "rdma/" + the plugin's "resourceName"; requests == limits.
      requests:
        rdma/rdma_shared_device_a: 1
      limits:
        rdma/rdma_shared_device_a: 1