Zero Code NSX Advanced LB Automation with Terraform

VMware NSX Advanced Load Balancer (Avi Networks) provides multi-cloud load balancing, web application firewall, application analytics and container ingress services across on-premises data centers and any cloud.

Terraform is a widely adopted Infrastructure as Code tool that allows you to define your infrastructure in a simple, declarative language, and to deploy and manage that infrastructure across public cloud providers including AWS, Azure and Google Cloud. NSX Advanced Load Balancer (aka Avi load balancer) is fully supported by Terraform, and each Avi REST resource is exposed as a resource in Terraform. By using the Terraform Avi provider, you can achieve Infrastructure as Code for your load balancing services.

In this blog, I will show you how easy it is to build an LBaaS service (local load balancing plus global load balancing across two DCs) for a critical (99.99%+ SLA) web application on NSX Advanced Load Balancer via Terraform in minutes.

My testing environment is set up as below:

  • Two DCs: site01 and site02
  • A controller cluster in each site
  • Two GSLB sites configured, with site01 as the leader site
  • Terraform v0.12
  • NSX Advanced Load Balancer v18.2.9

The Terraform plan will create the following resources:

  • Five web servers as pool members in each DC
  • Two local load balancing pools in each DC: the first two web servers are members of pool1 and the remaining three web servers are members of pool2
  • A pool group in each DC, which includes the above two pools: pool1 is In Service and pool2 is Out of Service
  • A virtual service in each DC to provide local load balancing
  • An SSL profile in each DC to define how an SSL session is terminated on the NSX Advanced Load Balancer
  • An HTTP cookie-based persistence profile in each DC to provide web session persistence for local load balancing
  • A certificate and key for the web application's HTTPS service
  • An HTTP health monitor in each DC to check the health of the local load balancing pool members
  • A global load balancing PKI profile
  • A global load balancing health monitor
  • A global load balancing persistence profile
  • A global load balancing service

Also, a few outputs are defined to show the results of the Terraform plan.

You can access main.tf and variables.tf on GitHub here.

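It is good practice to pin the Terraform and provider versions so that a future release does not change the plan's behavior. Below is a minimal sketch; the "~> 0.2" constraint is only an example, so pin to whichever Avi provider release matches your controller version.

terraform {
  required_version = ">= 0.12"
  required_providers {
    # Example constraint only - use the Avi provider release that matches your controller
    avi = "~> 0.2"
  }
}
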
# Provider for site01 (the default provider instance)
provider "avi" {
  avi_username = "admin"
  avi_tenant = "admin"
  avi_password = "password"
  avi_controller= var.site1controller
}

provider "avi" {
  avi_username = "admin"
  avi_tenant = "admin"
  alias = "site02"
  avi_password = "password"
  avi_controller= var.site2controller
}

data "avi_tenant" "default_tenant" {
  name = "admin"
}

data "avi_cloud" "default_cloud" {
  name = "Default-Cloud"
}

data "avi_tenant" "site02_default_tenant" {
  provider = avi.site02
  name = "admin"
}

data "avi_cloud" "site02_default_cloud" {
  provider = avi.site02
  name = "Default-Cloud"
}

data "avi_serviceenginegroup" "se_group" {
  name      = "Default-Group"
  cloud_ref = data.avi_cloud.default_cloud.id
}

data "avi_gslb" "gslb_demo" {
  name = "Default"
}

data "avi_virtualservice" "site01_vs01" {
  name = "gslb_site01_vs01"
}

data "avi_virtualservice" "site02_vs01" {
  name = "gslb_site02_vs01"
}

data "avi_applicationprofile" "site01_system_https_profile" {
  name = "System-Secure-HTTP"
}

data "avi_applicationprofile" "site02_system_https_profile" {
  provider = avi.site02
  name = "System-Secure-HTTP"
}

### Start of Site01 setup
resource "avi_sslprofile" "site01_sslprofile" {
    name = "site01_sslprofile"
    ssl_session_timeout = 86400
    tenant_ref = data.avi_tenant.default_tenant.id
    accepted_ciphers = "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA"
    prefer_client_cipher_ordering = false
    enable_ssl_session_reuse = true
    accepted_versions {
      type = "SSL_VERSION_TLS1_1"
    }
    accepted_versions {
      type = "SSL_VERSION_TLS1_2"
    }
    cipher_enums = [
      "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
      "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
      "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
      "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
      "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
      "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
      "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
      "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"]
    send_close_notify = true
    type = "SSL_PROFILE_TYPE_APPLICATION"
    enable_early_data = false
    ssl_rating {
      compatibility_rating = "SSL_SCORE_EXCELLENT"
      security_score = 100.0
      performance_rating = "SSL_SCORE_EXCELLENT"
    }
  }

resource "avi_applicationpersistenceprofile" "site01_applicationpersistenceprofile" {
  name  = "site01_app-pers-profile"
  tenant_ref = data.avi_tenant.default_tenant.id
  is_federated = false
  persistence_type = "PERSISTENCE_TYPE_HTTP_COOKIE"
  http_cookie_persistence_profile {
    cookie_name = "sddc01-vs01-cookie01"
    always_send_cookie = false
    timeout = 15
  }
}

resource "avi_vsvip" "site01_vs01_vip" {
  name = "site01_vs01_vip"
  tenant_ref = data.avi_tenant.default_tenant.id
  cloud_ref  = data.avi_cloud.default_cloud.id
  vip {
    vip_id = "0"
    ip_address {
      type = "V4"
      addr = var.gslb_site01_vs01_vip
    }
  }
}

resource "avi_sslkeyandcertificate" "site01_cert1000" {
    name = "site01_cert1000"
    tenant_ref = data.avi_tenant.default_tenant.id
    certificate {
        certificate = file("${path.module}/www.sddc.vmconaws.link.crt")
        }
    key = file("${path.module}/www.sddc.vmconaws.link.key")
    type= "SSL_CERTIFICATE_TYPE_VIRTUALSERVICE"
}

resource "avi_virtualservice" "gslb_site01_vs01" {
  name = "gslb_site01_vs01"
  tenant_ref = data.avi_tenant.default_tenant.id
  cloud_ref  = data.avi_cloud.default_cloud.id
  pool_group_ref = avi_poolgroup.site01_pg-1.id
  vsvip_ref  = avi_vsvip.site01_vs01_vip.id
  application_profile_ref = data.avi_applicationprofile.site01_system_https_profile.id
  services {
        port = 443
        enable_ssl = true
        port_range_end = 443
        }
  cloud_type                   = "CLOUD_VCENTER"
  ssl_key_and_certificate_refs = [avi_sslkeyandcertificate.site01_cert1000.id]
  ssl_profile_ref = avi_sslprofile.site01_sslprofile.id
}

resource "avi_healthmonitor" "site01_hm_1" {
  name = "site01_monitor"
  type = "HEALTH_MONITOR_HTTP"
  tenant_ref = data.avi_tenant.default_tenant.id
  receive_timeout = "4"
  is_federated = false
  failed_checks = "3"
  send_interval = "10"
  http_monitor {
        exact_http_request = false
        http_request = "HEAD / HTTP/1.0"
        http_response_code = ["HTTP_2XX","HTTP_3XX","HTTP_4XX"]
        }
  successful_checks = "3"
}

resource "avi_pool" "site01_pool-1" {
  name = "site01_pool-1"
  health_monitor_refs = [avi_healthmonitor.site01_hm_1.id]
  tenant_ref = data.avi_tenant.default_tenant.id
  cloud_ref  = data.avi_cloud.default_cloud.id
  application_persistence_profile_ref = avi_applicationpersistenceprofile.site01_applicationpersistenceprofile.id
  fail_action {
    type = "FAIL_ACTION_CLOSE_CONN"
  }
  lb_algorithm = "LB_ALGORITHM_LEAST_CONNECTIONS"
}

resource "avi_pool" "site01_pool-2" {
  name = "site01_pool-2"
  tenant_ref = data.avi_tenant.default_tenant.id
  cloud_ref = data.avi_cloud.default_cloud.id
  application_persistence_profile_ref = avi_applicationpersistenceprofile.site01_applicationpersistenceprofile.id
  fail_action {
    type = "FAIL_ACTION_CLOSE_CONN"
  }
  ignore_servers = true
}

resource "avi_poolgroup" "site01_pg-1" {
  name = "site01_pg-1"
  tenant_ref = data.avi_tenant.default_tenant.id
  cloud_ref = data.avi_cloud.default_cloud.id
  members {
    pool_ref = avi_pool.site01_pool-1.id
    ratio = 100
    deployment_state = "IN_SERVICE"
  }
  members {
    pool_ref = avi_pool.site01_pool-2.id
    ratio = 0
    deployment_state = "OUT_OF_SERVICE"
  }
}

resource "avi_server" "site01_server_web11" {
  ip       = var.avi_site01_server_web11
  port     = "80"
  pool_ref = avi_pool.site01_pool-1.id
  hostname = "server_web11"
}

resource "avi_server" "site01_server_web12" {
  ip       = var.avi_site01_server_web12
  port     = "80"
  pool_ref = avi_pool.site01_pool-1.id
  hostname = "server_web12"
}

resource "avi_server" "site01_server_web13" {
  ip       = var.avi_site01_server_web13
  port     = "80"
  pool_ref = avi_pool.site01_pool-2.id
  hostname = "server_web13"
}

resource "avi_server" "site01_server_web14" {
  ip       = var.avi_site01_server_web14
  port     = "80"
  pool_ref = avi_pool.site01_pool-2.id
  hostname = "server_web14"
}

resource "avi_server" "site01_server_web15" {
  ip = var.avi_site01_server_web15
  port = "80"
  pool_ref = avi_pool.site01_pool-2.id
  hostname = "server_web15"
}

### End of Site01 setup ###
### Start of Site02 setup ###
resource "avi_sslprofile" "site02_sslprofile" {
    provider = avi.site02
    name = "site02_sslprofile"
    ssl_session_timeout = 86400
    tenant_ref = data.avi_tenant.site02_default_tenant.id
    accepted_ciphers = "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA"
    prefer_client_cipher_ordering = false
    enable_ssl_session_reuse = true
    accepted_versions {
      type = "SSL_VERSION_TLS1_1"
    }
    accepted_versions {
      type = "SSL_VERSION_TLS1_2"
    }
    cipher_enums = [
      "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
      "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
      "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
      "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
      "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
      "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
      "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
      "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"]
    send_close_notify = true
    type = "SSL_PROFILE_TYPE_APPLICATION"
    enable_early_data = false
    ssl_rating {
      compatibility_rating = "SSL_SCORE_EXCELLENT"
      security_score = 100.0
      performance_rating = "SSL_SCORE_EXCELLENT"
    }
  }


resource "avi_applicationpersistenceprofile" "site02_applicationpersistenceprofile" {
  provider = avi.site02
  tenant_ref = data.avi_tenant.site02_default_tenant.id
  name  = "site02_app-pers-profile"
  is_federated = false
  persistence_type = "PERSISTENCE_TYPE_HTTP_COOKIE"
  http_cookie_persistence_profile {
    cookie_name = "sddc01-vs01-cookie01"
    always_send_cookie = false
    timeout = 15
  }
}

resource "avi_vsvip" "site02_vs01_vip" {
  provider = avi.site02
  cloud_ref = data.avi_cloud.site02_default_cloud.id
  tenant_ref = data.avi_tenant.site02_default_tenant.id
  name = "site02_vs01_vip"
  vip {
    vip_id = "0"
    ip_address {
      type = "V4"
      addr = var.gslb_site02_vs01_vip
    }
  }
}

resource "avi_sslkeyandcertificate" "site02_cert1000" {
  provider = avi.site02
  tenant_ref = data.avi_tenant.site02_default_tenant.id
  name = "site02_cert1000"
  certificate {
      certificate = file("${path.module}/www.sddc.vmconaws.link.crt")
      }
  key = file("${path.module}/www.sddc.vmconaws.link.key")
  type= "SSL_CERTIFICATE_TYPE_VIRTUALSERVICE"
}

resource "avi_virtualservice" "gslb_site02_vs01" {
  provider = avi.site02
  cloud_ref = data.avi_cloud.site02_default_cloud.id
  tenant_ref = data.avi_tenant.site02_default_tenant.id
  name = "gslb_site02_vs01"
  pool_group_ref = avi_poolgroup.site02_pg-1.id
  vsvip_ref  = avi_vsvip.site02_vs01_vip.id
  application_profile_ref = data.avi_applicationprofile.site02_system_https_profile.id
  services {
        port = 443
        enable_ssl = true
        port_range_end = 443
        }
  cloud_type = "CLOUD_VCENTER"
  ssl_key_and_certificate_refs = [avi_sslkeyandcertificate.site02_cert1000.id]
  ssl_profile_ref = avi_sslprofile.site02_sslprofile.id
}

resource "avi_healthmonitor" "site02_hm_1" {
  provider = avi.site02
  tenant_ref = data.avi_tenant.site02_default_tenant.id
  name = "site02_monitor"
  type  = "HEALTH_MONITOR_HTTP"
  receive_timeout = "4"
  is_federated = false
  failed_checks = "3"
  send_interval = "10"
  http_monitor {
        exact_http_request = false
        http_request = "HEAD / HTTP/1.0"
        http_response_code = ["HTTP_2XX","HTTP_3XX","HTTP_4XX"]
        }
  successful_checks = "3"
}

resource "avi_pool" "site02_pool-1" {
  provider = avi.site02
  cloud_ref = data.avi_cloud.site02_default_cloud.id
  tenant_ref = data.avi_tenant.site02_default_tenant.id
  name = "site02_pool-1"
  health_monitor_refs = [avi_healthmonitor.site02_hm_1.id]
  application_persistence_profile_ref = avi_applicationpersistenceprofile.site02_applicationpersistenceprofile.id
  fail_action {
    type = "FAIL_ACTION_CLOSE_CONN"
  }
  lb_algorithm = "LB_ALGORITHM_LEAST_CONNECTIONS"
}

resource "avi_pool" "site02_pool-2" {
  provider = avi.site02
  cloud_ref = data.avi_cloud.site02_default_cloud.id
  tenant_ref = data.avi_tenant.site02_default_tenant.id
  name = "site02_pool-2"
  application_persistence_profile_ref = avi_applicationpersistenceprofile.site02_applicationpersistenceprofile.id
  fail_action {
    type = "FAIL_ACTION_CLOSE_CONN"
  }
  ignore_servers = true
}

resource "avi_poolgroup" "site02_pg-1" {
  provider = avi.site02
  cloud_ref = data.avi_cloud.site02_default_cloud.id
  tenant_ref = data.avi_tenant.site02_default_tenant.id
  name = "site02_pg-1"
  members {
    pool_ref = avi_pool.site02_pool-1.id
    ratio = 100
    deployment_state = "IN_SERVICE"
  }
  members {
    pool_ref = avi_pool.site02_pool-2.id
    ratio = 0
    deployment_state = "OUT_OF_SERVICE"
  }
}

resource "avi_server" "site02_server_web21" {
  provider = avi.site02
  ip = var.avi_site02_server_web21
  port = "80"
  pool_ref = avi_pool.site02_pool-1.id
  hostname = "server_web21"
}

resource "avi_server" "site02_server_web22" {
  provider = avi.site02
  ip = var.avi_site02_server_web22
  port = "80"
  pool_ref = avi_pool.site02_pool-1.id
  hostname = "server_web22"
}


resource "avi_server" "site02_server_web23" {
  provider = avi.site02
  ip = var.avi_site02_server_web23
  port = "80"
  pool_ref = avi_pool.site02_pool-2.id
  hostname = "server_web23"
}

resource "avi_server" "site02_server_web24" {
  provider = avi.site02
  ip = var.avi_site02_server_web24
  port = "80"
  pool_ref = avi_pool.site02_pool-2.id
  hostname = "server_web24"
}

resource "avi_server" "site02_server_web25" {
  provider = avi.site02
  ip = var.avi_site02_server_web25
  port = "80"
  pool_ref = avi_pool.site02_pool-2.id
  hostname = "server_web25"
}

### END of Site02 Setting ###

### Start of GSLB setup ###

# A federated PKI profile only needs to be created on one GSLB site; federated objects are replicated to the other sites
resource "avi_pkiprofile" "terraform_gslb_pki" {
    name = "terraform_gslb_pki"
    tenant_ref = data.avi_tenant.default_tenant.id
    crl_check = false
    is_federated = true
    ignore_peer_chain = false
    validate_only_leaf_crl = true
    ca_certs {
      certificate = file("${path.module}/ca-bundle.crt")
    }
}

resource "avi_applicationpersistenceprofile" "terraform_gslbsite_pesistence" {
  name = "terraform_gslbsite_pesistence"
  tenant_ref = data.avi_tenant.default_tenant.id
  is_federated = true
  persistence_type = "PERSISTENCE_TYPE_GSLB_SITE"
  http_cookie_persistence_profile {
    cookie_name = "sddc01-vs01-cookie01"
    always_send_cookie = false
    timeout = 15
  }
}

resource "avi_healthmonitor" "terraform_gslbsite_hm01" {
  name = "terraform_gslbsite_hm01"
  type = "HEALTH_MONITOR_PING"
  tenant_ref = data.avi_tenant.default_tenant.id
  is_federated = true
  failed_checks = "3"
  send_interval = "10"
  successful_checks = "3"
}

resource "avi_gslbservice" "terraform_gslb-01" {
  name = "terraform_gslb-01"
  tenant_ref = data.avi_tenant.default_tenant.id
  domain_names = [var.gslb_dns]
  depends_on = [
    avi_pkiprofile.terraform_gslb_pki
  ]
  wildcard_match = false
  application_persistence_profile_ref = avi_applicationpersistenceprofile.terraform_gslbsite_pesistence.id
  health_monitor_refs = [avi_healthmonitor.terraform_gslbsite_hm01.id]
  site_persistence_enabled = true
  is_federated = false
  use_edns_client_subnet= true
  enabled = true
  groups { 
      priority = 10
      consistent_hash_mask=31
      consistent_hash_mask6=31
      members {
        ip {
           type = "V4"
           addr = var.gslb_site01_vs01_vip
        }
        vs_uuid = avi_virtualservice.gslb_site01_vs01.uuid
        cluster_uuid = element(data.avi_gslb.gslb_demo.sites.*.cluster_uuid, index(data.avi_gslb.gslb_demo.sites.*.name,var.site01_name))
        ratio = 1
        enabled = true
      }
     members {
        ip {
           type = "V4"
           addr = var.gslb_site02_vs01_vip
        }
        vs_uuid = avi_virtualservice.gslb_site02_vs01.uuid
        cluster_uuid = element(data.avi_gslb.gslb_demo.sites.*.cluster_uuid, index(data.avi_gslb.gslb_demo.sites.*.name,var.site02_name))
        ratio = 1
        enabled = true
      }
      name = "${var.gslb_dns}-pool"
      algorithm = "GSLB_ALGORITHM_ROUND_ROBIN"      
    }
}
### Output ###
output "gslb-site01_site_number" {
  value = "${index(data.avi_gslb.gslb_demo.sites.*.name,var.site01_name)}"
  description = "gslb-site01_site_number"
}

output "gslb-site02_site_number" {
  value = "${index(data.avi_gslb.gslb_demo.sites.*.name,var.site02_name)}"
  description = "gslb-site02_site_number"
}

output "gslb_site01" {
  value = "${element(data.avi_gslb.gslb_demo.sites.*.cluster_uuid,0)}"
  description = "gslb_site01"
}

output "gslb_site02" {
  value = "${element(data.avi_gslb.gslb_demo.sites.*.cluster_uuid,1)}"
  description = "gslb_site02"
}

output "gslb_service" {
  value = avi_gslbservice.terraform_gslb-01.groups
  description = "gslb_service"
}

output "site01_vs01" {
  value = avi_virtualservice.gslb_site01_vs01
  description = "site01_vs01"
}

output "site02_vs01" {
  value = avi_virtualservice.gslb_site02_vs01
  description = "site02_vs01"
}

Let’s apply the plan and then we can take it easy and enjoy the day.

zhangda@zhangda-a01 automation % terraform apply --auto-approve
data.avi_virtualservice.site01_vs01: Refreshing state...
data.avi_tenant.site02_default_tenant: Refreshing state...
data.avi_gslb.gslb_demo: Refreshing state...
data.avi_virtualservice.site02_vs01: Refreshing state...
data.avi_cloud.site02_default_cloud: Refreshing state...
data.avi_tenant.default_tenant: Refreshing state...
data.avi_cloud.default_cloud: Refreshing state...
data.avi_applicationprofile.site02_system_https_profile: Refreshing state...
data.avi_applicationprofile.site01_system_https_profile: Refreshing state...
data.avi_serviceenginegroup.se_group: Refreshing state...
avi_applicationpersistenceprofile.site02_applicationpersistenceprofile: Creating...
avi_healthmonitor.site02_hm_1: Creating...
avi_sslkeyandcertificate.site02_cert1000: Creating...
avi_vsvip.site02_vs01_vip: Creating...
avi_sslprofile.site02_sslprofile: Creating...
avi_applicationpersistenceprofile.terraform_gslbsite_pesistence: Creating...
avi_healthmonitor.site01_hm_1: Creating...
avi_healthmonitor.terraform_gslbsite_hm01: Creating...
avi_vsvip.site01_vs01_vip: Creating...
avi_pkiprofile.terraform_gslb_pki: Creating...
avi_healthmonitor.site02_hm_1: Creation complete after 1s [id=https://10.1.1.170/api/healthmonitor/healthmonitor-f05a117d-93fe-4a35-b442-391bc815ff8d]
avi_sslprofile.site01_sslprofile: Creating...
avi_applicationpersistenceprofile.site02_applicationpersistenceprofile: Creation complete after 1s [id=https://10.1.1.170/api/applicationpersistenceprofile/applicationpersistenceprofile-2cd82839-0b86-4a25-a212-694c3b8b41b9]
avi_applicationpersistenceprofile.site01_applicationpersistenceprofile: Creating...
avi_sslprofile.site02_sslprofile: Creation complete after 2s [id=https://10.1.1.170/api/sslprofile/sslprofile-fa44f77c-dfe0-494a-902b-e724980d139e]
avi_sslkeyandcertificate.site01_cert1000: Creating...
avi_vsvip.site02_vs01_vip: Creation complete after 2s [id=https://10.1.1.170/api/vsvip/vsvip-2391e848-1b49-4383-ab7a-b2829c6c5406]
avi_pool.site02_pool-1: Creating...
avi_sslkeyandcertificate.site02_cert1000: Creation complete after 2s [id=https://10.1.1.170/api/sslkeyandcertificate/sslkeyandcertificate-90baec49-afa0-4ef3-974d-7357fef77e0d]
avi_pool.site02_pool-2: Creating...
avi_applicationpersistenceprofile.site01_applicationpersistenceprofile: Creation complete after 1s [id=https://10.1.1.250/api/applicationpersistenceprofile/applicationpersistenceprofile-f45f0852-2515-4528-ae65-c48a670ca7ac]
avi_pool.site01_pool-2: Creating...
avi_pool.site02_pool-1: Creation complete after 0s [id=https://10.1.1.170/api/pool/pool-859248df-8ea6-4a00-a8ea-976cc31175a9]
avi_server.site02_server_web21: Creating...
avi_applicationpersistenceprofile.terraform_gslbsite_pesistence: Creation complete after 3s [id=https://10.1.1.250/api/applicationpersistenceprofile/applicationpersistenceprofile-cf887192-0d57-4b91-a7cb-37d787f9aeb2]
avi_server.site02_server_web22: Creating...
avi_sslprofile.site01_sslprofile: Creation complete after 2s [id=https://10.1.1.250/api/sslprofile/sslprofile-1464ded3-7a10-4e76-bfc3-0cdb186ff248]
avi_server.site02_server_web22: Creation complete after 0s [id=pool-859248df-8ea6-4a00-a8ea-976cc31175a9:192.168.202.20:80]
avi_healthmonitor.terraform_gslbsite_hm01: Creation complete after 4s [id=https://10.1.1.250/api/healthmonitor/healthmonitor-003f5015-2a2a-4e65-aff3-1071365a8428]
avi_healthmonitor.site01_hm_1: Creation complete after 4s [id=https://10.1.1.250/api/healthmonitor/healthmonitor-dacd7a40-dc90-4e67-932f-34e94a550fb8]
avi_pool.site01_pool-1: Creating...
avi_vsvip.site01_vs01_vip: Creation complete after 4s [id=https://10.1.1.250/api/vsvip/vsvip-16b0ba87-2703-4fb2-abab-9a8b0bf34ae0]
avi_pool.site02_pool-2: Creation complete after 2s [id=https://10.1.1.170/api/pool/pool-9ca21978-59d5-455f-ba78-01fb9c747b43]
avi_pool.site01_pool-2: Creation complete after 2s [id=https://10.1.1.250/api/pool/pool-47d64222-46b7-4402-ae38-afd47f3f5272]
avi_server.site02_server_web24: Creating...
avi_server.site02_server_web25: Creating...
avi_server.site02_server_web23: Creating...
avi_pool.site01_pool-1: Creation complete after 0s [id=https://10.1.1.250/api/pool/pool-e3c37b13-0950-4320-a643-afa5d3177624]
avi_poolgroup.site02_pg-1: Creating...
avi_server.site01_server_web14: Creating...
avi_server.site01_server_web15: Creating...
avi_server.site01_server_web13: Creating...
avi_poolgroup.site02_pg-1: Creation complete after 1s [id=https://10.1.1.170/api/poolgroup/poolgroup-4197b0b4-d486-455e-8583-bff1fc173fb8]
avi_server.site02_server_web23: Creation complete after 1s [id=pool-9ca21978-59d5-455f-ba78-01fb9c747b43:192.168.202.30:80]
avi_poolgroup.site01_pg-1: Creating...
avi_server.site01_server_web11: Creating...
avi_server.site02_server_web21: Creation complete after 3s [id=pool-859248df-8ea6-4a00-a8ea-976cc31175a9:192.168.202.10:80]
avi_server.site01_server_web12: Creating...
avi_server.site02_server_web25: Creation complete after 1s [id=pool-9ca21978-59d5-455f-ba78-01fb9c747b43:192.168.202.50:80]
avi_virtualservice.gslb_site02_vs01: Creating...
avi_server.site01_server_web13: Creation complete after 1s [id=pool-47d64222-46b7-4402-ae38-afd47f3f5272:192.168.101.30:80]
avi_server.site02_server_web24: Creation complete after 1s [id=pool-9ca21978-59d5-455f-ba78-01fb9c747b43:192.168.202.40:80]
avi_server.site01_server_web14: Creation complete after 1s [id=pool-47d64222-46b7-4402-ae38-afd47f3f5272:192.168.101.40:80]
avi_sslkeyandcertificate.site01_cert1000: Creation complete after 3s [id=https://10.1.1.250/api/sslkeyandcertificate/sslkeyandcertificate-1963b9c2-7402-4d32-88f7-b8b57d7bf1e5]
avi_virtualservice.gslb_site02_vs01: Creation complete after 0s [id=https://10.1.1.170/api/virtualservice/virtualservice-310ba2ed-f48f-4a0d-a29e-71a2b9dd2567]
avi_poolgroup.site01_pg-1: Creation complete after 0s [id=https://10.1.1.250/api/poolgroup/poolgroup-21284b51-1f7d-41e3-83c3-078800fdea1d]
avi_virtualservice.gslb_site01_vs01: Creating...
avi_server.site01_server_web15: Creation complete after 2s [id=pool-47d64222-46b7-4402-ae38-afd47f3f5272:192.168.101.50:80]
avi_server.site01_server_web11: Creation complete after 1s [id=pool-e3c37b13-0950-4320-a643-afa5d3177624:192.168.101.10:80]
avi_server.site01_server_web12: Creation complete after 1s [id=pool-e3c37b13-0950-4320-a643-afa5d3177624:192.168.101.20:80]
avi_virtualservice.gslb_site01_vs01: Creation complete after 1s [id=https://10.1.1.250/api/virtualservice/virtualservice-fbecfed3-2397-4df8-9b76-659f50fcc5f8]
avi_pkiprofile.terraform_gslb_pki: Still creating... [10s elapsed]
avi_pkiprofile.terraform_gslb_pki: Creation complete after 11s [id=https://10.1.1.250/api/pkiprofile/pkiprofile-4333ded8-6ec5-43d0-a677-d68a632bc523]
avi_gslbservice.terraform_gslb-01: Creating...
avi_gslbservice.terraform_gslb-01: Creation complete after 2s [id=https://10.1.1.250/api/gslbservice/gslbservice-38f887ef-87ed-446d-a66f-83d42da39289]

Apply complete! Resources: 32 added, 0 changed, 0 destroyed.

This is the end of this blog. Thank you for reading! 😀

Integrate VMware NSX-T with Kubernetes

Kubernetes (K8s) is an open-source system for automating deployment, scaling, and management of containerized applications. K8s uses a network plugin to provide the required networking functions such as routing, switching, firewalling and load balancing. VMware NSX-T provides a network plugin for K8s as well, called NCP (NSX Container Plug-in). If you want to know more about VMware NSX-T, please go to docs.vmware.com.

In this blog, I will show you how to integrate VMware NSX-T with Kubernetes.

Here, we will build a three-node, single-master K8s cluster. All three nodes are RHEL 7.5 virtual machines.

  • master node:
    • Hostname: master.k8s
    • Mgmt IP: 10.1.73.233
  • worker node1:
    • Hostname: node1.k8s
    • Mgmt IP: 10.1.73.234
  • worker node2:
    • Hostname: node2.k8s
    • Mgmt IP: 10.1.73.235

Each node has two vNICs attached. The first vNIC, ens192, is used for management; the second vNIC, ens224, is used for K8s transport and is connected to an overlay logical switch.

  • NSX-T version: 2.3.0.0.0.10085405
  • NSX-T NCP version: 2.3.1.10693410
  • Docker version: 18.03.1-ce
  • K8s version: 1.11.4

1. Prepare K8s Cluster Setup

1.1 Get Offline Packages and Docker Images

As there is no Internet access in my environment, I have to prepare my K8s cluster offline. To do that, I need to get the following packages:

  • Docker offline installation packages
  • Kubeadm offline installation packages which will be used to set up the K8s cluster;
  • Docker offline images;

1.1.1 Docker Offline Installation Packages

Regarding how to get Docker offline installation packages, please refer to my other blog: Install Docker Offline on Centos7.

1.1.2 Kubeadm Offline Installation Packages

Getting the kubeadm offline installation packages is quite straightforward as well. You can use yum with the --downloadonly option.

yum install --downloadonly --downloaddir=/root/ kubelet-1.11.0
yum install --downloadonly --downloaddir=/root/ kubeadm-1.11.0
yum install --downloadonly --downloaddir=/root/ kubectl-1.11.0

1.1.3 Docker Offline Images

Below are the required Docker images for the K8s cluster.

  • k8s.gcr.io/kube-proxy-amd64 v1.11.4
  • k8s.gcr.io/kube-apiserver-amd64 v1.11.4
  • k8s.gcr.io/kube-controller-manager-amd64 v1.11.4
  • k8s.gcr.io/kube-scheduler-amd64 v1.11.4
  • k8s.gcr.io/coredns 1.1.3
  • k8s.gcr.io/etcd-amd64 3.2.18
  • k8s.gcr.io/pause-amd64 3.1
  • k8s.gcr.io/pause 3.1

You may notice that the list above includes two identical pause images, although they have different repository names. There is a story behind this. Initially, I only loaded the first image, k8s.gcr.io/pause-amd64. The setup passed the “kubeadm init” pre-flight checks but failed at the actual cluster setup stage. When I checked the logs, I found that the cluster setup process kept requesting the second image. I suspect this is a bug in kubeadm v1.11.0, which I am using.

Here is an example of how to use “docker pull” to download a Docker image, in case you are not familiar with it.

docker pull k8s.gcr.io/kube-proxy-amd64:v1.11.4

Once you have all the Docker images, export them as offline images via “docker save”.

docker save k8s.gcr.io/pause-amd64:3.1 -o /pause-amd64:3.1.docker

Now it is time to upload all the installation packages and offline images to all three K8s nodes, including the master node.
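
If SSH access is in place, a simple loop like the one below can push the offline bundles to every node; the /root/k8s-offline path is just an example, so use wherever you staged the packages and images.

for node in master.k8s node1.k8s node2.k8s; do
  scp -r /root/k8s-offline root@${node}:/root/
done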

1.2 Disable SELinux and Firewalld

# disable SELinux
setenforce 0
# Change SELINUX to permissive for /etc/selinux/config
vi /etc/selinux/config
SELINUX=permissive
# Stop and disable firewalld
systemctl disable firewalld && systemctl stop firewalld

1.3 Config DNS Resolution

# Update the /etc/hosts file as below on all three K8s nodes
10.1.73.233   master.k8s
10.1.73.234   node1.k8s
10.1.73.235   node2.k8s

1.4 Install Docker and Kubeadm

To install Docker and kubeadm, first put the required packages for Docker and for kubeadm into separate directories. For example, all the required packages for kubeadm go into a directory called kubeadm. Then use rpm to install kubeadm as below:

[root@master kubeadm]# rpm -ivh --replacefiles --replacepkgs *.rpm
warning: 53edc739a0e51a4c17794de26b13ee5df939bd3161b37f503fe2af8980b41a89-cri-tools-1.12.0-0.x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID 3e1ba8d5: NOKEY
warning: socat-1.7.3.2-2.el7.x86_64.rpm: Header V3 RSA/SHA256 Signature, key ID f4a80eb5: NOKEY
Preparing...                          ########################## [100%]
Updating / installing...
   1:socat-1.7.3.2-2.el7              ########################## [ 17%]
   2:kubernetes-cni-0.6.0-0           ########################## [ 33%]
   3:kubelet-1.11.0-0                 ########################## [ 50%]
   4:kubectl-1.11.0-0                 ########################## [ 67%]
   5:cri-tools-1.12.0-0               ########################## [ 83%]
   6:kubeadm-1.11.0-0                 #########################3 [100%]

After Docker and Kubeadm are installed, you can go to enable and start docker and kubelet service:

systemctl enable docker && systemctl start docker
systemctl enable kubelet && systemctl start kubelet

In addition, you need to perform some OS level setup so that your K8s environment can run properly.

# ENABLING THE NET.BRIDGE.BRIDGE-NF-CALL-IPTABLES KERNEL OPTION
sysctl -w net.bridge.bridge-nf-call-iptables=1
echo "net.bridge.bridge-nf-call-iptables=1" > /etc/sysctl.d/k8s.conf
# Disable Swap
swapoff -a && sed -i '/ swap / s/^/#/' /etc/fstab

1.5 Load Docker Offline Images

Now let us load all the offline Docker images into the local Docker repository on every K8s node via “docker load”.

docker load -i kube-apiserver-amd64:v1.11.4.docker
docker load -i coredns:1.1.3.docker
docker load -i etcd-amd64:3.2.18.docker
docker load -i kube-controller-manager-amd64:v1.11.4.docker
docker load -i kube-proxy-amd64:v1.11.4.docker
docker load -i kube-scheduler-amd64:v1.11.4.docker
docker load -i pause-amd64:3.1.docker
docker load -i pause:3.1.docker

1.6 NSX NCP Plugin

Now you can upload the NSX NCP plugin to all three nodes and load the NCP image into the local Docker repository.

1.6.1 Load NSX Container Image

docker load -i nsx-ncp-rhel-2.3.1.10693410.tar 

The Docker image list on your K8s nodes should now look similar to below:

[root@master ~]# docker image list
REPOSITORY                                   TAG                 IMAGE ID            CREATED             SIZE
registry.local/2.3.1.10693410/nsx-ncp-rhel   latest              97d54d5c80db        5 months ago        701MB
k8s.gcr.io/kube-proxy-amd64                  v1.11.4             5071d096cfcd        5 months ago        98.2MB
k8s.gcr.io/kube-apiserver-amd64              v1.11.4             de6de495c1f4        5 months ago        187MB
k8s.gcr.io/kube-controller-manager-amd64     v1.11.4             dc1d57df5ac0        5 months ago        155MB
k8s.gcr.io/kube-scheduler-amd64              v1.11.4             569cb58b9c03        5 months ago        56.8MB
k8s.gcr.io/coredns                           1.1.3               b3b94275d97c        11 months ago       45.6MB
k8s.gcr.io/etcd-amd64                        3.2.18              b8df3b177be2        12 months ago       219MB
k8s.gcr.io/pause-amd64                       3.1                 da86e6ba6ca1        16 months ago       742kB
k8s.gcr.io/pause                             3.1                 da86e6ba6ca1        16 months ago       742kB

1.6.2 Install NSX CNI

rpm -ivh --replacefiles nsx-cni-2.3.1.10693410-1.x86_64.rpm

Please note that the replacefiles option is required due to a known bug in NSX-T 2.3. If you don’t include the replacefiles option, you will see an error like below:

[root@master rhel_x86_64]# rpm -i nsx-cni-2.3.1.10693410-1.x86_64.rpm
   file /opt/cni/bin/loopback from install of nsx-cni-2.3.1.10693410-1.x86_64 conflicts with file from package kubernetes-cni-0.6.0-0.x86_64

1.6.3 Install and Config OVS

# Go to OpenvSwitch directory
rpm -ivh openvswitch-2.9.1.9968033.rhel75-1.x86_64.rpm
systemctl start openvswitch.service && systemctl enable openvswitch.service
ovs-vsctl add-br br-int
ovs-vsctl add-port br-int ens224 -- set Interface ens224 ofport_request=1
ip link set br-int up
ip link set ens224 up

2. Setup K8s Cluster

Now you are ready to set up your K8s cluster. I will use a kubeadm config file to define my K8s cluster when I initiate the cluster setup. Below is the content of my kubeadm config file.

apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
kubernetesVersion: v1.11.4
api:
  advertiseAddress: 10.1.73.233
  bindPort: 6443

From the above, you can see that Kubernetes version v1.11.4 will be used and that the API server IP is 10.1.73.233, which is the master node IP. Run the following CLI from the K8s master node to create the K8s cluster.

kubeadm init --config kubeadm.yml

After the K8s cluster is set up, you can join the remaining two worker nodes to the cluster via the CLI below:

kubeadm join 10.1.73.233:6443 --token up1nz9.iatqv50bkrqf0rcj --discovery-token-ca-cert-hash sha256:3f9e96e70a59f1979429435caa35d12270d60a7ca9f0a8436dff455e4b8ac1da

Note: You can get the required token and discovery-token-ca-cert-hash from the output of “kubeadm init”.
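
If the token has expired or you no longer have the original output handy, you can regenerate a complete join command on the master node (kubeadm has supported this since v1.9, so it applies to the v1.11 release used here):

kubeadm token create --print-join-command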

3. NSX-T and K8s Integration

3.1 Prepare NSX Resource

Before the integration, you have to make sure that the required NSX-T resources are configured in NSX Manager. The required resources include:

  • Overlay Transport Zone: overlay_tz
  • Tier 0 router: tier0_router
  • K8s Transport Logical Switch
  • IP Blocks for Kubernetes Pods: container_ip_blocks
  • IP Pool for SNAT: external_ip_pools
  • Firewall Marker Sections: top_firewall_section_marker and bottom_firewall_section_marker

Please refer to the NSX Container Plug-in for Kubernetes and Cloud Foundry – Installation and Administration Guide for details on how to create these NSX-T resources. The following are the UUIDs of the resources created in my environment:

  • tier0_router = c86a625e-54e0-4510-9185-e9e1b7e26eb9
  • overlay_tz = f6d90300-c56e-4d26-8684-8eff64cdf5a0
  • container_ip_blocks = f9e411f5-654e-4f0d-99e8-2e5a9812f295
  • external_ip_pools = 84ffd635-640f-41c6-be85-71337e112e69
  • top_firewall_section_marker = ab07e559-79aa-4bc9-a6f0-126ea59278c2
  • bottom_firewall_section_marker = 35aaa6c5-0870-4ac4-bf47-114780863956

In addition, make sure that you tag the switching ports that the three K8s nodes are attached to in the following way:

{'ncp/node_name': '<node_name>'}
{'ncp/cluster': '<cluster_name>'}

node_name is the FQDN hostname of the K8s node, and cluster_name is the name you give this cluster in NSX (not the name in the K8s cluster context). My K8s nodes’ tags are shown below.

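For example, the switching port that node1.k8s attaches to carries two tags like the following (the cluster name k8s-cluster01 is only an illustration; it must match the cluster name you configure for NCP):

{'ncp/node_name': 'node1.k8s'}
{'ncp/cluster': 'k8s-cluster01'}
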
k8s master switching port tags
k8s node1 switching port tags
k8s node2 switching port tags

3.2 Install NSX NCP Plugin

3.2.1 Create Name Space

kubectl create ns nsx-system

3.2.2 Create Service Account for NCP

kubectl apply -f rbac-ncp.yml -n nsx-system

3.2.3 Create NCP ReplicationController

kubectl apply -f ncp-rc.yml -n nsx-system

3.2.4 Create NCP nsx-node-agent and nsx-kube-proxy DaemonSet

kubectl create -f nsx-node-agent-ds.yml -n nsx-system 

You can find the above three YAML files on GitHub:
https://github.com/insidepacket/nsxt-k8s-integration-yaml

Now you have completed the NSX-T and K8s integration. If you check the pods running on your K8s cluster, you will see something similar to below:

[root@master ~]# k get pods --all-namespaces 
NAMESPACE     NAME                                   READY     STATUS    RESTARTS   AGE
kube-system   coredns-78fcdf6894-pg4dz               1/1       Running   0          9d
kube-system   coredns-78fcdf6894-q727q               1/1       Running   128        9d
kube-system   etcd-master.k8s                        1/1       Running   3          14d
kube-system   kube-apiserver-master.k8s              1/1       Running   2          14d
kube-system   kube-controller-manager-master.k8s     1/1       Running   3          14d
kube-system   kube-proxy-5p482                       1/1       Running   2          14d
kube-system   kube-proxy-9mnwk                       1/1       Running   0          12d
kube-system   kube-proxy-wj8qw                       1/1       Running   3          14d
kube-system   kube-scheduler-master.k8s              1/1       Running   3          14d
ns-test1000   http-echo-deployment-b5bbfbb86-j4dxq   1/1       Running   0          2d
nsx-system    nsx-ncp-rr989                          1/1       Running   0          11d
nsx-system    nsx-node-agent-kbsld                   2/2       Running   0          9d
nsx-system    nsx-node-agent-pwhlp                   2/2       Running   0          9d
nsx-system    nsx-node-agent-vnd7m                   2/2       Running   0          9d
nszhang       busybox-756b4db447-2b9kx               1/1       Running   0          5d
nszhang       busybox-deployment-5c74f6dd48-n7tp2    1/1       Running   0          9d
nszhang       http-echo-deployment-b5bbfbb86-xnjz6   1/1       Running   0          2d
nszhang       jenkins-deployment-8546d898cd-zdzs2    1/1       Running   0          11d
nszhang       whoami-deployment-85b65d8757-6m7kt     1/1       Running   0          6d
nszhang       whoami-deployment-85b65d8757-b4m99     1/1       Running   0          6d
nszhang       whoami-deployment-85b65d8757-pwwt9     1/1       Running   0          6d

In the NSX-T Manager GUI, you will see that the following resources have been created for the K8s cluster.

Logical Switches for K8s
Tier1 Router for K8s
NSX LB for K8s

Tips:

I ran into a few issues during this journey. The following CLIs were used a lot during troubleshooting; I share them here in the hope that they help you as well.

  • How to check kubelet service’s log
journalctl -xeu kubelet
  • How to check log for a specific pod
kubectl logs nsx-ncp-rr989 -n nsx-system

“nsx-ncp-rr989” is the name of pod and “nsx-system” is the namespace which we created for NCP.

  • How to check the log for a specific container when there is more than one container in the pod
kubectl logs nsx-node-agent-n7n7g -c nsx-node-agent -n nsx-system

“nsx-node-agent-n7n7g” is the pod name and “nsx-node-agent” is the container name.

  • Show details of a specific pod
kubectl describe pod nsx-ncp-rr989 -n nsx-system

Automate NSX-T Build with Terraform

Terraform is a widely adopted Infrastructure as Code tool that allows you to define your infrastructure using a simple, declarative programming language, and to deploy and manage infrastructure across public cloud providers including AWS, Azure, Google Cloud and IBM Cloud, as well as other infrastructure providers like VMware NSX-T, F5 BIG-IP, etc.

In this blog, I will show you how to leverage the Terraform NSX-T provider to define an NSX-T tenant environment in minutes.

To build the new NSX-T environment, I am going to:

  1. Create a new Tier1 router named tier1_router;
  2. Create three logical switches under the newly created Tier1 router for the web/app/db security zones;
  3. Connect the newly created Tier1 router to the existing Tier0 router;
  4. Create a new network service group including SSH and HTTPS;
  5. Create a new firewall section and add a firewall rule to allow outbound SSH/HTTPS traffic from any workload on the web logical switch to any workload on the app logical switch.

First, I define a Terraform module as below. Note: a Terraform module is normally used to define reusable components. For example, the module defined here can be re-used to build both non-production and production environments simply by providing different inputs.

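The module's variables.tf is not shown here; a minimal sketch covering the inputs referenced in the module body (the variable names are taken from the var. references below, and the descriptions are only suggestions) could look like this:

variable "nsxt_logical_tier1_router_name" {
  description = "Display name of the Tier1 router to create"
}

variable "logicalswitch1_name" {
  description = "Display name of the web logical switch"
}

variable "logicalswitch2_name" {
  description = "Display name of the app logical switch"
}

variable "logicalswitch3_name" {
  description = "Display name of the db logical switch"
}

variable "logicalswitch1_gw" {
  description = "Gateway IP/prefix for the web segment, e.g. 192.168.80.1/24"
}

variable "logicalswitch2_gw" {
  description = "Gateway IP/prefix for the app segment"
}

variable "logicalswitch3_gw" {
  description = "Gateway IP/prefix for the db segment"
}
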
/*
provider "nsxt" {
  allow_unverified_ssl = true
  max_retries = 10
  retry_min_delay = 500
  retry_max_delay = 5000
  retry_on_status_codes = [429]
}
*/

data "nsxt_transport_zone" "overlay_transport_zone" {
  display_name = "tz-overlay"
}

data "nsxt_logical_tier0_router" "tier0_router" {
  display_name = "t0"
}

data "nsxt_edge_cluster" "edge_cluster" {
  display_name = "edge-cluster"
}

resource "nsxt_logical_router_link_port_on_tier0" "tier0_port_to_tier1" {
  description = "TIER0_PORT1 provisioned by Terraform"
  display_name = "tier0_port_to_tier1"
  logical_router_id = "${data.nsxt_logical_tier0_router.tier0_router.id}"
  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_logical_tier1_router" "tier1_router" {
  description = "RTR1 provisioned by Terraform"
  display_name = "${var.nsxt_logical_tier1_router_name}"
  #failover_mode = "PREEMPTIVE"
  edge_cluster_id = "${data.nsxt_edge_cluster.edge_cluster.id}"
  enable_router_advertisement = true
  advertise_connected_routes = false
  advertise_static_routes = true
  advertise_nat_routes = true
  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_logical_router_link_port_on_tier1" "tier1_port_to_tier0" {
  description  = "TIER1_PORT1 provisioned by Terraform"
  display_name = "tier1_port_to_tier0"
  logical_router_id = "${nsxt_logical_tier1_router.tier1_router.id}"
  linked_logical_router_port_id = "${nsxt_logical_router_link_port_on_tier0.tier0_port_to_tier1.id}"
  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_logical_switch" "LS-terraform-web" {
  admin_state = "UP"
  description = "LogicalSwitch provisioned by Terraform"
  display_name = "${var.logicalswitch1_name}"
  transport_zone_id = "${data.nsxt_transport_zone.overlay_transport_zone.id}"
  replication_mode  = "MTEP"
  tag {
    scope = "ibm"
    tag = "blue"
  }
}

resource "nsxt_logical_switch" "LS-terraform-app" {
  admin_state = "UP"
  description = "LogicalSwitch provisioned by Terraform"
  display_name = "${var.logicalswitch2_name}"
  transport_zone_id = "${data.nsxt_transport_zone.overlay_transport_zone.id}"
  replication_mode  = "MTEP"
  tag {
    scope = "ibm"
    tag = "blue"
  }
}


resource "nsxt_logical_switch" "LS-terraform-db" {
  admin_state = "UP"
  description = "LogicalSwitch provisioned by Terraform"
  display_name = "${var.logicalswitch3_name}"
  transport_zone_id = "${data.nsxt_transport_zone.overlay_transport_zone.id}"
  replication_mode  = "MTEP"
  tag {
    scope = "ibm"
    tag = "blue"
  }
}

resource "nsxt_logical_port" "lp-terraform-web" {
  admin_state = "UP"
  description = "lp provisioned by Terraform"
  display_name = "lp-terraform-web"
  logical_switch_id = "${nsxt_logical_switch.LS-terraform-web.id}"

  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_logical_port" "lp-terraform-app" {
  admin_state = "UP"
  description = "lp provisioned by Terraform"
  display_name = "lp-terraform-app"
  logical_switch_id = "${nsxt_logical_switch.LS-terraform-app.id}"

  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_logical_port" "lp-terraform-db" {
  admin_state = "UP"
  description = "lp provisioned by Terraform"
  display_name = "lp-terraform-db"
  logical_switch_id = "${nsxt_logical_switch.LS-terraform-db.id}"

  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_logical_router_downlink_port" "lif-terraform-web" {
  description = "lif provisioned by Terraform"
  display_name = "lif-terraform-web"
  logical_router_id = "${nsxt_logical_tier1_router.tier1_router.id}"
  linked_logical_switch_port_id = "${nsxt_logical_port.lp-terraform-web.id}"
  ip_address = "${var.logicalswitch1_gw}"

  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_logical_router_downlink_port" "lif-terraform-app" {
  description = "lif provisioned by Terraform"
  display_name = "lif-terraform-app"
  logical_router_id = "${nsxt_logical_tier1_router.tier1_router.id}"
  linked_logical_switch_port_id = "${nsxt_logical_port.lp-terraform-app.id}"
  ip_address = "${var.logicalswitch2_gw}"

  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_logical_router_downlink_port" "lif-terraform-db" {
  description = "lif provisioned by Terraform"
  display_name = "lif-terraform-db"
  logical_router_id = "${nsxt_logical_tier1_router.tier1_router.id}"
  linked_logical_switch_port_id = "${nsxt_logical_port.lp-terraform-db.id}"
  ip_address = "${var.logicalswitch3_gw}"

  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_l4_port_set_ns_service" "ns_service_tcp_443_22_l4" {
  description = "Service provisioned by Terraform"
  display_name = "web_to_app"
  protocol = "TCP"
  destination_ports = ["443", "22"]
  tag {
    scope = "ibm"
    tag   = "blue"
  }
}

resource "nsxt_firewall_section" "terraform" {
  description = "FS provisioned by Terraform"
  display_name = "Web-App"
  tag {
    scope = "ibm"
    tag = "blue"
  }
  
  applied_to {
    target_type = "LogicalSwitch"
    target_id = "${nsxt_logical_switch.LS-terraform-web.id}"
  }

  section_type = "LAYER3"
  stateful = true

  rule {
    display_name = "out_rule"
    description  = "Out going rule"
    action = "ALLOW"
    logged = true
    ip_protocol = "IPV4"
    direction = "OUT"

    source {
      target_type = "LogicalSwitch"
      target_id = "${nsxt_logical_switch.LS-terraform-web.id}"
    }

    destination {
      target_type = "LogicalSwitch"
      target_id = "${nsxt_logical_switch.LS-terraform-app.id}"
    }
    service {
      target_type = "NSService"
      target_id = "${nsxt_l4_port_set_ns_service.ns_service_tcp_443_22_l4.id}"
    }
    applied_to {
      target_type = "LogicalSwitch"
      target_id = "${nsxt_logical_switch.LS-terraform-web.id}"
    }
  }
}  

output "edge-cluster-id" {
  value = "${data.nsxt_edge_cluster.edge_cluster.id}"
}

output "edge-cluster-deployment_type" {
  value = "${data.nsxt_edge_cluster.edge_cluster.deployment_type}"
}

output "tier0-router-port-id" {
  value = "${nsxt_logical_router_link_port_on_tier0.tier0_port_to_tier1.id}"
}

Then I call this newly created module as below:

provider "nsxt" {
  allow_unverified_ssl = true
  max_retries = 10
  retry_min_delay = 500
  retry_max_delay = 5000
  retry_on_status_codes = [429]
}

module "nsxtbuild" {
  source = "/root/terraform/modules/nsxtbuild"
  nsxt_logical_tier1_router_name = "tier1-npr-vr"
  logicalswitch1_name = "npr-web"
  logicalswitch2_name = "npr-app"
  logicalswitch3_name = "npr-db"
  logicalswitch1_gw = "192.168.80.1/24"
  logicalswitch2_gw = "192.168.81.1/24"
  logicalswitch3_gw = "192.168.82.1/24"
}
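
With the provider and module call in place, the usual workflow from this directory is:

terraform init
terraform plan
terraform apply

terraform init downloads the NSX-T provider and loads the module, terraform plan previews the changes, and terraform apply builds them.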

After running “terraform apply”, you can see that the required environment has been built successfully in NSX Manager.

Logical Switches
T1 vRouter
Service
DFW Rules