
by Master Seo, May 28, 2024

NCP Series 20-7: Terraform 2 (2024)

<25> Terraform comments

<26> ncloud CLI

<27> ncloud source

<28> Adding NAT to the Terraform network

<29> Building the network + Kubernetes with Terraform

<30> Using Kubernetes

<31> Terraform files for creating the VPC and Kubernetes

<32> Creating a separate NATGW route table (optional)

<33> Cleanup

<34> Network Q&A



Goal

Create the Naver Cloud network and a Kubernetes cluster with Terraform, NAT included.



Target network









<25> Terraform comments

Terraform (HCL) supports three comment styles:

#          single-line comment

//         single-line comment (alternative)

/* ... */  multi-line (block) comment
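
Put together, a minimal sketch (this resource is illustrative only, not one of the course files):

# single-line comment above a resource
resource "ncloud_vpc" "example" {
  name            = "example-vpc"    // inline comment
  /*
    Block comments are handy for temporarily
    disabling arguments or whole resources.
  */
  ipv4_cidr_block = "10.1.0.0/16"
}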



cd /root/terraform-provider-ncloud-main/examples/nks



1

vi nat.tf


resource "ncloud_subnet" "subnet_scn_02_public_natgw" {

#  vpc_no         = ncloud_vpc.vpc_scn_02.id

   vpc_no         = ncloud_vpc.vpc.id

#  subnet         = cidrsubnet(ncloud_vpc.vpc_scn_02.ipv4_cidr_block, 8, 2)

  subnet         = "10.0.2.0/24"

 // "10.0.2.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

#  network_acl_no = ncloud_network_acl.network_acl_02_public.id

  subnet_type    = "PUBLIC"

  usage_type     = "NATGW"

}

# NAT Gateway

resource "ncloud_nat_gateway" "nat_gateway_scn_02" {

#  vpc_no    = ncloud_vpc.vpc_scn_02.id

   vpc_no         = ncloud_vpc.vpc.id

  subnet_no = ncloud_subnet.subnet_scn_02_public_natgw.id

  zone      = "KR-1"

  name      = var.name_scn02

  #name      = "nat_gateway_scn_02"

}

# Route Table

resource "ncloud_route" "route_scn_02_nat" {

  route_table_no         = ncloud_vpc.vpc.default_private_route_table_no

  destination_cidr_block = "0.0.0.0/0"

  target_type            = "NATGW"

  // NATGW (NAT Gateway) | VPCPEERING (VPC Peering) | VGW (Virtual Private Gateway).

  target_name            = ncloud_nat_gateway.nat_gateway_scn_02.name

  target_no              = ncloud_nat_gateway.nat_gateway_scn_02.id

}
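
For reference, the commented-out cidrsubnet() call computes the same /24 that is hard-coded above. A minimal sketch (hypothetical output name):

locals {
  vpc_cidr = "10.0.0.0/16"
}

output "natgw_subnet_cidr" {
  # cidrsubnet() extends the /16 prefix by 8 bits and selects network number 2,
  # which yields "10.0.2.0/24"
  value = cidrsubnet(local.vpc_cidr, 8, 2)
}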




[root@seo1 nks]# more variables.tf

variable name {

  default = "tf-nks"

}

variable name_scn02 {

  default = "nat1"

}

variable nks_version {

  default = "1.27"

}



2



[root@command1 nks]# terraform plan

│ Error: Reference to undeclared input variable

│   on nat.tf line 37, in resource "ncloud_nat_gateway" "nat_gateway_scn_02":

│   37:   name      = var.name_scn02

│ An input variable with the name "name_scn02" has not been declared. This variable can be declared with a variable "name_scn02" {}

│ block.

[root@command1 nks]#




vi variables.tf



variable name_scn02 {

  default = "tf-scn02"

}
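
Instead of editing the default, the value can also be overridden at run time with Terraform's standard -var flag:

terraform plan -var "name_scn02=nat1"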




3

terraform apply -auto-approve





<26> ncloud CLI



1

Download the ncloud CLI:


wget https://www.ncloud.com/api/support/download/files/cli/CLI_1.1.19_20240321.zip


unzip CLI_1.1.19_20240321.zip

cd CLI_1.1.19_20240321/

cd cli_linux/

cp ncloud /usr/bin

ncloud help



Download the latest version:

https://cli.ncloud-docs.com/docs/guide-clichange




2

ncloud configure

set [DEFAULT]'s configuration.

Ncloud Access Key ID []: EQCygJㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌ

Ncloud Secret Access Key []: 3z0xYㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌ

Ncloud API URL (default:https://ncloud.apigw.ntruss.com) []:



3

ncloud server getRegionList




<27> ncloud source



1



cd

wget  https://github.com/NaverCloudPlatform/terraform-provider-ncloud/archive/refs/heads/master.zip

unzip master.zip



cd /root/terraform-provider-ncloud-main/examples/nks



Reference:

https://github.com/NaverCloudPlatform/terraform-provider-ncloud/



2


vi variables.tf



[root@cl4-na-dev-command1 nks]# vi variables.tf

variable name {

  default = "tf-nks"

}

variable nks_version {

  default = "1.27"

}

variable client_ip {

  default = "2.2.2.2"

}

variable access_key {

  default = "B6wVLur"

}

variable secret_key {

  default = "CvT89R"

}

variable login_key {

  default = "kk1"

}

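
Keeping real keys in variables.tf is risky if the file is ever shared. As an alternative, the ncloud provider documentation describes reading credentials from environment variables; a sketch under that assumption:

provider "ncloud" {
  support_vpc = true
  region      = "KR"
  # access_key / secret_key omitted: the provider reads them from the
  # NCLOUD_ACCESS_KEY / NCLOUD_SECRET_KEY environment variables instead
}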




3

Install Terraform on Rocky Linux:


sudo yum install -y yum-utils shadow-utils

sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo

# Note: on Rocky/RHEL the matching repo is https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo

sudo yum -y install terraform

terraform -version




terraform init

terraform plan



[root@new2222222 nks]# terraform plan

  + create

Terraform will perform the following actions:

  # ncloud_login_key.loginkey will be created

  + resource "ncloud_login_key" "loginkey" {

      + fingerprint = (known after apply)

      + id          = (known after apply)

      + key_name    = "aaa"

      + private_key = (sensitive value)

    }

  # ncloud_nks_cluster.cluster will be created

  + resource "ncloud_nks_cluster" "cluster" {

      + acg_no                = (known after apply)

      + cluster_type          = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002"

      + endpoint              = (known after apply)

      + hypervisor_code       = (known after apply)

      + id                    = (known after apply)

      + ip_acl                = []

      + ip_acl_default_action = (known after apply)

      + k8s_version           = "1.27.9-nks.1"

      + kube_network_plugin   = "cilium"

      + lb_private_subnet_no  = (known after apply)

      + login_key_name        = "aaa"

      + name                  = "sample-cluster"

      + public_network        = (known after apply)

      + subnet_no_list        = (known after apply)

      + uuid                  = (known after apply)

      + vpc_no                = (known after apply)

      + zone                  = "KR-1"

      + log {

          + audit = true

        }

    }

  # ncloud_nks_node_pool.node_pool will be created

  + resource "ncloud_nks_node_pool" "node_pool" {

      + cluster_uuid   = (known after apply)

      + id             = (known after apply)

      + instance_no    = (known after apply)

      + k8s_version    = (known after apply)

      + label          = [

          + {

              + key   = "foo"

              + value = "bar"

            },

        ]

      + node_count     = 1

      + node_pool_name = "pool1"

      + nodes          = (known after apply)

      + product_code   = "SVR.VSVR.STAND.C002.M008.NET.SSD.B050.G002"

      + software_code  = "SW.VSVR.OS.LNX64.UBNTU.SVR2004.WRKND.B050"

      + storage_size   = (known after apply)

      + subnet_no_list = (known after apply)

      + taint          = [

          + {

              + effect = "NoExecute"

              + key    = "foo"

              + value  = "bar"

            },

        ]

      + autoscale {

          + enabled = true

          + max     = 2

          + min     = 1

        }

    }

  # ncloud_subnet.lb_subnet will be created

  + resource "ncloud_subnet" "lb_subnet" {

      + id             = (known after apply)

      + name           = "lb-subnet"

      + network_acl_no = (known after apply)

      + subnet         = "10.0.100.0/24"

      + subnet_no      = (known after apply)

      + subnet_type    = "PRIVATE"

      + usage_type     = "LOADB"

      + vpc_no         = (known after apply)

      + zone           = "KR-1"

    }

  # ncloud_subnet.node_subnet will be created

  + resource "ncloud_subnet" "node_subnet" {

      + id             = (known after apply)

      + name           = "node-subnet"

      + network_acl_no = (known after apply)

      + subnet         = "10.0.1.0/24"

      + subnet_no      = (known after apply)

      + subnet_type    = "PRIVATE"

      + usage_type     = "GEN"

      + vpc_no         = (known after apply)

      + zone           = "KR-1"

    }

  # ncloud_vpc.vpc will be created

  + resource "ncloud_vpc" "vpc" {

      + default_access_control_group_no = (known after apply)

      + default_network_acl_no          = (known after apply)

      + default_private_route_table_no  = (known after apply)

      + default_public_route_table_no   = (known after apply)

      + id                              = (known after apply)

      + ipv4_cidr_block                 = "10.0.0.0/16"

      + name                            = "vpc"

      + vpc_no                          = (known after apply)

    }

Plan: 6 to add, 0 to change, 0 to destroy.




4


terraform apply -auto-approve



│ Error: Status: 400 Bad Request, Body: {"error":{"errorCode":400,"message":"Bad Request","details":"Require lbPublicSubnetNo"},"timestamp":"2024-05-30T20:22:08.172Z"}

│   with ncloud_nks_cluster.cluster,

│   on main.tf line 48, in resource "ncloud_nks_cluster" "cluster":

│   48: resource "ncloud_nks_cluster" "cluster" {



[root@new2222222 nks]# more lb-pub2.tf

resource "ncloud_subnet" "lb_subnet2" {

  vpc_no         = ncloud_vpc.vpc.id

  subnet         = "10.0.102.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

  subnet_type    = "PUBLIC"

  name           = "lb-subnet2"

  usage_type     = "LOADB"

}




vi main.tf

Add the following line (shown in context below):

  lb_public_subnet_no        = ncloud_subnet.lb_subnet2.id
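
In context, the cluster resource then carries both load balancer subnets (this matches the full listing later in this post):

resource "ncloud_nks_cluster" "cluster" {
  # ...other arguments unchanged...
  lb_private_subnet_no = ncloud_subnet.lb_subnet.id
  lb_public_subnet_no  = ncloud_subnet.lb_subnet2.id   # the newly added line
  # ...
}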




│ Error: Create Vpc Instance, err params={0xc000654618 0xc0001a7838 0xc0001a7868}

│   with ncloud_vpc.vpc,

│   on main.tf line 11, in resource "ncloud_vpc" "vpc":

│   11: resource "ncloud_vpc" "vpc" {

│ Status: 400 Bad Request, Body: {

│   "responseError": {

│     "returnCode": "1000037",

│     "returnMessage": "Cannot create with duplicate VPC name."

│   }

│ }


Delete the existing VPC in the console.





│ Error: Error Creating LoginKey

│   with ncloud_login_key.loginkey,

│   on main.tf line 44, in resource "ncloud_login_key" "loginkey":

│   44: resource "ncloud_login_key" "loginkey" {

│ Status: 500 Internal Server Error, Body: {

│   "responseError": {

│     "returnCode": "140002",

│     "returnMessage": "LOGIN KEY with the same name already exists."

│   }

│ }


Change the login key name in the variables file:

[root@new2222222 nks]# vi variables.tf




terraform apply -auto-approve



NKS creation takes about 35 minutes.

NKS details

Node pool

Nodes



5

View the Terraform code for the Kubernetes setup:


root@command2:~/terraform-provider-ncloud-main/examples/nks# la

main.tf  .terraform  .terraform.lock.hcl  terraform.tfstate  terraform.tfstate.backup  variables.tf  versions.tf



more *.tf

::::::::::::::

main.tf

::::::::::::::

# VPC > User scenario > Scenario 1. Single Public Subnet

# https://docs.ncloud.com/ko/networking/vpc/vpc_userscenario1.html

provider "ncloud" {

  support_vpc = true

  region      = "KR"

  access_key  = var.access_key

  secret_key  = var.secret_key

}

resource "ncloud_vpc" "vpc" {

  name            = "vpc"

  ipv4_cidr_block = "10.0.0.0/16"

}

resource "ncloud_subnet" "node_subnet" {

  vpc_no         = ncloud_vpc.vpc.id

  subnet         = "10.0.1.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

  subnet_type    = "PRIVATE"

  name           = "node-subnet"

  usage_type     = "GEN"

}

resource "ncloud_subnet" "lb_subnet" {

  vpc_no         = ncloud_vpc.vpc.id

  subnet         = "10.0.100.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

  subnet_type    = "PRIVATE"

  name           = "lb-subnet"

  usage_type     = "LOADB"

}

resource "ncloud_subnet" "lb_subnet2" {

  vpc_no         = ncloud_vpc.vpc.id

  subnet         = "10.0.102.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

  subnet_type    = "PUBLIC"

  name           = "lb-subnet2"

  usage_type     = "LOADB"

}

data "ncloud_nks_versions" "version" {

  filter {

    name = "value"

    values = [var.nks_version]

    regex = true

  }

}

resource "ncloud_login_key" "loginkey" {

  key_name = var.login_key

}

resource "ncloud_nks_cluster" "cluster" {

  cluster_type                = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002"

  k8s_version                 = data.ncloud_nks_versions.version.versions.0.value

  login_key_name              = ncloud_login_key.loginkey.key_name

  name                        = "sample-cluster"

  lb_private_subnet_no        = ncloud_subnet.lb_subnet.id

  lb_public_subnet_no        = ncloud_subnet.lb_subnet2.id

  kube_network_plugin         = "cilium"

  subnet_no_list              = [ ncloud_subnet.node_subnet.id ]

  vpc_no                      = ncloud_vpc.vpc.id

  zone                        = "KR-1"

  log {

    audit = true

  }

}

data "ncloud_nks_server_images" "image"{

  hypervisor_code = "XEN"

  filter {

    name = "label"

    values = ["ubuntu-20.04"]

    regex = true

  }

}

data "ncloud_nks_server_products" "nks_products"{

  software_code = data.ncloud_nks_server_images.image.images[0].value

  zone = "KR-1"

  filter {

    name = "product_type"

    values = [ "STAND"]

  }

  filter {

    name = "cpu_count"

    values = [ "2"]

  }

  filter {

    name = "memory_size"

    values = [ "8GB" ]

  }

}

resource "ncloud_nks_node_pool" "node_pool" {

  cluster_uuid = ncloud_nks_cluster.cluster.uuid

  node_pool_name = "pool1"

  node_count     = 1

  software_code  = data.ncloud_nks_server_images.image.images[0].value

  product_code   = data.ncloud_nks_server_products.nks_products.products[0].value

  subnet_no_list = [ncloud_subnet.node_subnet.id]

  autoscale {

    enabled = true

    min = 1

    max = 2

  }

  label {

    key = "foo"

    value = "bar"

  }

  taint {

    key = "foo"

    value = "bar"

    effect = "NoExecute"

  }

}

::::::::::::::

variables.tf

::::::::::::::

variable name {

  default = "tf-nks"

}

variable nks_version {

  default = "1.27"

}

variable client_ip {

  default = "213.0.13.2"

}

variable access_key {

  default = "sHiw6j72BVa"

}

variable secret_key {

  default = "Vpy89cpj9lnQPel"

}

variable login_key {

  default = "agame-k8s12"

}

::::::::::::::

versions.tf

::::::::::::::

terraform {

  required_providers {

    ncloud = {

      source = "navercloudplatform/ncloud"

    }

  }

  required_version = ">= 0.13"

}



(Terraform files attached)




Cleanup


terraform destroy --auto-approve





<28> Adding NAT to the Terraform network



Let's build the network shown below, this time with a NAT gateway added.




root@command2:~/terraform-provider-ncloud-main/examples/nks# ls

backup  main.tf  nat.tf  pub1.tf  terraform.tfstate  terraform.tfstate.backup  variables.tf  versions.tf



root@command2:~/terraform-provider-ncloud-main/examples/nks# more *.tf

::::::::::::::

main.tf

::::::::::::::

# VPC > User scenario > Scenario 1. Single Public Subnet

# https://docs.ncloud.com/ko/networking/vpc/vpc_userscenario1.html

provider "ncloud" {

  support_vpc = true

  region      = "KR"

  access_key  = var.access_key

  secret_key  = var.secret_key

}

resource "ncloud_vpc" "vpc" {

  name            = "vpc"

  ipv4_cidr_block = "10.0.0.0/16"

}

resource "ncloud_subnet" "node_subnet" {

  vpc_no         = ncloud_vpc.vpc.id

  subnet         = "10.0.1.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

  subnet_type    = "PRIVATE"

  name           = "node-subnet"

  usage_type     = "GEN"

}

resource "ncloud_subnet" "lb_subnet" {

  vpc_no         = ncloud_vpc.vpc.id

  subnet         = "10.0.100.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

  subnet_type    = "PRIVATE"

  name           = "lb-subnet"

  usage_type     = "LOADB"

}

resource "ncloud_subnet" "lb_subnet2" {

  vpc_no         = ncloud_vpc.vpc.id

  subnet         = "10.0.102.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

  subnet_type    = "PUBLIC"

  name           = "lb-subnet2"

  usage_type     = "LOADB"

}

/*

data "ncloud_nks_versions" "version" {

  filter {

    name = "value"

    values = [var.nks_version]

    regex = true

  }

}

resource "ncloud_login_key" "loginkey" {

  key_name = var.login_key

}

*/

/*

resource "ncloud_nks_cluster" "cluster" {

  cluster_type                = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002"

  k8s_version                 = data.ncloud_nks_versions.version.versions.0.value

  login_key_name              = ncloud_login_key.loginkey.key_name

  name                        = "sample-cluster"

  lb_private_subnet_no        = ncloud_subnet.lb_subnet.id

  lb_public_subnet_no        = ncloud_subnet.lb_subnet2.id

  kube_network_plugin         = "cilium"

  subnet_no_list              = [ ncloud_subnet.node_subnet.id ]

  vpc_no                      = ncloud_vpc.vpc.id

  zone                        = "KR-1"

  log {

    audit = true

  }

}

data "ncloud_nks_server_images" "image"{

  hypervisor_code = "XEN"

  filter {

    name = "label"

    values = ["ubuntu-20.04"]

    regex = true

  }

}

data "ncloud_nks_server_products" "nks_products"{

  software_code = data.ncloud_nks_server_images.image.images[0].value

  zone = "KR-1"

  filter {

    name = "product_type"

    values = [ "STAND"]

  }

  filter {

    name = "cpu_count"

    values = [ "2"]

  }

  filter {

    name = "memory_size"

    values = [ "8GB" ]

  }

}

resource "ncloud_nks_node_pool" "node_pool" {

  cluster_uuid = ncloud_nks_cluster.cluster.uuid

  node_pool_name = "pool1"

  node_count     = 1

  software_code  = data.ncloud_nks_server_images.image.images[0].value

  product_code   = data.ncloud_nks_server_products.nks_products.products[0].value

  subnet_no_list = [ncloud_subnet.node_subnet.id]

  autoscale {

    enabled = true

    min = 1

    max = 2

  }

  label {

    key = "foo"

    value = "bar"

  }

  taint {

    key = "foo"

    value = "bar"

    effect = "NoExecute"

  }

}

*/

::::::::::::::

nat.tf

::::::::::::::

resource "ncloud_subnet" "subnet_scn_02_public_natgw" {

#  vpc_no         = ncloud_vpc.vpc_scn_02.id

   vpc_no         = ncloud_vpc.vpc.id

#  subnet         = cidrsubnet(ncloud_vpc.vpc_scn_02.ipv4_cidr_block, 8, 2)

  subnet         = "10.0.2.0/24"

 // "10.0.2.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

#  network_acl_no = ncloud_network_acl.network_acl_02_public.id

  subnet_type    = "PUBLIC"

  usage_type     = "NATGW"

}

# NAT Gateway

resource "ncloud_nat_gateway" "nat_gateway_scn_02" {

#  vpc_no    = ncloud_vpc.vpc_scn_02.id

   vpc_no         = ncloud_vpc.vpc.id

  subnet_no = ncloud_subnet.subnet_scn_02_public_natgw.id

  zone      = "KR-1"

  name      = var.name_scn02

  #name      = "nat_gateway_scn_02"

}

# Route Table

resource "ncloud_route" "route_scn_02_nat" {

  route_table_no         = ncloud_vpc.vpc.default_private_route_table_no

  destination_cidr_block = "0.0.0.0/0"

  target_type            = "NATGW"

  // NATGW (NAT Gateway) | VPCPEERING (VPC Peering) | VGW (Virtual Private Gateway).

  target_name            = ncloud_nat_gateway.nat_gateway_scn_02.name

  target_no              = ncloud_nat_gateway.nat_gateway_scn_02.id

}

::::::::::::::

pub1.tf

::::::::::::::

resource "ncloud_subnet" "pup1_subnet" {

  vpc_no         = ncloud_vpc.vpc.id

  subnet         = "10.0.0.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

  subnet_type    = "PUBLIC"

  name           = "pub1-subnet"

  usage_type     = "GEN"

}

::::::::::::::

variables.tf

::::::::::::::

variable name {

  default = "tf-nks"

}

variable client_ip {

  default = "213.10.113.252"

}

variable access_key {

  default = "sHiwO72BVa"

}

variable secret_key {

  default = "Vpy81CWg9lnQPel"

}

variable name_scn02 {

  default = "tf-scn02"

}

::::::::::::::

versions.tf

::::::::::::::

terraform {

  required_providers {

    ncloud = {

      source = "navercloudplatform/ncloud"

    }

  }

  required_version = ">= 0.13"

}

root@command2:~/terraform-provider-ncloud-main/examples/nks#






root@command2:~/terraform-provider-ncloud-main/examples/nks# terraform plan

│ Error: Reference to undeclared input variable

│   on main.tf line 58, in data "ncloud_nks_versions" "version":

│   58:     values = [var.nks_version]

│ An input variable with the name "nks_version" has not been declared. This variable can be declared with a variable "nks_version" {} block.

│ Error: Reference to undeclared input variable

│   on main.tf line 63, in resource "ncloud_login_key" "loginkey":

│   63:   key_name = var.login_key

│ An input variable with the name "login_key" has not been declared. This variable can be declared with a variable "login_key" {} block.

root@command2:~/terraform-provider-ncloud-main/examples/nks#

These variables were only used by the NKS blocks; with those blocks commented out via /* */ (as in the listing above), or the variables re-declared, the plan runs cleanly:




terraform init

terraform plan

terraform apply -auto-approve



terraform destroy --auto-approve




<29> Building the network + Kubernetes with Terraform



1

For now, I edited the code just enough to make it work.

It still needs tidying, e.g. moving hard-coded values into variables, as sketched below.
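
For example, the zone literal repeated in every resource could move into a variable (hypothetical variable, written in the same unquoted style as the existing variables.tf):

variable zone {
  default = "KR-1"
}

resource "ncloud_subnet" "node_subnet" {
  vpc_no         = ncloud_vpc.vpc.id
  subnet         = "10.0.1.0/24"
  zone           = var.zone          # was the hard-coded literal "KR-1"
  network_acl_no = ncloud_vpc.vpc.default_network_acl_no
  subnet_type    = "PRIVATE"
  name           = "node-subnet"
  usage_type     = "GEN"
}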







terraform init

terraform plan

terraform apply -auto-approve



Takes about 35 minutes in total:

Cluster: about 16 minutes

Nodes: about 19 minutes




Questions on any topic are welcome.



https://vclock.kr/timer/#countdown=00:10:00&enabled=0&seconds=0&sound=xylophone&loop=1



https://brunch.co.kr/@topasvga/3915




<30> Using Kubernetes



1

Authentication for cluster access

Install ncp-iam-authenticator:


curl -o ncp-iam-authenticator -L https://github.com/NaverCloudPlatform/ncp-iam-authenticator/releases/latest/download/ncp-iam-authenticator_linux_amd64


chmod +x ./ncp-iam-authenticator


mkdir -p $HOME/bin && cp ./ncp-iam-authenticator $HOME/bin/ncp-iam-authenticator && 

export PATH=$PATH:$HOME/bin


echo 'export PATH=$PATH:$HOME/bin' >> ~/.bash_profile

ncp-iam-authenticator help


Manual:

https://guide.ncloud-docs.com/docs/k8s-iam-auth-ncp-iam-authenticator




Create the kubeconfig:


ncp-iam-authenticator create-kubeconfig --region KR --clusterUuid   b01xxxxxxxxxx --output kubeconfig.yaml


ncp-iam-authenticator create-kubeconfig --region <region-code> --clusterUuid <cluster-uuid> --output kubeconfig.yaml
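
Once kubectl is installed (next step), a quick check that the generated kubeconfig works:

kubectl --kubeconfig kubeconfig.yaml get nodes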



2

Install the kubectl command:


curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"

sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
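
A quick sanity check that the binary is on the PATH:

kubectl version --client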





3


vi  ~/.bash_profile  


Add the following lines at the bottom:

alias k='kubectl --kubeconfig="/root/kubeconfig.yaml"'

alias kw='watch -d kubectl get deploy,svc,pods --kubeconfig="/root/kubeconfig.yaml"'



source ~/.bash_profile

k get nodes




4


Deploy the sample 2048 app. Either plain kubectl or the k alias works; using the alias:

cat <<EOF | k create -f -

apiVersion: apps/v1

kind: Deployment

metadata:

  name: deployment-2048

spec:

  selector:

    matchLabels:

      app.kubernetes.io/name: app-2048

  replicas: 2

  template:

    metadata:

      labels:

        app.kubernetes.io/name: app-2048

    spec:

      containers:

      - image: alexwhen/docker-2048

        name: app-2048

        ports:

        - containerPort: 80

EOF





Attach a classic load balancer so the app is reachable from outside:

k expose deployment deployment-2048 --port=80 --type=LoadBalancer

# equivalently, without the alias:
kubectl expose deployment deployment-2048 --port=80 --type=LoadBalancer
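
The EXTERNAL-IP stays pending until the load balancer is provisioned; watch for it with a standard kubectl query:

k get svc deployment-2048 -w    # once EXTERNAL-IP appears, open http://<EXTERNAL-IP>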








10

https://brunch.co.kr/@topasvga/3183





<31> Terraform files for creating the VPC and Kubernetes


1

cd /root/terraform-provider-ncloud-main/examples/nks



2

vi main.tf

In the source, delete or comment out the taint block (see the snippet below).


https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
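
Using the /* */ style from <25>, the taint block in main.tf becomes:

/*
  taint {
    key    = "foo"
    value  = "bar"
    effect = "NoExecute"
  }
*/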



3

terraform init

terraform plan

terraform apply -auto-approve



4

(Terraform files attached)




5

kw




<32> Creating a separate NATGW route table (optional)



1

vi nat.tf


resource "ncloud_subnet" "nat_subnet" {

#  vpc_no         = ncloud_vpc.vpc_scn_02.id

   vpc_no         = ncloud_vpc.vpc.id

#  subnet         = cidrsubnet(ncloud_vpc.vpc_scn_02.ipv4_cidr_block, 8, 2)

  subnet         = "10.0.2.0/24"

 // "10.0.2.0/24"

  zone           = "KR-1"

  network_acl_no = ncloud_vpc.vpc.default_network_acl_no

#  network_acl_no = ncloud_network_acl.network_acl_02_public.id

  subnet_type    = "PUBLIC"

  usage_type     = "NATGW"

}

resource "ncloud_nat_gateway" "natgw1" {

  vpc_no    = ncloud_vpc.vpc.id

  subnet_no = ncloud_subnet.nat_subnet.id

  zone      = "KR-1"

  name      = "kubernetes-nat-gw"

}

resource "ncloud_route_table" "kubernetes_route_table" {

  vpc_no                = ncloud_vpc.vpc.id

  supported_subnet_type = "PRIVATE"

  name                  = "kubernetes-route-table"

}

resource "ncloud_route" "kubernetes_route" {

  route_table_no         = ncloud_route_table.kubernetes_route_table.id

  destination_cidr_block = "0.0.0.0/0"

  target_type            = "NATGW"

  target_name            = ncloud_nat_gateway.natgw1.name

  target_no              = ncloud_nat_gateway.natgw1.id

}

resource "ncloud_route_table_association" "kubernetes_route_table_subnet" {

  route_table_no = ncloud_route_table.kubernetes_route_table.id

  subnet_no      = ncloud_subnet.node_subnet.id

}





2

Check the subnets in the console.





<33> Cleanup


1

cd /root/terraform-provider-ncloud-main/examples/nks


terraform destroy --auto-approve



2

Verify manually in the console and delete any leftovers.


3

Check the cost the next day.





<34> Network Q&A


1

https://brunch.co.kr/@topasvga/3915



2

You can ask questions here, or post them to the cafe:


https://cafe.naver.com/dnspro




3

10-minute break

Hands-on practice


https://vclock.kr/timer/#countdown=00:10:00&enabled=0&seconds=0&sound=xylophone&loop=1




4

Review from the beginning:

https://brunch.co.kr/@topasvga/3806




Thank you.


                    
