brunch

NCP 20탄-7. 테라폼2-2024

by Master Seo

<25> 테라폼 주석 알아보자

<26> ncloud CLI

<27> ncloud 소스

<28> 테라폼 네트워크에 NAT 추가

<29> 테라폼으로 네트워크 + 쿠버네티스 구축

<30> 쿠버네티스 사용하기

<31> 테라폼으로 VPC, 쿠버네티스 생성하기 테라폼 파일

<32> NATGW 라우팅 테이블 별도로 만들기 (선택)

<33> 삭제

<34> 네트워크 질문답변



목표

테라폼으로 네이버 네트워크와 쿠버네티스를 생성해보자. (NAT 포함)



목적 네트워크









<25> 테라폼 주석 알아보자


#   → 한 줄 주석 (해시 스타일)


//  → 한 줄 주석 (더블 슬래시 스타일)



/*
   여러 줄 주석 — /* 와 */ 사이의 내용이 모두 주석 처리된다
*/



cd /root/terraform-provider-ncloud-main/examples/nks



1

vi nat.tf


# nat.tf — adds a dedicated public subnet for a NAT Gateway, the NAT Gateway
# itself, and a default route so private subnets can reach the internet.

# Subnet reserved for the NAT Gateway (usage_type must be "NATGW").
resource "ncloud_subnet" "subnet_scn_02_public_natgw" {

# vpc_no = ncloud_vpc.vpc_scn_02.id

# Attach to the VPC declared in main.tf.
vpc_no = ncloud_vpc.vpc.id

# subnet = cidrsubnet(ncloud_vpc.vpc_scn_02.ipv4_cidr_block, 8, 2)

subnet = "10.0.2.0/24"

// "10.0.2.0/24"

zone = "KR-1"

# Reuse the VPC's default network ACL instead of creating a new one.
network_acl_no = ncloud_vpc.vpc.default_network_acl_no

# network_acl_no = ncloud_network_acl.network_acl_02_public.id

# NAT Gateway subnets must be PUBLIC.
subnet_type = "PUBLIC"

usage_type = "NATGW"

}

# NAT Gateway
resource "ncloud_nat_gateway" "nat_gateway_scn_02" {

# vpc_no = ncloud_vpc.vpc_scn_02.id

vpc_no = ncloud_vpc.vpc.id

subnet_no = ncloud_subnet.subnet_scn_02_public_natgw.id

zone = "KR-1"

# NOTE: var.name_scn02 must be declared in variables.tf, otherwise
# `terraform plan` fails with "Reference to undeclared input variable".
name = var.name_scn02

#name = "nat_gateway_scn_02"

}

# Route Table
# Default route (0.0.0.0/0) on the VPC's default *private* route table,
# pointing at the NAT Gateway above.
resource "ncloud_route" "route_scn_02_nat" {

route_table_no = ncloud_vpc.vpc.default_private_route_table_no

destination_cidr_block = "0.0.0.0/0"

target_type = "NATGW"

// NATGW (NAT Gateway) | VPCPEERING (VPC Peering) | VGW (Virtual Private Gateway).

target_name = ncloud_nat_gateway.nat_gateway_scn_02.name

target_no = ncloud_nat_gateway.nat_gateway_scn_02.id

}




[root@seo1 nks]# more variables.tf

# variables.tf — input variables referenced by main.tf and nat.tf.

variable name {
  default = "tf-nks"
}

variable name_scn02 {
  default = "nat1"
}

# FIX: the original listing was truncated — this block was missing its
# closing brace, which makes the whole file invalid HCL.
variable nks_version {
  default = "1.27"
}



2



[root@command1 nks]# terraform plan

│ Error: Reference to undeclared input variable

│ on nat.tf line 37, in resource "ncloud_nat_gateway" "nat_gateway_scn_02":

│ 37: name = var.name_scn02

│ An input variable with the name "name_scn02" has not been declared. This variable can be declared with a variable "name_scn02" {}

│ block.

[root@command1 nks]#




vi variables.tf



# Name used for the NAT Gateway resource in nat.tf.
variable "name_scn02" {
  default = "tf-scn02"
}




3

terraform apply -auto-approve





<26> ncloud CLI



1

ncloud CLI 다운로드


wget https://www.ncloud.com/api/support/download/files/cli/CLI_1.1.19_20240321.zip


unzip CLI_1.1.19_20240321.zip

cd CLI_1.1.19_20240321/

cd cli_linux/

cp ncloud /usr/bin

ncloud help



최신본 다운로드

https://cli.ncloud-docs.com/docs/guide-clichange




2

ncloud configure

set [DEFAULT]'s configuration.

Ncloud Access Key ID []: EQCygJㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌ

Ncloud Secret Access Key []: 3z0xYㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌㅌ

Ncloud API URL (default:https://ncloud.apigw.ntruss.com) []:



3

ncloud server getRegionList




<27> ncloud 소스



1



cd

wget https://github.com/NaverCloudPlatform/terraform-provider-ncloud/archive/refs/heads/master.zip

unzip master.zip



cd /root/terraform-provider-ncloud-main/examples/nks



참고 사이트

https://github.com/NaverCloudPlatform/terraform-provider-ncloud/



2


vi variables.tf



[root@cl4-na-dev-command1 nks]# vi variables.tf

variable name {

default = "tf-nks"

}

variable nks_version {

default = "1.27"

}

variable client_ip {

default = "2.2.2.2"

}

variable access_key {

default = "B6wVLur"

}

variable secret_key {

default = "CvT89R"

}

variable login_key {

default = "kk1"

}

~

~

~




3

로키 리눅스에 테라폼 설치


sudo yum install -y yum-utils shadow-utils

sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo

sudo yum -y install terraform

terraform -version




terraform init

terraform plan



[root@new2222222 nks]# terraform plan

+ create

Terraform will perform the following actions:

# ncloud_login_key.loginkey will be created

+ resource "ncloud_login_key" "loginkey" {

+ fingerprint = (known after apply)

+ id = (known after apply)

+ key_name = "aaa"

+ private_key = (sensitive value)

}

# ncloud_nks_cluster.cluster will be created

+ resource "ncloud_nks_cluster" "cluster" {

+ acg_no = (known after apply)

+ cluster_type = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002"

+ endpoint = (known after apply)

+ hypervisor_code = (known after apply)

+ id = (known after apply)

+ ip_acl = []

+ ip_acl_default_action = (known after apply)

+ k8s_version = "1.27.9-nks.1"

+ kube_network_plugin = "cilium"

+ lb_private_subnet_no = (known after apply)

+ login_key_name = "aaa"

+ name = "sample-cluster"

+ public_network = (known after apply)

+ subnet_no_list = (known after apply)

+ uuid = (known after apply)

+ vpc_no = (known after apply)

+ zone = "KR-1"

+ log {

+ audit = true

}

}

# ncloud_nks_node_pool.node_pool will be created

+ resource "ncloud_nks_node_pool" "node_pool" {

+ cluster_uuid = (known after apply)

+ id = (known after apply)

+ instance_no = (known after apply)

+ k8s_version = (known after apply)

+ label = [

+ {

+ key = "foo"

+ value = "bar"

},

]

+ node_count = 1

+ node_pool_name = "pool1"

+ nodes = (known after apply)

+ product_code = "SVR.VSVR.STAND.C002.M008.NET.SSD.B050.G002"

+ software_code = "SW.VSVR.OS.LNX64.UBNTU.SVR2004.WRKND.B050"

+ storage_size = (known after apply)

+ subnet_no_list = (known after apply)

+ taint = [

+ {

+ effect = "NoExecute"

+ key = "foo"

+ value = "bar"

},

]

+ autoscale {

+ enabled = true

+ max = 2

+ min = 1

}

}

# ncloud_subnet.lb_subnet will be created

+ resource "ncloud_subnet" "lb_subnet" {

+ id = (known after apply)

+ name = "lb-subnet"

+ network_acl_no = (known after apply)

+ subnet = "10.0.100.0/24"

+ subnet_no = (known after apply)

+ subnet_type = "PRIVATE"

+ usage_type = "LOADB"

+ vpc_no = (known after apply)

+ zone = "KR-1"

}

# ncloud_subnet.node_subnet will be created

+ resource "ncloud_subnet" "node_subnet" {

+ id = (known after apply)

+ name = "node-subnet"

+ network_acl_no = (known after apply)

+ subnet = "10.0.1.0/24"

+ subnet_no = (known after apply)

+ subnet_type = "PRIVATE"

+ usage_type = "GEN"

+ vpc_no = (known after apply)

+ zone = "KR-1"

}

# ncloud_vpc.vpc will be created

+ resource "ncloud_vpc" "vpc" {

+ default_access_control_group_no = (known after apply)

+ default_network_acl_no = (known after apply)

+ default_private_route_table_no = (known after apply)

+ default_public_route_table_no = (known after apply)

+ id = (known after apply)

+ ipv4_cidr_block = "10.0.0.0/16"

+ name = "vpc"

+ vpc_no = (known after apply)

}

Plan: 6 to add, 0 to change, 0 to destroy.




4


terraform apply -auto-approve




# 변수 파일에서 login key 부분 삭제

[root@new2222222 nks]# vi variables.tf




terraform apply -auto-approve



NKS 생성까지 35분



NKS 상세 정보



노드풀



노드



5

쿠버네티스를 위한 테라폼 코드 보기


root@command2:~/terraform-provider-ncloud-main/examples/nks# la

main.tf .terraform .terraform.lock.hcl terraform.tfstate terraform.tfstate.backup variables.tf versions.tf



more *.tf

::::::::::::::

main.tf

::::::::::::::

# VPC > User scenario > Scenario 1. Single Public Subnet

# https://docs.ncloud.com/ko/networking/vpc/vpc_userscenario1.html

provider "ncloud" {

support_vpc = true

region = "KR"

access_key = var.access_key

secret_key = var.secret_key

}

resource "ncloud_vpc" "vpc" {

name = "vpc"

ipv4_cidr_block = "10.0.0.0/16"

}

resource "ncloud_subnet" "node_subnet" {

vpc_no = ncloud_vpc.vpc.id

subnet = "10.0.1.0/24"

zone = "KR-1"

network_acl_no = ncloud_vpc.vpc.default_network_acl_no

subnet_type = "PRIVATE"

name = "node-subnet"

usage_type = "GEN"

}

resource "ncloud_subnet" "lb_subnet" {

vpc_no = ncloud_vpc.vpc.id

subnet = "10.0.100.0/24"

zone = "KR-1"

network_acl_no = ncloud_vpc.vpc.default_network_acl_no

subnet_type = "PRIVATE"

name = "lb-subnet"

usage_type = "LOADB"

}

resource "ncloud_subnet" "lb_subnet2" {

vpc_no = ncloud_vpc.vpc.id

subnet = "10.0.102.0/24"

zone = "KR-1"

network_acl_no = ncloud_vpc.vpc.default_network_acl_no

subnet_type = "PUBLIC"

name = "lb-subnet2"

usage_type = "LOADB"

}

data "ncloud_nks_versions" "version" {

filter {

name = "value"

values = [var.nks_version]

regex = true

}

}



resource "ncloud_login_key" "loginkey" {

key_name = var.login_key

}



resource "ncloud_nks_cluster" "cluster" {

cluster_type = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002"

k8s_version = data.ncloud_nks_versions.version.versions.0.value

login_key_name = ncloud_login_key.loginkey.key_name

name = "sample-cluster"

lb_private_subnet_no = ncloud_subnet.lb_subnet.id

lb_public_subnet_no = ncloud_subnet.lb_subnet2.id

kube_network_plugin = "cilium"

subnet_no_list = [ ncloud_subnet.node_subnet.id ]

vpc_no = ncloud_vpc.vpc.id

zone = "KR-1"

log {

audit = true

}

}

data "ncloud_nks_server_images" "image"{

hypervisor_code = "XEN"

filter {

name = "label"

values = ["ubuntu-20.04"]

regex = true

}

}

data "ncloud_nks_server_products" "nks_products"{

software_code = data.ncloud_nks_server_images.image.images[0].value

zone = "KR-1"

filter {

name = "product_type"

values = [ "STAND"]

}

filter {

name = "cpu_count"

values = [ "2"]

}

filter {

name = "memory_size"

values = [ "8GB" ]

}

}

resource "ncloud_nks_node_pool" "node_pool" {

cluster_uuid = ncloud_nks_cluster.cluster.uuid

node_pool_name = "pool1"

node_count = 1

software_code = data.ncloud_nks_server_images.image.images[0].value

product_code = data.ncloud_nks_server_products.nks_products.products[0].value

subnet_no_list = [ncloud_subnet.node_subnet.id]

autoscale {

enabled = true

min = 1

max = 2

}



label {

key = "foo"

value = "bar"

}

taint {

key = "foo"

value = "bar"

effect = "NoExecute"

}



}

::::::::::::::

variables.tf

::::::::::::::

variable name {

default = "tf-nks"

}

variable nks_version {

default = "1.32"

}

variable client_ip {

default = "213.0.13.2"

}

variable access_key {

default = "sHiw6j72BVa"

}

variable secret_key {

default = "Vpy89cpj9lnQPel"

}



variable login_key {

default = "agame-k8s12"

}



::::::::::::::

versions.tf

::::::::::::::

terraform {

required_providers {

ncloud = {

source = "navercloudplatform/ncloud"

}

}

required_version = ">= 0.13"

}



파일 첨부







<28> 테라폼 네트워크에 NAT 추가



아래 네트워크를 구축해보자.

NAT 추가!!!




root@command2:~/terraform-provider-ncloud-main/examples/nks# ls

backup main.tf nat.tf pub1.tf terraform.tfstate terraform.tfstate.backup variables.tf versions.tf



root@command2:~/terraform-provider-ncloud-main/examples/nks# more *.tf

::::::::::::::

main.tf

::::::::::::::

# VPC > User scenario > Scenario 1. Single Public Subnet

# https://docs.ncloud.com/ko/networking/vpc/vpc_userscenario1.html

provider "ncloud" {

support_vpc = true

region = "KR"

access_key = var.access_key

secret_key = var.secret_key

}

resource "ncloud_vpc" "vpc" {

name = "vpc"

ipv4_cidr_block = "10.0.0.0/16"

}

resource "ncloud_subnet" "node_subnet" {

vpc_no = ncloud_vpc.vpc.id

subnet = "10.0.1.0/24"

zone = "KR-1"

network_acl_no = ncloud_vpc.vpc.default_network_acl_no

subnet_type = "PRIVATE"

name = "node-subnet"

usage_type = "GEN"

}

resource "ncloud_subnet" "lb_subnet" {

vpc_no = ncloud_vpc.vpc.id

subnet = "10.0.100.0/24"

zone = "KR-1"

network_acl_no = ncloud_vpc.vpc.default_network_acl_no

subnet_type = "PRIVATE"

name = "lb-subnet"

usage_type = "LOADB"

}

resource "ncloud_subnet" "lb_subnet2" {

vpc_no = ncloud_vpc.vpc.id

subnet = "10.0.102.0/24"

zone = "KR-1"

network_acl_no = ncloud_vpc.vpc.default_network_acl_no

subnet_type = "PUBLIC"

name = "lb-subnet2"

usage_type = "LOADB"

}

/*

data "ncloud_nks_versions" "version" {

filter {

name = "value"

values = [var.nks_version]

regex = true

}

}

resource "ncloud_login_key" "loginkey" {

key_name = var.login_key

}

*/

/*

resource "ncloud_nks_cluster" "cluster" {

cluster_type = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002"

k8s_version = data.ncloud_nks_versions.version.versions.0.value

login_key_name = ncloud_login_key.loginkey.key_name

name = "sample-cluster"

lb_private_subnet_no = ncloud_subnet.lb_subnet.id

lb_public_subnet_no = ncloud_subnet.lb_subnet2.id

kube_network_plugin = "cilium"

subnet_no_list = [ ncloud_subnet.node_subnet.id ]

vpc_no = ncloud_vpc.vpc.id

zone = "KR-1"

log {

audit = true

}

}

data "ncloud_nks_server_images" "image"{

hypervisor_code = "XEN"

filter {

name = "label"

values = ["ubuntu-20.04"]

regex = true

}

}

data "ncloud_nks_server_products" "nks_products"{

software_code = data.ncloud_nks_server_images.image.images[0].value

zone = "KR-1"

filter {

name = "product_type"

values = [ "STAND"]

}

filter {

name = "cpu_count"

values = [ "2"]

}

filter {

name = "memory_size"

values = [ "8GB" ]

}

}

resource "ncloud_nks_node_pool" "node_pool" {

cluster_uuid = ncloud_nks_cluster.cluster.uuid

node_pool_name = "pool1"

node_count = 1

software_code = data.ncloud_nks_server_images.image.images[0].value

product_code = data.ncloud_nks_server_products.nks_products.products[0].value

subnet_no_list = [ncloud_subnet.node_subnet.id]

autoscale {

enabled = true

min = 1

max = 2

}

label {

key = "foo"

value = "bar"

}

taint {

key = "foo"

value = "bar"

effect = "NoExecute"

}

}

*/



::::::::::::::

nat.tf

::::::::::::::

resource "ncloud_subnet" "subnet_scn_02_public_natgw" {

# vpc_no = ncloud_vpc.vpc_scn_02.id

vpc_no = ncloud_vpc.vpc.id

# subnet = cidrsubnet(ncloud_vpc.vpc_scn_02.ipv4_cidr_block, 8, 2)

subnet = "10.0.2.0/24"

// "10.0.2.0/24"

zone = "KR-1"

network_acl_no = ncloud_vpc.vpc.default_network_acl_no

# network_acl_no = ncloud_network_acl.network_acl_02_public.id

subnet_type = "PUBLIC"

usage_type = "NATGW"

}

# NAT Gateway

resource "ncloud_nat_gateway" "nat_gateway_scn_02" {

# vpc_no = ncloud_vpc.vpc_scn_02.id

vpc_no = ncloud_vpc.vpc.id

subnet_no = ncloud_subnet.subnet_scn_02_public_natgw.id

zone = "KR-1"

name = var.name_scn02

#name = "nat_gateway_scn_02"

}

# Route Table

resource "ncloud_route" "route_scn_02_nat" {

route_table_no = ncloud_vpc.vpc.default_private_route_table_no

destination_cidr_block = "0.0.0.0/0"

target_type = "NATGW"

// NATGW (NAT Gateway) | VPCPEERING (VPC Peering) | VGW (Virtual Private Gateway).

target_name = ncloud_nat_gateway.nat_gateway_scn_02.name

target_no = ncloud_nat_gateway.nat_gateway_scn_02.id

}




::::::::::::::

pub1.tf

::::::::::::::

resource "ncloud_subnet" "pup1_subnet" {

vpc_no = ncloud_vpc.vpc.id

subnet = "10.0.0.0/24"

zone = "KR-1"

network_acl_no = ncloud_vpc.vpc.default_network_acl_no

subnet_type = "PUBLIC"

name = "pub1-subnet"

usage_type = "GEN"

}

::::::::::::::

variables.tf

::::::::::::::

variable name {

default = "tf-nks"

}

variable client_ip {

default = "213.10.113.252"

}

variable access_key {

default = "sHiwO72BVa"

}

variable secret_key {

default = "Vpy81CWg9lnQPel"

}

variable name_scn02 {

default = "tf-scn02"

}



::::::::::::::

versions.tf

::::::::::::::

terraform {

required_providers {

ncloud = {

source = "navercloudplatform/ncloud"

}

}

required_version = ">= 0.13"

}

root@command2:~/terraform-provider-ncloud-main/examples/nks#



NAT 설정 파일





1

cd /root/terraform-provider-ncloud-main/examples/nks


vi nat.tf



2

vi variables.tf


# Name used for the NAT Gateway resource in nat.tf.
variable "name_scn02" {
  default = "nat1"
}



3

# 콘솔에서


acg

80 허용




terraform init

terraform plan

terraform apply -auto-approve



terraform destroy --auto-approve




<29> 테라폼으로 네트워크 + 쿠버네티스 구축



1

우선 동작만 하게 코드를 편집했습니다.

코드는 변수처리로 하거나 좀 정리해야 합니다~







terraform init

terraform plan

terraform apply -auto-approve



약 35분 소요 됨.


클러스터 16분

node 19분



https://vclock.kr/timer/#countdown=00:10:00&enabled=0&seconds=0&sound=xylophone&loop=1



https://brunch.co.kr/@topasvga/3915




<30> 쿠버네티스 사용하기



1

사용을 위한 인증


ncp-iam 설치


# Download the ncp-iam-authenticator binary (Linux amd64, latest release).
curl -o ncp-iam-authenticator -L https://github.com/NaverCloudPlatform/ncp-iam-authenticator/releases/latest/download/ncp-iam-authenticator_linux_amd64


# Make the downloaded binary executable.
chmod +x ./ncp-iam-authenticator


# Install into ~/bin and make it available in the current session.
# FIX: $HOME is quoted so the commands survive a home path with spaces.
mkdir -p "$HOME/bin" && cp ./ncp-iam-authenticator "$HOME/bin/ncp-iam-authenticator" &&

export PATH="$PATH:$HOME/bin"


# Persist the PATH change for future login shells.
echo 'export PATH=$PATH:$HOME/bin' >> ~/.bash_profile

# Sanity check: the tool prints its help text when installed correctly.
ncp-iam-authenticator help


메뉴얼

https://guide.ncloud-docs.com/docs/k8s-iam-auth-ncp-iam-authenticator




등록


ncp-iam-authenticator create-kubeconfig --region KR --clusterUuid b01xxxxxxxxxx --output kubeconfig.yaml


ncp-iam-authenticator create-kubeconfig --region <region-code> --clusterUuid <cluster-uuid> --output kubeconfig.yaml



2

kubectl 명령어 설치


curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"

sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl





3


vi ~/.bash_profile


맨 아래줄에 아래 내용 추가

# 'k' — kubectl preconfigured with the NKS kubeconfig created earlier.
alias k='kubectl --kubeconfig="/root/kubeconfig.yaml"'

# 'kw' — live watch of deployments, services and pods (refreshes via watch -d).
alias kw='watch -d kubectl get deploy,svc,pods --kubeconfig="/root/kubeconfig.yaml"'



source ~/.bash_profile

k get nodes




4


# Create a Deployment running the 2048 game (2 replicas).
# Use the 'k' alias (kubectl + NKS kubeconfig); with a plain kubectl use:
#   cat <<EOF | kubectl create -f -
# FIX: the original showed two stacked `cat <<EOF` lines — if pasted as-is,
# the first heredoc swallows the second command line. Only one is kept.
# FIX: YAML indentation restored (it was lost in the original listing,
# making the manifest invalid).
cat <<EOF | k create -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment-2048
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: app-2048
  replicas: 2
  template:
    metadata:
      labels:
        app.kubernetes.io/name: app-2048
    spec:
      containers:
      - image: alexwhen/docker-2048
        name: app-2048
        ports:
        - containerPort: 80
EOF





클래식 로드 밸런서 연결

외부에서 접속

k expose deployment deployment-2048 --port=80 --type=LoadBalancer


kubectl expose deployment deployment-2048 --port=80 --type=LoadBalancer








10

https://brunch.co.kr/@topasvga/3183





<31> 테라폼으로 VPC, 쿠버네티스 생성하기 테라폼 파일


1

cd /root/terraform-provider-ncloud-main/examples/nks



2

vi main.tf

소스에서 taints 부분 삭제 또는 주석 처리 - 해당 조건에 맞아야 Pod가 동작하므로 제거.


https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/



3

terraform init

terraform plan

terraform apply -auto-approve



4

테라폼 파일




5

kw

100 ok.png





<32> NATGW 라우팅 테이블 별도로 만들기 (선택)


1

vi nat.tf


# nat.tf (variant) — same NAT setup as before, but with a *dedicated*
# private route table instead of the VPC default, explicitly associated
# with the NKS node subnet.

# Subnet reserved for the NAT Gateway (usage_type must be "NATGW").
resource "ncloud_subnet" "nat_subnet" {

# vpc_no = ncloud_vpc.vpc_scn_02.id

# Attach to the VPC declared in main.tf.
vpc_no = ncloud_vpc.vpc.id

# subnet = cidrsubnet(ncloud_vpc.vpc_scn_02.ipv4_cidr_block, 8, 2)

subnet = "10.0.2.0/24"

// "10.0.2.0/24"

zone = "KR-1"

network_acl_no = ncloud_vpc.vpc.default_network_acl_no

# network_acl_no = ncloud_network_acl.network_acl_02_public.id

# NAT Gateway subnets must be PUBLIC.
subnet_type = "PUBLIC"

usage_type = "NATGW"

}

# NAT Gateway placed in the subnet above.
resource "ncloud_nat_gateway" "natgw1" {

vpc_no = ncloud_vpc.vpc.id

subnet_no = ncloud_subnet.nat_subnet.id

zone = "KR-1"

name = "kubernetes-nat-gw"

}

# Dedicated route table for PRIVATE subnets (instead of the VPC default).
resource "ncloud_route_table" "kubernetes_route_table" {

vpc_no = ncloud_vpc.vpc.id

supported_subnet_type = "PRIVATE"

name = "kubernetes-route-table"

}

# Default route (0.0.0.0/0) through the NAT Gateway.
resource "ncloud_route" "kubernetes_route" {

route_table_no = ncloud_route_table.kubernetes_route_table.id

destination_cidr_block = "0.0.0.0/0"

target_type = "NATGW"

target_name = ncloud_nat_gateway.natgw1.name

target_no = ncloud_nat_gateway.natgw1.id

}

# Bind the dedicated route table to the NKS node subnet (from main.tf),
# so the worker nodes get outbound internet access via the NAT Gateway.
resource "ncloud_route_table_association" "kubernetes_route_table_subnet" {

route_table_no = ncloud_route_table.kubernetes_route_table.id

subnet_no = ncloud_subnet.node_subnet.id

}





2

콘솔에서 subnet 확인

110 subnet.png





<33> 삭제


1

cd /root/terraform-provider-ncloud-main/examples/nks


terraform destroy --auto-approve



2


https://vclock.kr/timer/#countdown=00:10:00&enabled=0&seconds=0&sound=xylophone&loop=1




감사합니다.



매거진의 이전글NCP 20탄-6. 테라폼1 -2024