cd /root/terraform-provider-ncloud-main/examples/nks
[root@sssssss nks]# more *.tf
::::::::::::::
main.tf
::::::::::::::
# VPC > User scenario > Scenario 1. Single Public Subnet
# https://docs.ncloud.com/ko/networking/vpc/vpc_userscenario1.html
# NCloud provider for the VPC environment, pinned to the KR region.
# Credentials come in through the access_key / secret_key variables.
provider "ncloud" {
  access_key  = var.access_key
  secret_key  = var.secret_key
  region      = "KR"
  support_vpc = true
}
# VPC with a /16 address space; the subnets below are carved out of it.
resource "ncloud_vpc" "vpc" {
  ipv4_cidr_block = "10.0.0.0/16"
  name            = "vpc"
}
# Private subnet that hosts the NKS worker nodes (general usage type).
resource "ncloud_subnet" "node_subnet" {
  name           = "node-subnet"
  vpc_no         = ncloud_vpc.vpc.id
  subnet         = "10.0.1.0/24"
  zone           = "KR-1"
  subnet_type    = "PRIVATE"
  usage_type     = "GEN"
  network_acl_no = ncloud_vpc.vpc.default_network_acl_no
}
# Private load-balancer subnet (usage_type LOADB) for the NKS cluster.
resource "ncloud_subnet" "lb_subnet" {
  name           = "lb-subnet"
  vpc_no         = ncloud_vpc.vpc.id
  subnet         = "10.0.100.0/24"
  zone           = "KR-1"
  subnet_type    = "PRIVATE"
  usage_type     = "LOADB"
  network_acl_no = ncloud_vpc.vpc.default_network_acl_no
}
# Resolve the available NKS Kubernetes versions whose "value" field
# matches var.nks_version (regex match, e.g. "1.25" matches "1.25.x").
data "ncloud_nks_versions" "version" {
  filter {
    name   = "value"
    values = [var.nks_version]
    regex  = true
  }
}
# SSH login key assigned to the NKS worker nodes.
resource "ncloud_login_key" "loginkey" {
  key_name = var.login_key
}
/*
resource "ncloud_nks_cluster" "cluster" {
cluster_type = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002"
k8s_version = data.ncloud_nks_versions.version.versions.0.value
login_key_name = ncloud_login_key.loginkey.key_name
name = "sample-cluster"
lb_private_subnet_no = ncloud_subnet.lb_subnet.id
kube_network_plugin = "cilium"
#subnet_no_list = [ ncloud_subnet.node_subnet.id ]
subnet_no_list = [ ncloud_subnet.node_subnet.id ]
vpc_no = ncloud_vpc.vpc.id
zone = "KR-1"
log {
audit = true
}
}
*/
# XEN hypervisor server images, narrowed to Ubuntu 20.04 by a regex
# match on the image label.
data "ncloud_nks_server_images" "image" {
  hypervisor_code = "XEN"

  filter {
    name   = "label"
    values = ["ubuntu-20.04"]
    regex  = true
  }
}
# Node products compatible with the selected image in zone KR-1,
# filtered down to STAND type / 2 vCPU / 8GB memory.
data "ncloud_nks_server_products" "nks_products" {
  software_code = data.ncloud_nks_server_images.image.images[0].value
  zone          = "KR-1"

  filter {
    name   = "product_type"
    values = ["STAND"]
  }

  filter {
    name   = "cpu_count"
    values = ["2"]
  }

  filter {
    name   = "memory_size"
    values = ["8GB"]
  }
}
/*
resource "ncloud_nks_node_pool" "node_pool" {
cluster_uuid = ncloud_nks_cluster.cluster.uuid
node_pool_name = "pool1"
node_count = 1
software_code = data.ncloud_nks_server_images.image.images[0].value
product_code = data.ncloud_nks_server_products.nks_products.products[0].value
subnet_no_list = [ncloud_subnet.node_subnet.id]
autoscale {
enabled = true
min = 1
max = 2
}
label {
key = "foo"
value = "bar"
}
taint {
key = "foo"
value = "bar"
effect = "NoExecute"
}
}
*/
::::::::::::::
variables.tf
::::::::::::::
# Name prefix for resources in this example.
# Quoted block labels are the idiomatic HCL2 form; bare labels such as
# `variable name` are legacy 0.11 syntax.
# NOTE(review): not referenced by any resource in the snippets shown —
# confirm it is still needed.
variable "name" {
  type    = string
  default = "tf-nks"
}
# Kubernetes minor version to request; used as a regex by the
# ncloud_nks_versions data source ("1.25" matches any 1.25.x).
variable "nks_version" {
  type    = string
  default = "1.25"
}
# Client IP address.
# NOTE(review): not referenced anywhere in the configuration shown —
# presumably intended for an ACL/allow-list rule; confirm before removing.
variable "client_ip" {
  type    = string
  default = "223.130.137.181"
}
# NCloud API access key.
# SECURITY: the original default embedded a real-looking credential.
# Never commit secrets to source; supply the value at runtime via
# TF_VAR_access_key (and rotate the leaked key). On Terraform >= 0.14
# also mark this variable `sensitive = true`.
variable "access_key" {
  type    = string
  default = ""
}
# NCloud API secret key.
# SECURITY: the original default embedded a real-looking credential.
# Never commit secrets to source; supply the value at runtime via
# TF_VAR_secret_key (and rotate the leaked key). On Terraform >= 0.14
# also mark this variable `sensitive = true`.
variable "secret_key" {
  type    = string
  default = ""
}
# Name of the SSH login key created for the worker nodes.
variable "login_key" {
  type    = string
  default = "test"
}
::::::::::::::
versions.tf
::::::::::::::
# Terraform settings: minimum CLI version and the NCloud provider source.
terraform {
  required_version = ">= 0.13"

  required_providers {
    ncloud = {
      source = "navercloudplatform/ncloud"
    }
  }
}
[root@sssssss nks]#
참고
https://registry.terraform.io/providers/NaverCloudPlatform/ncloud/2.3.19/docs/resources/nks_cluster
Plan: 2 to add, 0 to change, 0 to destroy.
ncloud_nks_cluster.cluster: Creating...
╷
│ Error: Status: 400 Bad Request, Body: {"error":{"errorCode":400,"message":"Bad Request","details":"Require lbPublicSubnetNo"},"timestamp":"2024-01-04T13:44:40.804Z"}
│
│ with ncloud_nks_cluster.cluster,
│ on main.tf line 59, in resource "ncloud_nks_cluster" "cluster":
│ 59: resource "ncloud_nks_cluster" "cluster"
# Public load-balancer subnet. NKS cluster creation requires one — the
# API returns 400 "Require lbPublicSubnetNo" without it (see the error
# captured earlier in this log).
resource "ncloud_subnet" "lb_subnet2" {
  name           = "lb-subnet2"
  vpc_no         = ncloud_vpc.vpc.id
  subnet         = "10.0.102.0/24"
  zone           = "KR-1"
  subnet_type    = "PUBLIC"
  usage_type     = "LOADB"
  network_acl_no = ncloud_vpc.vpc.default_network_acl_no
}
# NOTE(review): duplicate of the data "ncloud_nks_versions" "version"
# block declared earlier in main.tf. This dump appears to concatenate
# two revisions of the file; a single Terraform configuration must
# declare each data source address only once — remove one copy before
# running terraform.
data "ncloud_nks_versions" "version" {
filter {
name = "value"
values = [var.nks_version]
regex = true
}
}
# NOTE(review): duplicate of the ncloud_login_key "loginkey" resource
# declared earlier in main.tf — same concatenated-revisions issue;
# keep only one declaration in the real configuration.
resource "ncloud_login_key" "loginkey" {
key_name = var.login_key
}
/*
resource "ncloud_nks_cluster" "cluster" {
cluster_type = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002"
k8s_version = data.ncloud_nks_versions.version.versions.0.value
login_key_name = ncloud_login_key.loginkey.key_name
name = "sample-cluster"
lb_private_subnet_no = ncloud_subnet.lb_subnet.id
lb_public_subnet_no = ncloud_subnet.lb_subnet2.id
kube_network_plugin = "cilium"
subnet_no_list = [ ncloud_subnet.node_subnet.id ]
vpc_no = ncloud_vpc.vpc.id
zone = "KR-1"
log {
audit = true
}
}
*/
# NKS cluster. Both a private and a public LB subnet must be supplied —
# the create API rejects the request with 400 "Require lbPublicSubnetNo"
# when lb_public_subnet_no is omitted (see the error captured above).
resource "ncloud_nks_cluster" "cluster" {
  name           = "sample-cluster"
  cluster_type   = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002"
  k8s_version    = data.ncloud_nks_versions.version.versions[0].value
  login_key_name = ncloud_login_key.loginkey.key_name

  vpc_no               = ncloud_vpc.vpc.id
  zone                 = "KR-1"
  subnet_no_list       = [ncloud_subnet.node_subnet.id]
  lb_private_subnet_no = ncloud_subnet.lb_subnet.id
  lb_public_subnet_no  = ncloud_subnet.lb_subnet2.id
  kube_network_plugin  = "cilium"

  # Enable Kubernetes audit logging for the control plane.
  log {
    audit = true
  }
}
terraform init
terraform plan
terraform apply -auto-approve
(15분만에 클러스터 생성)
ncloud_nks_cluster.cluster: Still creating... [13m40s elapsed]
ncloud_nks_cluster.cluster: Still creating... [13m50s elapsed]
ncloud_nks_cluster.cluster: Still creating... [14m0s elapsed]
ncloud_nks_cluster.cluster: Still creating... [14m10s elapsed]
ncloud_nks_cluster.cluster: Still creating... [14m20s elapsed]
ncloud_nks_cluster.cluster: Still creating... [14m30s elapsed]
ncloud_nks_cluster.cluster: Creation complete after 14m37s [id=92cab43c-d09e-4b6a-b85d-9f924fd202c3]
(이후 노드 생성 10분 )
ncloud_nks_node_pool.node_pool: Creating...
ncloud_nks_node_pool.node_pool: Still creating... [10s elapsed]
ncloud_nks_node_pool.node_pool: Still creating... [20s elapsed]
ncloud_nks_node_pool.node_pool: Still creating... [9m1s elapsed]
ncloud_nks_node_pool.node_pool: Still creating... [9m11s elapsed]
ncloud_nks_node_pool.node_pool: Still creating... [9m21s elapsed]
ncloud_nks_node_pool.node_pool: Still creating... [9m31s elapsed]
ncloud_nks_node_pool.node_pool: Still creating... [9m41s elapsed]
ncloud_nks_node_pool.node_pool: Creation complete after 9m41s [id=92cab43c-d09e-4b6a-b85d-9f924fd202c3:pool1]
Apply complete! Resources: 6 added, 0 changed, 0 destroyed.
[root@sssssss nks]#
# 처음 생성되는데 시간이 오래 걸렸다.
다시 두번째 하니 15분, 10분 걸렸다.
총 25분 소요 !!!!
첨부
1
ncp-iam-authenticator create-kubeconfig --region KR --clusterUuid b01xxxxxxxxxx --output kubeconfig.yaml
2
kubectl create deployment nginx-project --image=nginx --dry-run=client -o yaml --port=80 > nginx-deploy.yaml --kubeconfig kubeconfig.yaml
kubectl apply -f nginx-deploy.yaml --kubeconfig kubeconfig.yaml
kubectl get pods --kubeconfig kubeconfig.yaml
3
참고
https://brunch.co.kr/@topasvga/3183
4
[root@sssssss ~]# vi test.yaml
[root@sssssss ~]# k apply -f test.yaml
kubectl expose deployment deployment-2048 --port=80 --type=LoadBalancer --kubeconfig kubeconfig.yaml
5
pod들을 배포 했으나 NATGW가 없어서 서비스는 안된다.
NATGW 구축하고 프라이빗 서브넷을 NATGW로 라우팅을 추가하자.
https://brunch.co.kr/@topasvga/3599
https://brunch.co.kr/@topasvga/3597