1. How to use existing resource group in terraform?
terraform import azurerm_resource_group.rg /subscriptions/<sub_id>/resourceGroups/<rg_name>
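Note: terraform import only writes the resource into state; the configuration must already contain a matching resource block for that address (values below are placeholders):
resource "azurerm_resource_group" "rg" {
  name     = "<rg_name>" # must match the existing group's name
  location = "East US"   # and its actual location
}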
2. Create an Azure Linux virtual machine using Terraform:
Complete code: https://github.com/MaheshMagadum/cloudops/tree/main/terraform-02
terraform {
  required_version = ">=1.0.0"
  required_providers {
    azapi = {
      source  = "azure/azapi"
      version = "~>1.5"
    }
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~>3.0.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "~>3.0"
    }
  }
}

provider "azurerm" {
  features {}
}

resource "azurerm_resource_group" "rg" {
  name     = "dev-rg"
  location = var.location
}

resource "azurerm_virtual_network" "azure_vnet" {
  name                = "aro-vnet"
  resource_group_name = azurerm_resource_group.rg.name
  location            = azurerm_resource_group.rg.location
  address_space       = ["10.0.4.0/25"]
}

resource "azurerm_subnet" "azure_subnet" {
  name                 = var.subnet_name
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.azure_vnet.name
  address_prefixes     = ["10.0.4.0/29"]
}

# Create public IPs
resource "azurerm_public_ip" "public_IP" {
  name                = "public_IP"
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name
  allocation_method   = "Dynamic"
}

resource "azurerm_network_interface" "azure_ni" {
  name                = azurerm_virtual_network.azure_vnet.name
  location            = var.location
  resource_group_name = azurerm_resource_group.rg.name

  ip_configuration {
    name                          = "my_azure_ni"
    subnet_id                     = azurerm_subnet.azure_subnet.id
    private_ip_address_allocation = var.private_ip_allocation
    public_ip_address_id          = azurerm_public_ip.public_IP.id
  }
}

resource "azurerm_network_security_group" "nsg" {
  name                = "myNetworkSecurityGroup"
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name

  security_rule {
    name                       = "SSH"
    priority                   = 1001
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "22"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }
}

# Connect the security group to the network interface
resource "azurerm_network_interface_security_group_association" "namehere" {
  network_interface_id      = azurerm_network_interface.azure_ni.id
  network_security_group_id = azurerm_network_security_group.nsg.id
}

resource "azurerm_linux_virtual_machine" "azure_vm" {
  name                  = var.vm_name
  resource_group_name   = azurerm_resource_group.rg.name
  location              = var.location
  network_interface_ids = [azurerm_network_interface.azure_ni.id]
  size                  = "Standard_B2s"

  os_disk {
    name                 = "myOsDisk"
    caching              = "ReadWrite"
    storage_account_type = "Standard_LRS"
  }

  source_image_reference {
    publisher = "Canonical"
    offer     = "0001-com-ubuntu-server-jammy"
    sku       = "22_04-lts-gen2"
    version   = "latest"
  }

  computer_name  = var.hostname
  admin_username = var.username

  admin_ssh_key {
    username   = var.username
    public_key = jsondecode(azapi_resource_action.ssh_public_key_gen.output).publicKey
  }
}
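The admin_ssh_key block above references azapi_resource_action.ssh_public_key_gen, which is defined elsewhere in the linked repo. A sketch of what it presumably looks like, following the standard Azure quickstart pattern for generating an SSH key pair with the azapi provider (resource names here are assumptions):
# Azure-side SSH public key resource to hold the generated key
resource "azapi_resource" "ssh_public_key" {
  type      = "Microsoft.Compute/sshPublicKeys@2022-11-01"
  name      = "vm-ssh-key" # hypothetical name
  location  = azurerm_resource_group.rg.location
  parent_id = azurerm_resource_group.rg.id
}

# Invoke the generateKeyPair action; azapi 1.x returns the response as a JSON
# string in `output`, which is why the VM config uses jsondecode(...).publicKey
resource "azapi_resource_action" "ssh_public_key_gen" {
  type        = "Microsoft.Compute/sshPublicKeys@2022-11-01"
  resource_id = azapi_resource.ssh_public_key.id
  action      = "generateKeyPair"
  method      = "POST"

  response_export_values = ["publicKey", "privateKey"]
}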
Create an Azure Resource Group using Terraform:
main.tf
terraform {
required_version = ">=1.0.0"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~>3.0.0"
}
}
}
provider "azurerm" {
features{}
}
variable "location" {
type = string
default = "East US"
}
resource "azurerm_resource_group" "rg" {
name = "dev-rg"
location = var.location
}
Execute the commands below to install the Azure provider plugin, create the resource, and destroy it:
>terraform init -upgrade
>terraform validate
>terraform plan
>terraform apply
>terraform destroy
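The location variable can be overridden at plan/apply time without editing main.tf, for example:
>terraform apply -var="location=West US 2"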
Ref: https://learn.microsoft.com/en-us/azure/openshift/howto-use-key-vault-secrets
oc login https://api.alstt7ftx43328907e.eastus.aroapp.io:6443/ -u kubeadmin -p g9i2H-KVUqo-7SjUm-UthrL
oc new-project k8s-secrets-store-csi
oc adm policy add-scc-to-user privileged system:serviceaccount:k8s-secrets-store-csi:secrets-store-csi-driver
helm repo add secrets-store-csi-driver https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts
helm repo update
helm install -n k8s-secrets-store-csi csi-secrets-store secrets-store-csi-driver/secrets-store-csi-driver --version v1.3.1 --set "linux.providersDir=/var/run/secrets-store-csi-providers"
Next,
helm repo add csi-secrets-store-provider-azure https://azure.github.io/secrets-store-csi-driver-provider-azure/charts
helm repo update
Next,
helm install -n k8s-secrets-store-csi azure-csi-provider csi-secrets-store-provider-azure/csi-secrets-store-provider-azure --set linux.privileged=true --set secrets-store-csi-driver.install=false --set "linux.providersDir=/var/run/secrets-store-csi-providers" --version=v1.4.1
oc adm policy add-scc-to-user privileged system:serviceaccount:k8s-secrets-store-csi:csi-secrets-store-provider-azure
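At this point both the driver and the Azure provider pods should be running in the namespace; a quick check:
kubectl get pods -n k8s-secrets-store-csi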
Next (create a key vault and a secret):
oc new-project my-application
az keyvault create -n ${KEYVAULT_NAME} -g ${KEYVAULT_RESOURCE_GROUP} --location ${KEYVAULT_LOCATION}
az keyvault secret set --vault-name secret-store-oljy7AQDbV --name secret1 --value "Hello"
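To confirm the secret was stored:
az keyvault secret show --vault-name secret-store-oljy7AQDbV --name secret1 --query value -o tsv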
export SERVICE_PRINCIPAL_CLIENT_SECRET="ces8Q~kBm~YYJTPLDOSsqrbLT0yDFWcil7r-XbbB"
export SERVICE_PRINCIPAL_CLIENT_ID="e8d92000-2a2c-4581-890f-6fb611717706"
az keyvault set-policy -n secret-store-oljy7AQDbV --secret-permissions get --spn ${SERVICE_PRINCIPAL_CLIENT_ID}
kubectl create secret generic secrets-store-creds --from-literal clientid=${SERVICE_PRINCIPAL_CLIENT_ID} --from-literal clientsecret=${SERVICE_PRINCIPAL_CLIENT_SECRET}
kubectl -n my-application label secret secrets-store-creds secrets-store.csi.k8s.io/used=true
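The SecretProviderClass and test pod themselves are not reproduced in these notes; per the Microsoft Learn article above they look roughly like the following (tenant ID is a placeholder), applied with kubectl apply -f:
apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
  name: azure-kvname
  namespace: my-application
spec:
  provider: azure
  parameters:
    usePodIdentity: "false"
    keyvaultName: "secret-store-oljy7AQDbV"
    objects: |
      array:
        - |
          objectName: secret1
          objectType: secret
    tenantId: "<tenant_id>"
---
kind: Pod
apiVersion: v1
metadata:
  name: busybox-secrets-store-inline
  namespace: my-application
spec:
  containers:
    - name: busybox
      image: registry.k8s.io/e2e-test-images/busybox:1.29-4 # any busybox image works
      command: ["/bin/sleep", "10000"]
      volumeMounts:
        - name: secrets-store-inline
          mountPath: "/mnt/secrets-store"
          readOnly: true
  volumes:
    - name: secrets-store-inline
      csi:
        driver: secrets-store.csi.k8s.io
        readOnly: true
        volumeAttributes:
          secretProviderClass: "azure-kvname"
        nodePublishSecretRef:
          name: secrets-store-creds # the service-principal creds secret created above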
kubectl exec busybox-secrets-store-inline -- ls /mnt/secrets-store/
Error from server (Forbidden): error when creating "<pod>.yaml": pods "busybox-secrets-store-inline" is forbidden: busybox-secrets-store-inline uses an inline volume provided by CSIDriver secrets-store.csi.k8s.io and namespace my-application has a pod security enforce level that is lower than privileged
ISSUE THIS COMMAND:
kubectl label --overwrite ns my-application pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/enforce-version=v1.29
AND RE-APPLY THE POD YAML (sketched above).
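The namespace labels can be confirmed with:
kubectl get ns my-application --show-labels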
Find the address of the API server:
apiServer=$(az aro show -g $RESOURCEGROUP -n $CLUSTER --query apiserverProfile.url -o tsv)
ex: apiServer=$(az aro show -g aro_group -n arocluster --query apiserverProfile.url -o tsv)
oc login $apiServer -u kubeadmin -p <kubeadmin password>
C:\Users\santosh>helm install my-kong2 kong/kong -n kong --values ./full-k4k8s-with-kong-enterprise.conf.txt
coalesce.go:289: warning: destination for kong.proxy.stream is a table. Ignoring non-table value ([])
coalesce.go:289: warning: destination for kong.proxy.stream is a table. Ignoring non-table value ([])
NAME: my-kong2
LAST DEPLOYED: Sun Feb 18 15:16:30 2024
NAMESPACE: kong
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
To connect to Kong, please execute the following commands:
HOST=$(kubectl get svc --namespace kong my-kong2-kong-proxy -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
PORT=$(kubectl get svc --namespace kong my-kong2-kong-proxy -o jsonpath='{.spec.ports[0].port}')
export PROXY_IP=${HOST}:${PORT}
curl $PROXY_IP
Once installed, please follow along the getting started guide to start using
Kong: https://docs.konghq.com/kubernetes-ingress-controller/latest/guides/getting-started/
Article: https://arifkiziltepe.medium.com/kong-installation-on-openshift-3eb3291d3998
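Per the getting started guide linked above, a minimal way to exercise the proxy once PROXY_IP resolves is an Ingress routed through the kong ingress class (the echo service/port here is a placeholder for whatever backend is deployed):
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: echo-ingress
  annotations:
    konghq.com/strip-path: "true" # strip /echo before proxying upstream
spec:
  ingressClassName: kong
  rules:
    - http:
        paths:
          - path: /echo
            pathType: ImplementationDefined
            backend:
              service:
                name: echo
                port:
                  number: 80
Then: curl $PROXY_IP/echo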