Kubernetes: Kubernetes Examples

Examples of common Kubernetes manifests and configuration files.

kubectl

Custom client: kube-login-cli

#1. Download the custom client
wget https://devops-xxxx.cos.ap-beijing.cici.com/kube-login-cli/mac/kube-login-cli --no-check-certificate -O /usr/local/bin/kube-login-cli && chmod +x /usr/local/bin/kube-login-cli
# or
#1. Open this URL in a browser: https://devops-1254024480.cos.ap-beijing.cici.com/kube-login-cli/mac/kube-login-cli
#2. Save the downloaded binary anywhere and run chmod +x xxxx to make it executable

# Help
kube-login-cli -h
Usage of kube-login-cli:
  -p string
        ldap password. type string (default "nil")
  -u string
        ldap username. type string (default "nil")

#2. Log in
xxx@xxxdeMacBook-Pro~ kube-login-cli -u xiaoming
Enter LDAP password >>>>:

+++ Select an environment +++
maxwell environment: kubectl config use-context xiaoming-maxwell-context
canary environment: kubectl config use-context xiaoming-canary-context
bigdata environment: kubectl config use-context xiaoming-bigdata-context
devops environment: kubectl config use-context xiaoming-devops-context
bedin environment: kubectl config use-context xiaoming-bedin-context

# Cluster-related files are generated under ~/.kube
~ ls ~/.kube
cache                 http-cache            xxx-bigdata-ca.pem xxx-devops-ca.pem
config                xxx-bedin-ca.pem   xxx-canary-ca.pem  xxx-maxwell-ca.pem
~ cat ~/.kube/config
apiVersion: v1
clusters:
- cluster:
    certificate-authority: xxx-bedin-ca.pem
    server: https://10.22.0.227:60002
  name: xxx-bedin

- cluster:
    certificate-authority: xxx-bigdata-ca.pem
    server: https://10.101.100.185:5443
  name: xxx-bigdata
... (omitted)

contexts:
- context:
    cluster: xxx-bedin
    user: xiaoming-bedin-context
  name: xiaoming-bedin-context

- context:
    cluster: xxx-bigdata
    user: xiaoming-bigdata-context
  name: xiaoming-bigdata-context
... (omitted)

kind: Config
preferences: {}
users:
- name: xiaoming-bedin-context
  user:
    auth-provider:
      config:
        client-id: loginapp
        client-secret: hL2gbCex75P2AzpW
        id-token: eyJhbGci...BqxYounRHi1Fv1NIe54w
        idp-issuer-url: https://dex-bedin.xxxtech.com/dex
        refresh-token: Chl2ZzJ5cTdpNDNyaWdudzRya212YTJmYXBvEhl0ZXNma3l0Z2l6b3lreWdiaG1qN3FleTZs
      name: oidc

- name: xiaoming-bigdata-context
  user:
    auth-provider:
      config:
        client-id: loginapp
        client-secret:
        id-token: eyJhbGciOiJSUzI...cL8PfuIpThQ
        idp-issuer-url: https://dex-bigdata.xxxtech.com/dex
        refresh-token: ChlsdHV0amFxN21jNWpodXpoZnI1MzMza3J0Ehl1NmpoNjRuY3VzNmdhZnRvZ2NlNWN0cXdt
      name: oidc
... (omitted)
current-context: xiaoming-bedin-context

#3. Switch to the desired context
kubectl config use-context xiaoming-bedin-context

#4. Use kubectl commands to operate the cluster
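
For example, a few typical read-only commands once a context is active (the namespace and workload names are illustrative, borrowed from examples later on this page):

kubectl get nodes -o wide
kubectl -n english-prod get pods
kubectl -n english-prod describe deployment data-point-server-admin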

Install a kubectl client matching the cluster version

curl -LO "https://dl.k8s.io/release/v1.18.8/bin/darwin/amd64/kubectl"
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
sudo chown root: /usr/local/bin/kubectl
kubectl version --client


# The following steps are optional, depending on your situation
mkdir -p $HOME/.kube
mv -f kubeconfig.json $HOME/.kube/config
# Switch kubectl's access context as needed for your scenario
kubectl config use-context internal
# After setup, check cluster information with:
kubectl cluster-info

# kubectl auto-completion
#bash
echo 'alias k=kubectl' >>~/.bash_profile
echo 'source <(kubectl completion bash)' >>~/.bashrc
kubectl completion bash >/usr/local/etc/bash_completion.d/kubectl
#zsh
echo 'alias k=kubectl' >>~/.zshrc
echo 'source <(kubectl completion zsh)' >>~/.zshrc

Secrets

TLS certificate

apiVersion: v1
data:
  tls.crt: base64
  tls.key: base64
kind: Secret
metadata:  
  name: xxxcom
  namespace: english-prod
type: kubernetes.io/tls
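
The same kind of Secret can be generated from local certificate files with kubectl create secret tls; a minimal sketch, assuming the certificate and key are saved as tls.crt and tls.key:

kubectl -n english-prod create secret tls xxxcom \
  --cert=tls.crt --key=tls.key \
  --dry-run=client -o yaml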

Image pull credentials

apiVersion: v1
data:
  .dockerconfigjson: eyJhdXRocyI6eyJyZWdpc3Ryexxxxxxxxxxxx=
kind: Secret
metadata:
  name: cn-beijing-ali-register
  namespace: english-prod
type: kubernetes.io/dockerconfigjson
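
A .dockerconfigjson Secret like this is usually produced with kubectl create secret docker-registry; a sketch with placeholder registry address and credentials:

kubectl -n english-prod create secret docker-registry cn-beijing-ali-register \
  --docker-server=<registry-address> \
  --docker-username=<user> \
  --docker-password=<password> \
  --dry-run=client -o yaml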

ConfigMap

apiVersion: v1
data:
  mysql.db.database: nacos
  mysql.db.name: rm.com
  mysql.db.password: 4|K0/Bl@s_ABzQK8F4ji
  mysql.db.port: "3306"
  mysql.db.user: nacos_rw
kind: ConfigMap
metadata:
  name: nacos-cm
  namespace: english-prod
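
The equivalent imperative command, using the literal values from the manifest above:

kubectl -n english-prod create configmap nacos-cm \
  --from-literal=mysql.db.database=nacos \
  --from-literal=mysql.db.name=rm.com \
  --from-literal=mysql.db.password='4|K0/Bl@s_ABzQK8F4ji' \
  --from-literal=mysql.db.port=3306 \
  --from-literal=mysql.db.user=nacos_rw \
  --dry-run=client -o yaml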

hpa

apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: data-point-server-admin
  namespace: english-prod
spec:
  maxReplicas: 12
  metrics:
  - resource:
      name: memory
      target:
        averageUtilization: 80
        type: Utilization
    type: Resource
  minReplicas: 2
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: data-point-server-admin
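
Note that autoscaling/v2beta2 has been removed in newer Kubernetes releases (autoscaling/v2 is served from v1.23 onward), so it is worth checking which versions the target cluster actually serves before applying, and confirming the HPA afterwards:

kubectl api-versions | grep autoscaling
kubectl -n english-prod get hpa data-point-server-admin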

Load balancing: Ingress

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
  generation: 7
  name: etp-agent
  namespace: english-prod
spec:
  rules:
  - host: new.xxx.com
    http:
      paths:
      - backend:
          serviceName: english-etp-page
          servicePort: 8090
        path: /(.*)
        pathType: ImplementationSpecific
      - backend:
          serviceName: english-etp-page-new
          servicePort: 8090
        path: /new/(.*)
        pathType: ImplementationSpecific
  tls:
  - hosts:
    - new.xxx.com  # domain covered by the SSL certificate (required)
    secretName: xxxcom
status:
  loadBalancer:
    ingress:
    - ip: 39.103.36.c
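
After applying, the assigned address and rendered rules can be checked with kubectl (resource names from the manifest above):

kubectl -n english-prod get ingress etp-agent
kubectl -n english-prod describe ingress etp-agent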

AWS EKS Ingress bound to an ALB (layer 7)

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    alb.ingress.kubernetes.io/actions.forward-summy-sc-gateway: |
      {"type":"forward","forwardConfig":{"targetGroups":[{"serviceName":"gateway","servicePort":18085,"weight":0},{"serviceName":"gateway-common","servicePort":18085,"weight":100}],"targetGroupStickinessConfig":{"enabled":true,"durationSeconds":300}}}
    alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:ap-south-1:<ACCOUNT_ID>:certificate/582fb25b-d603-49f4-9970-8b80d8157370
    alb.ingress.kubernetes.io/healthcheck-interval-seconds: "30"
    alb.ingress.kubernetes.io/healthcheck-path: /api/health/check
    alb.ingress.kubernetes.io/healthcheck-timeout-seconds: "25"
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS-1-2-2017-01
    alb.ingress.kubernetes.io/tags: Environment=Production,BusinessUnit=CICI,Name=CICI-NewRu-SC,Owner=Klas.m,Techteam=PFChina,Application=Ru,public-sg=true,IgnoreCostAdvisor=true,SubModule=Ru-sc
    alb.ingress.kubernetes.io/target-type: ip
    kubernetes.io/ingress.class: alb
  name: gateway
  namespace: rummy-sc
spec:
  rules:
  - http:
      paths:
      - backend:
          service:
            name: forward-summy-sc-gateway
            port:
              name: use-annotation
        path: /api/user/appLog/submitLogV2
        pathType: ImplementationSpecific
      - backend:
          service:
            name: forward-summy-sc-gateway
            port:
              name: use-annotation
        path: /api/user/appLog/submitPokerLog
        pathType: ImplementationSpecific
      - backend:
          service:
            name: gateway
            port:
              number: 18085
        path: /*
        pathType: ImplementationSpecific
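
The forward action above splits traffic 0/100 between gateway and gateway-common with 300-second target-group stickiness; one way to confirm the controller has reconciled it is to check the Ingress events and status:

kubectl -n rummy-sc describe ingress gateway
kubectl -n rummy-sc get ingress gateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'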

---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: es-new
  namespace: "public-middleware"
  annotations:
    kubernetes.io/ingress.class: "alb"
    alb.ingress.kubernetes.io/group.name: "pfgc-ops-internet-facing" # all Ingresses under the same group name share one ALB
    alb.ingress.kubernetes.io/group.order: "10" # 0-1000; 0 is the highest priority; duplicate values are not allowed
    alb.ingress.kubernetes.io/target-type: ip
    alb.ingress.kubernetes.io/success-codes: '200'
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS": 443}]'
    alb.ingress.kubernetes.io/healthcheck-path: /
    alb.ingress.kubernetes.io/healthcheck-port: "9200"
    alb.ingress.kubernetes.io/backend-protocol: HTTP
    alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:ap-south-1:<ACCOUNT_ID>:certificate/582fb25b-d603-49f4-9970-8b80d8157370
spec:
  rules:
    - host: "ops-es-new.cici.com"
      http:
        paths:
          - pathType: ImplementationSpecific
            path: /*
            backend:
              service:
                name: elasticsearch-log-master
                port:
                  number: 9300             

Fix for a domain security issue: block the /api/actuator endpoints. Reference: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/#actions

#callbreak-cms-site-ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: callbreak-cms-site
  namespace: callbreak
  annotations:
    kubernetes.io/ingress.class: "alb"
    #alb.ingress.kubernetes.io/healthcheck-path: /api/pub/health
    #alb.ingress.kubernetes.io/healthcheck-port: '10904'
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/target-type: ip
    alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS-1-2-2017-01
    alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:ap-south-1:<ACCOUNT_ID>:certificate/582fb25b-d603-49f4-9970-8b80d8157370
    alb.ingress.kubernetes.io/tags: Environment=Production,BusinessUnit=CICI,Name=CICI-Callbreak,Owner=Klas.m,Techteam=CICI-China,Application=Ru,pfg=true,Module=Ru-callbreak,SubModule=Ru-callbreak,IgnoreCostAdvisor=true,public-sg=true
    alb.ingress.kubernetes.io/actions.response-444: >
      {"type":"fixed-response","fixedResponseConfig":{"contentType":"text/plain","statusCode":"444","messageBody":"Access blocked by Admin"}}
spec:
  rules:
  - host: callbreakcms.cici.com
    http:
      paths:
        - path: /api/actuator*
          backend:
            serviceName: response-444
            servicePort: use-annotation
        - path: /*
          backend:
            serviceName: callbreak-cms-site
            servicePort: 80


apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    alb.ingress.kubernetes.io/actions.response-444: |
      {"type":"fixed-response","fixedResponseConfig":{"contentType":"text/plain","statusCode":"444","messageBody":"Access blocked by Admin"}}
    alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:ap-south-1:<ACCOUNT_ID>:certificate/582fb25b-d603-49f4-9970-8b80d8157370
    alb.ingress.kubernetes.io/group.name: pfgc-callbreak-backstage
    alb.ingress.kubernetes.io/group.order: "900"
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS": 443}]'
    alb.ingress.kubernetes.io/load-balancer-name: pfgc-callbreak-slb-ingress-backstage
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/security-groups: sg-0e9a3835c5f401d15, sg-0c30c767ba9769b64
    alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-2016-08
    alb.ingress.kubernetes.io/subnets: subnet-3fa2cc56, subnet-68e94425
    alb.ingress.kubernetes.io/tags: Techteam=CICI, Application=Callbreak, IgnoreCostAdvisor=true,
      ignoreoldgen=true, Name=callbreak-slb-ingress-backstage, BusinessUnit=CICI,
      Owner=roy.xi, Environment=Production, Author=jasper.xu
    alb.ingress.kubernetes.io/target-type: ip
    kubernetes.io/ingress.class: alb
  name: callbreakcms.xxx.com
  namespace: callbreak
spec:
  rules:
  - host: callbreakcms.xxx.com
    http:
      paths:
      - backend:
          service:
            name: response-444
            port:
              name: use-annotation
        path: /api/actuator*
        pathType: ImplementationSpecific
      - backend:
          service:
            name: callbreak-cms-site
            port:
              number: 80
        path: /*
        pathType: ImplementationSpecific
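
A quick, illustrative way to verify the block: the protected path should return the fixed 444 response configured above, while other paths get the site's normal response.

curl -s -o /dev/null -w '%{http_code}\n' https://callbreakcms.xxx.com/api/actuator/env
curl -s -o /dev/null -w '%{http_code}\n' https://callbreakcms.xxx.com/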

ingress.class reference: https://www.jianshu.com/p/78e27347076c

ingress-nginx: deny access to specific paths and add response headers

# cat admin-website-ingress.yaml
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: 50M
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
    nginx.ingress.kubernetes.io/server-snippet: |
        location ~* /api/actuator {
            deny all;
        }
    nginx.ingress.kubernetes.io/configuration-snippet: |
        add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always;
        more_set_headers "xxerver: hide";
  name: rummy-admin-website
  namespace: rummy
spec:
  rules:
    - host: rummycmsstaging.xxx.com
      http:
        paths:
          - backend:
              serviceName: rummy-admin-website
              servicePort: 80
            path: /
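
With the server-snippet above, nginx itself should reject anything under /api/actuator (typically with a 403), and the configuration-snippet adds the HSTS header; an illustrative check:

curl -s -o /dev/null -w '%{http_code}\n' https://rummycmsstaging.xxx.com/api/actuator/env
curl -sI https://rummycmsstaging.xxx.com/ | grep -i strict-transport-security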

Service discovery: Service

apiVersion: v1
kind: Service
metadata:
  labels:
    app: big-data-student-study-report-generate
    release: prod
  name: big-data-student-study-report-generate
  namespace: english-prod
spec:
  ports:
  - name: big-data-student-study-report-generate
    port: 8090
    protocol: TCP
    targetPort: 8090
  selector:
    app: big-data-student-study-report-generate
    release: prod
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
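
Because the Service is ClusterIP, it is only reachable from inside the cluster; a throwaway Pod can be used to test DNS resolution and connectivity (the image and path are illustrative):

kubectl -n english-prod run tmp-curl --rm -it --restart=Never --image=curlimages/curl \
  --command -- curl -s http://big-data-student-study-report-generate:8090/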

Alibaba Cloud

ingress-nginx controller Service (intranet SLB)

apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/alicloud-loadbalancer-address-type: intranet
    service.beta.kubernetes.io/alicloud-loadbalancer-force-override-listeners: "true"
    service.beta.kubernetes.io/alicloud-loadbalancer-id: lb-2zedwkcu9wgl7unhamia9
  labels:
    app: nginx-ingress-lb-intranet
    service.beta.kubernetes.io/hash: 584a18bdc95241cdc9307c82e638df19215dc3a0b5f583dec606bc82
  name: nginx-ingress-lb-intranet
  namespace: kube-system
spec:
  externalTrafficPolicy: Cluster
  ports:
  - name: http
    nodePort: 31472
    port: 80
    protocol: TCP
    targetPort: 80
  - name: https
    nodePort: 32503
    port: 443
    protocol: TCP
    targetPort: 443
  selector:
    app: ingress-nginx
  sessionAffinity: None
  type: LoadBalancer

EKS Service bound to a layer-4 load balancer (NLB)

cat intra-gateway-service.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
    #prometheus.io/path: /actuator/prometheus
    #prometheus.io/port: "61025"
    #prometheus.io/scrape: "true"
    service.beta.kubernetes.io/aws-load-balancer-type: "nlb-ip"
    service.beta.kubernetes.io/aws-load-balancer-internal: "true"
    service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: Environment=Production,BusinessUnit=CICI,Name=CICI-Messagecenter,Owner=Klaus.ma,Techteam=PFG-China,Application=AppBE,public-sg=true,IgnoreCostAdvisor=true,SubModule=Messagecenter
  labels:
    app: gateway
  name: gateway-intra-service
  namespace: messagecenter
spec:
  ports:
  - name: gateway-intra-port
    port: 80
    protocol: TCP
    targetPort: 18085
  selector:
    app: gateway
  type: LoadBalancer

# The Service as actually created:
]# kubectl -n messagecenter get svc gateway-intra-service -oyaml
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: Environment=Production,BusinessUnit=CICI,Name=CICI-Messagecenter,Owner=Klaus.ma,Techteam=PFG-China,Application=AppBE,public-sg=true,IgnoreCostAdvisor=true,SubModule=Messagecenter
    service.beta.kubernetes.io/aws-load-balancer-internal: "true"
    service.beta.kubernetes.io/aws-load-balancer-type: nlb-ip
  creationTimestamp: "2021-07-07T11:20:02Z"
  finalizers:
  - service.kubernetes.io/load-balancer-cleanup
  - service.k8s.aws/resources
  labels:
    app: gateway
  name: gateway-intra-service
  namespace: messagecenter
  resourceVersion: "174023792"
  selfLink: /api/v1/namespaces/messagecenter/services/gateway-intra-service
  uid: 245f4f2c-04ae-40f8-bbd5-a2e25d93d072
spec:
  clusterIP: 10.100.203.169
  externalTrafficPolicy: Cluster
  ports:
  - name: gateway-intra-port
    nodePort: 32561
    port: 80
    protocol: TCP
    targetPort: 18085
  selector:
    app: gateway
  sessionAffinity: None
  type: LoadBalancer
status:
  loadBalancer:
    ingress:
    - hostname: k8s-messagec-gatewayi-46baa86a3f-ae9b1f7ad2d6fce9.elb.ap-south-1.amazonaws.com
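
The NLB DNS name assigned by the controller can also be read straight from the Service status:

kubectl -n messagecenter get svc gateway-intra-service \
  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'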

aws

> cat  eks/ops/template-service-nlb.yaml
---
# Internet-facing (public) configuration
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-name: pfgc-ops-template-nlb
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
    service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true'
    service.beta.kubernetes.io/aws-load-balancer-type: "external" # requires the AWS Load Balancer Controller; otherwise the name annotation has no effect
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip"
    service.beta.kubernetes.io/aws-load-balancer-target-group-attributes: stickiness.enabled=true,stickiness.type=source_ip,deregistration_delay.timeout_seconds=120,deregistration_delay.connection_termination.enabled=true,preserve_client_ip.enabled=true # source-IP stickiness, 120s deregistration delay, terminate connections on deregistration, preserve the client IP (because aws-load-balancer-nlb-target-type is "ip")
    service.beta.kubernetes.io/aws-load-balancer-subnets: subnet-0d81fb125f688f939, subnet-0ba764e6283d8ac38
    service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" # internet-facing
    service.beta.kubernetes.io/load-balancer-source-ranges: 13.235.32.25/32 # NAT gateway IP of the new Beijing subnet (pfgc-devops-natgateway); fixed setting; whitelist CIDRs and service ports must be added to the security group manually

    service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: Techteam=CICI, Application=Ops, IgnoreCostAdvisor=true, ignoreoldgen=true, Name=pfgc-ops-template-nlb, BusinessUnit=CICI, Owner=roy.xiao, Environment=Ops
    # TLS configuration
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:ap-south-1:<ACCOUNT_ID>:certificate/582fb25b-d603-49f4-9970-8b80d8157370
    service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: ELBSecurityPolicy-2016-08
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443"

  name: template-web
  namespace: ops
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: template-80
    - port: 443
      targetPort: 443
      protocol: TCP
      name: template-443
  selector:
    app: template


---
# Internal (intranet) configuration
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-name: pfgc-ops-template-nlb
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
    service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true'
    service.beta.kubernetes.io/aws-load-balancer-type: "external" # requires the AWS Load Balancer Controller; otherwise the name annotation has no effect
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip"
    service.beta.kubernetes.io/aws-load-balancer-target-group-attributes: stickiness.enabled=true,stickiness.type=source_ip,deregistration_delay.timeout_seconds=120,deregistration_delay.connection_termination.enabled=true,preserve_client_ip.enabled=true # source-IP (session) stickiness, 120s deregistration delay, terminate connections on deregistration, preserve the client IP (because aws-load-balancer-nlb-target-type is "ip")
    service.beta.kubernetes.io/aws-load-balancer-subnets: subnet-03a9f55b57af8553a, subnet-0b1b06f28613696f1
    service.beta.kubernetes.io/aws-load-balancer-scheme: "internal" # internal (intranet)
    service.beta.kubernetes.io/load-balancer-source-ranges: 35.154.134.76/32 # pfgc-ops-natgateway; fixed setting; other environments use their own NAT gateway; designed uniformly with port security in mind; specific whitelist entries must be added to the security group manually
    service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: Techteam=CICI, Application=Ops, IgnoreCostAdvisor=true, ignoreoldgen=true, Name=pfgc-ops-template-nlb, BusinessUnit=CICI, Owner=roy.xiao, Environment=Ops
    # TLS configuration
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:ap-south-1:<ACCOUNT_ID>:certificate/582fb25b-d603-49f4-9970-8b80d8157370
    service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: ELBSecurityPolicy-2016-08
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443"

  name: template-web
  namespace: ops
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: template-80
    - port: 443
      targetPort: 443
      protocol: TCP
      name: template-443
  selector:
    app: template

---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: poker-gateway-new
  namespace: poker
  annotations:
    kubernetes.io/ingress.class: "alb"
    alb.ingress.kubernetes.io/healthcheck-path: /api/health/check
    alb.ingress.kubernetes.io/load-balancer-name: AGT-Poker-Prod-Internet-ALB-New
    alb.ingress.kubernetes.io/shield-advanced-protection: 'true'
    alb.ingress.kubernetes.io/healthcheck-port: '9008'
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/target-type: ip
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]'
    alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-2016-08
    alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:ap-south-1:<ACCOUNT_ID>:certificate/582fb25b-d603-49f4-9970-8b80d8157370
    alb.ingress.kubernetes.io/tags: Environment=Production,BusinessUnit=CICI,Name=CICI-Poker,Owner=Klaus.ma,Techteam=CICI-China,Application=Poker,Module=Poker-server,SubModule=Poker-core,IgnoreCostAdvisor=true,public-sg=true
    alb.ingress.kubernetes.io/actions.forward-poker-gateway: >
        {"type":"forward","forwardConfig":{"targetGroups":[{"serviceName":"poker-gateway-1","servicePort":9008,"weight":50},{"serviceName":"poker-gateway-2","servicePort":9008,"weight":50}],"targetGroupStickinessConfig":{"enabled":true,"durationSeconds":300}}}
spec:
  rules:
  - host: poker.cici.com
    http:
      paths:
        - path: /ws*
          backend:
            serviceName: forward-poker-gateway
            servicePort: use-annotation

An error was reported when applying this configuration (see the AWS support reply below).

AWS EKS supports ALB actions (https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/), which can be used for canary releases, endpoint restrictions, and so on.

Based on the error message, the security group has reached its rule limit. By default each security group allows 60 inbound or outbound rules [1]; you previously requested an increase, and the current limit is 100.

You may consider setting the following AWS Load Balancer Controller annotation [2] to stop the controller from automatically managing the corresponding security-group rules; in that case you need to configure the security group yourself based on the ports your backend instances expose. If automatic management of the NLB security-group rules is disabled, the appropriate ingress rules must be added manually to the EC2 instance or ENI security groups to allow traffic and health-check ports.

service.beta.kubernetes.io/aws-load-balancer-manage-backend-security-group-rules: "false"

We hope this explanation helps. If you still hit the same error after configuring the annotation above, let us know and we will be happy to assist further.

References: [1] Amazon VPC quotas - security groups - https://docs.aws.amazon.com/zh_cn/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-security-groups [2] https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/service/annotations/#manage-backend-sg-rules
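
As a sketch, the annotation could be patched onto an existing Service with kubectl annotate (Service name and namespace taken from the NLB example above; verify how the controller reconciles the change in your environment):

kubectl -n ops annotate service template-web \
  service.beta.kubernetes.io/aws-load-balancer-manage-backend-security-group-rules="false" --overwrite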

Tencent Cloud Ingress Service (CLB)

# Source: gitlab/charts/nginx-ingress/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.kubernetes.io/qcloud-loadbalancer-internal-subnetid: "subnet-lw2qzcl9"
  labels:
    app: nginx-ingress
    chart: nginx-ingress-3.11.1
    release: gitlab-test-xcw
    heritage: Helm

    component: "controller"
    helm.sh/chart: nginx-ingress-3.11.1
    app.kubernetes.io/version: "0.41.2"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: gitlab-test-xcw-nginx-ingress-controller
spec:
  type: LoadBalancer
  loadBalancerIP: 10.22.0.128
  externalTrafficPolicy: Local
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
    - name: gitlab-shell
      port: 22
      protocol: TCP
      targetPort: gitlab-shell
  selector:
    app: nginx-ingress
    release: gitlab-test-xcw
    component: "controller"

Generate a kubeconfig from a token

Set up RBAC read-only permissions

[jasper.xu@ip-10-204-9-241 dashboard]$ cat account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cluster-readonly
  namespace: kube-system
# old approach
#secrets:
#- name: cluster-readonly
---
# v1.29: manually create a long-lived API token for the ServiceAccount
apiVersion: v1
kind: Secret
metadata:
  name: cluster-readonly
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "cluster-readonly"
type: kubernetes.io/service-account-token
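
After applying the manifest, the control plane populates the token into the Secret, which can be confirmed with:

kubectl apply -f account.yaml
kubectl -n kube-system describe secret cluster-readonly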

[jasper.xu@ip-10-204-9-241 dashboard]$ cat readonly.yaml
#apiVersion: v1
#kind: ServiceAccount
#metadata:
#  name: cluster-readonly
#  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cluster-readonly
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - pods/attach
  - pods/exec
  - pods/portforward
  - pods/proxy
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - pods/attach
  - pods/exec
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - persistentvolumeclaims
  - replicationcontrollers
  - replicationcontrollers/scale
  - serviceaccounts
  - services
  - services/proxy
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - bindings
  - events
  - limitranges
  - namespaces/status
  - pods/log
  - pods/status
  - replicationcontrollers/status
  - resourcequotas
  - resourcequotas/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - deployments
  - deployments/rollback
  - deployments/scale
  - statefulsets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - jobs
  - scheduledjobs
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  - ingresses
  - replicasets
  verbs:
  - get
  - list
  - watch
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  - ingressclasses
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-readonly
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-readonly
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: develop:readonly
- kind: ServiceAccount
  name: cluster-readonly
  namespace: kube-system
alias gett='kubectl -n kube-system get secret cluster-readonly -o jsonpath={.data.token} | base64 -d'
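
The ServiceAccount's effective permissions can be sanity-checked with kubectl auth can-i; the second command is expected to be denied, since the ClusterRole grants no delete verbs:

kubectl auth can-i list pods --as=system:serviceaccount:kube-system:cluster-readonly
kubectl auth can-i delete pods --as=system:serviceaccount:kube-system:cluster-readonly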

Retrieve the CA and token

kubectl config set-context $(kubectl config current-context) --namespace  kube-system
#secret=$(kubectl get sa cluster-readonly -o json | jq -r .secrets[].name)  # before v1.29
secret=cluster-readonly

# Get the CA
kubectl get secrets $secret  -o "jsonpath={.data['ca\.crt']}"
#kubectl get secret $secret -o json | jq -r '.data["ca.crt"]' | base64 -d > ca.crt

# Get the token
#kubectl get secret $secret -o json | jq -r '.data["token"]' | base64 -d
kubectl get secret $secret -n kube-system -o jsonpath='{.data.token}' | base64 -d

Generate the kubeconfig

kubectl config set-context $(kubectl config current-context) --namespace  kube-system
#secret=$(kubectl get sa cluster-readonly -o json | jq -r .secrets[].name)  # before v1.29
secret=cluster-readonly

# Get the token
user_token=$(kubectl -n kube-system get secret $secret -o jsonpath='{.data.token}' | base64 -d)
server="https://66F42A859F2A1D83394D56B7F7018F92.sk1.ap-south-1.eks.amazonaws.com"

# set-cluster: define a cluster entry
kubectl config set-cluster eks-prod \
     --insecure-skip-tls-verify=true \
     --server=$server \
     --kubeconfig=./config

# set-credentials: define a user entry
kubectl config set-credentials dashboard-readonly \
     --token="${user_token}" \
     --kubeconfig=./config

# set-context: define a context entry
kubectl config set-context dashboard-readonly --cluster=eks-prod \
    --user=dashboard-readonly \
    --kubeconfig=./config


# Use this context as the default
kubectl config use-context dashboard-readonly --kubeconfig=./config

# Verify
kubectl --kubeconfig=./config get po -n kube-system

The resulting config file

apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://66F42A859F2A1D83394D56B7F7018F92.sk1.ap-south-1.eks.amazonaws.com
  name: eks-prod
contexts:
- context:
    cluster: eks-prod
    user: dashboard-readonly
  name: dashboard-readonly
current-context: dashboard-readonly
kind: Config
preferences: {}
users:
- name: dashboard-readonly
  user:
    token: eyJhbGciOiJSUzI1NiIsImtpZCI6IkwwLUpvOVZyVE9pUWVfSl96VWJOUExrZ01tU3pEbDRFaFhxZTBmYVY2Y0EifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJjbHVzdGVyLXJlYWRvbmx5LXRva2VuLXpjc2pqIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImNsdXN0ZXItcmVhZG9ubHkiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI0MmI4MDk2Yi1jOWZlLTRhODEtYTU3Mi1lYjE1OTJhZDY3MmEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06Y2x1c3Rlci1yZWFkb25seSJ9.UYMY6VFf9UjqIGRBsjp1o2ttCIAHbExo_y3TmThecUCYgMmNjEagRbaAFPxRHLiUj5PVBRdd6gtYnW5YCvQ3u91OothCjQUDDudb8ldYqkHKRoKYUO6xJ3VoRaV4MG7nUwlXyc6pX8Szaj0ZXbkBY4Uqywf2dsJdiaLgEgCPV-dldGdyedUuSWZNUwI1eCd3Hycjjxlc331-1oOSVFj2VmLmL3tkeIV-hIVhIamc7tgE0hnTo-WuT8oCTIJnsH4t6dF1Q4MidG-7rRypsKi9rCBKbAiiG6lXD7B2DN8LaO_bdNBuQh7N1IWAJPshDq2OmsVhcMuT2nsqGY9Ta0SZUQ

References: generating a kubeconfig for Kubernetes - https://bbs.huaweicloud.com/blogs/344996 , https://gist.github.com/mreferre/6aae10ddc313dd28b72bdc9961949978

Aliases

alias kcn='kubectl config set-context $(kubectl config current-context) --namespace'
alias kgp='kubectl get pods'
alias kf='kubectl apply -f'
alias cdg='cd /data/pfg-prod-k8s/'
alias cp='cp -i'
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'
alias grep='grep --color=auto'
alias k='kubectl'
alias kaf='kubectl apply -f'
alias kca='f(){ kubectl "$@" --all-namespaces;  unset -f f; }; f'
alias kccc='kubectl config current-context'
alias kcd='kubectl config set-context $(kubectl config current-context) --namespace'
alias kcdc='kubectl config delete-context'
alias kcg='k config get-contexts'
alias kcgc='kubectl config get-contexts'
alias kcn='kubectl config set-context $(kubectl config current-context) --namespace'
alias kcp='kubectl cp'
alias kcsc='kubectl config set-context'
alias kcu='k config use-context'
alias kcuc='kubectl config use-context'
alias kd='kubectl describe'
alias kdcm='kubectl describe configmap'
alias kdd='kubectl describe deployment'
alias kdel='kubectl delete'
alias kdelcm='kubectl delete configmap'
alias kdeld='kubectl delete deployment'
alias kdelf='kubectl delete -f'
alias kdeli='kubectl delete ingress'
alias kdelno='kubectl delete node'
alias kdelns='kubectl delete namespace'
alias kdelp='kubectl delete pods'
alias kdelpvc='kubectl delete pvc'
alias kdels='kubectl delete svc'
alias kdelsec='kubectl delete secret'
alias kdelss='kubectl delete statefulset'
alias kdi='kubectl describe ingress'
alias kdno='kubectl describe node'
alias kdns='kubectl describe namespace'
alias kdp='kubectl describe pods'
alias kdpvc='kubectl describe pvc'
alias kds='kubectl describe svc'
alias kdsec='kubectl describe secret'
alias kdss='kubectl describe statefulset'
alias ke='kubectl edit'
alias kecm='kubectl edit configmap'
alias ked='kubectl edit deployment'
alias kei='kubectl edit ingress'
alias keno='kubectl edit node'
alias kens='kubectl edit namespace'
alias kep='kubectl edit pods'
alias kepvc='kubectl edit pvc'
alias kes='kubectl edit svc'
alias kess='kubectl edit statefulset'
alias keti='kubectl exec -ti'
alias kg='kubectl get'
alias kga='kubectl get all'
alias kgaa='kubectl get all --all-namespaces'
alias kgap='kubectl get pods --all-namespaces'
alias kgc='kubectl get configmap'
alias kgcm='kubectl get configmaps'
alias kgd='kubectl get deployment'
alias kgdw='kgd --watch'
alias kgdwide='kgd -o wide'
alias kgi='kubectl get ingress'
alias kgm='kubectl get servicemonitor'
alias kgn='kubectl get namespace'
alias kgno='kubectl get nodes'
alias kgns='kubectl get namespaces'
alias kgp='kubectl get pods'
alias kgpa='kubectl get pods --all-namespaces'
alias kgpl='kgp -l'
alias kgpvc='kubectl get pvc'
alias kgpvcw='kgpvc --watch'
alias kgpw='kgp --watch'
alias kgpwide='kgp -o wide'
alias kgrs='kubectl get rs'
alias kgs='kubectl get service'
alias kgsec='kubectl get secret'
alias kgss='kubectl get statefulset'
alias kgssw='kgss --watch'
alias kgsswide='kgss -o wide'
alias kgsw='kgs --watch'
alias kgswide='kgs -o wide'
alias kl='kubectl logs'
alias klf='kubectl logs -f'
alias kp='kubectl port-forward'
alias krh='kubectl rollout history'
alias krsd='kubectl rollout status deployment'
alias krsss='kubectl rollout status statefulset'
alias kru='kubectl rollout undo'
alias ksd='kubectl scale deployment'
alias ksss='kubectl scale statefulset'
alias kx='kubectl exec'
alias l.='ls -d .* --color=auto'
alias ll='ls -l --color=auto'
alias ls='ls --color=auto'
alias mv='mv -i'
alias pfg='cd /data/arvin/pfg-prod-k8s'
alias pull_prod_k8s='git pull origin master'
alias rm='rm -i'
alias which='alias | /usr/bin/which --tty-only --read-alias --show-dot --show-tilde'

References