Infra chart update 06/1006/1
author Szekeres, Balazs (Nokia - HU/Budapest) <balazs.szekeres@nokia.com>
Mon, 17 Jun 2019 10:22:10 +0000 (12:22 +0200)
committer Szekeres, Balazs (Nokia - HU/Budapest) <balazs.szekeres@nokia.com>
Mon, 17 Jun 2019 10:22:10 +0000 (12:22 +0200)
Elasticsearch charts updated for ES version 7.1.0
Prometheus chart updated (TLS error)

Change-Id: Id35f03568ed6aa03fc219dcc6c0bcf0c66ec0bfa
Signed-off-by: Szekeres, Balazs (Nokia - HU/Budapest) <balazs.szekeres@nokia.com>
SPECS/infra-charts.spec
infra-charts/templates/elasticsearch-configmap.yaml
infra-charts/templates/elasticsearch-data-statefulset.yaml
infra-charts/templates/elasticsearch-master-statefulset.yaml
infra-charts/templates/elasticsearch-service.yaml
infra-charts/templates/prometheus-cfg.yaml
infra-charts/values.yaml.j2

index 69c096e..bb2c1e2 100644 (file)
@@ -15,7 +15,7 @@
 %define COMPONENT infra-charts
 %define RPM_NAME caas-%{COMPONENT}
 %define RPM_MAJOR_VERSION 1.0.0
-%define RPM_MINOR_VERSION 5
+%define RPM_MINOR_VERSION 6
 
 Name:           %{RPM_NAME}
 Version:        %{RPM_MAJOR_VERSION}
index 3e78938..181c5dd 100644 (file)
@@ -25,23 +25,17 @@ metadata:
 data:
   elasticsearch.yml: |-
     cluster.name: {{ .Values.elasticsearch.cluster.name }}
-
     node.data: ${NODE_DATA:true}
     node.master: ${NODE_MASTER:true}
     node.ingest: ${NODE_INGEST:true}
-    node.name: ${HOSTNAME}
-
+    cluster.initial_master_nodes: ${CLUSTER_INITIAL_MASTER_NODES}
     network.host: 0.0.0.0
     bootstrap.memory_lock: ${BOOTSTRAP_MEMORY_LOCK:false}
-
     discovery:
       zen:
-        ping.unicast.hosts: ${DISCOVERY_SERVICE:}
-        minimum_master_nodes: ${MINIMUM_MASTER_NODES:2}
-
+        minimum_master_nodes: ${MINIMUM_MASTER_NODES}
+      seed_hosts: ${DISCOVERY_SERVICE}
     processors: ${PROCESSORS:}
-
-    # avoid split-brain w/ a minimum consensus of 3 masters plus 3 data nodes
     gateway.expected_master_nodes: ${EXPECTED_MASTER_NODES:3}
     gateway.expected_data_nodes: ${EXPECTED_DATA_NODES:3}
     gateway.recover_after_time: ${RECOVER_AFTER_TIME:5m}
index 1343bda..d49aa3e 100644 (file)
@@ -52,25 +52,33 @@ spec:
       containers:
       - name: elasticsearch-data
         env:
+        - name: node.name
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.name
+        - name: CLUSTER_INITIAL_MASTER_NODES
+          value: '{{ .Values.elasticsearch.clustermasternodes }}'
         - name: NODE_MASTER
           value: "false"
+        - name: NODE_INGEST
+          value: "true"
+        - name: NODE_DATA
+          value: "true"
         - name: DISCOVERY_SERVICE
-          value: elasticsearch-data.kube-system.svc.{{ .Values.domain }}
+          value: {{ .Values.elasticsearch.url }}
         - name: PROCESSORS
           valueFrom:
             resourceFieldRef:
               resource: limits.cpu
         - name: ES_JAVA_OPTS
           value: "-Xms{{ .Values.elasticsearch.java_heap_request }} -Xmx{{ .Values.elasticsearch.java_heap_max }} -Dmapper.allow_dots_in_name=true"
-        {{- range $key, $value :=  .Values.elasticsearch.cluster.env }}
+        {{- range $key, $value :=  .Values.elasticsearch.env }}
         - name: {{ $key }}
           value: {{ $value | quote }}
         {{- end }}
         image: {{ .Values.elasticsearch.image_name }}
         command: ["/usr/bin/supervisord","-n","-c","/etc/supervisord.conf"]
-        ports:
-        - containerPort: {{ .Values.elasticsearch.port_for_cluster_communication }}
-          name: transport
         resources:
           limits:
             cpu: "{{ .Values.elasticsearch.cpu_limit }}"
index 1f553fa..d384405 100644 (file)
@@ -52,13 +52,23 @@ spec:
       containers:
       - name: elasticsearch-master
         env:
+        - name: node.name
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: CLUSTER_INITIAL_MASTER_NODES
+          value: '{{ .Values.elasticsearch.clustermasternodes }}'
+        - name: NODE_MASTER
+          value: "true"
+        - name: NODE_INGEST
+          value: "false"
         - name: NODE_DATA
           value: "false"
         - name: DISCOVERY_SERVICE
-          value: elasticsearch-data.kube-system.svc.{{ .Values.domain }}
+          value: {{ .Values.elasticsearch.url }}
         - name: ES_JAVA_OPTS
           value: "-Xms{{ .Values.elasticsearch.java_heap_request }} -Xmx{{ .Values.elasticsearch.java_heap_max }} -Dmapper.allow_dots_in_name=true"
-        {{- range $key, $value :=  .Values.elasticsearch.cluster.env }}
+        {{- range $key, $value :=  .Values.elasticsearch.env }}
         - name: {{ $key }}
           value: {{ $value | quote }}
         {{- end }}
@@ -74,11 +84,6 @@ spec:
           initialDelaySeconds: 5
         image: {{ .Values.elasticsearch.image_name }}
         command: ["/usr/bin/supervisord","-n","-c","/etc/supervisord.conf"]
-        ports:
-        - containerPort: {{ .Values.elasticsearch.port_for_cluster_communication }}
-          name: transport
-        - containerPort: {{ .Values.elasticsearch.port }}
-          name: http
         volumeMounts:
         - name: time-mount
           mountPath: /etc/localtime
index 5e70e20..430cbcf 100644 (file)
@@ -25,8 +25,12 @@ metadata:
   namespace: kube-system
 spec:
   ports:
+    - port: {{ .Values.elasticsearch.port }}
+      name: http
+      protocol: TCP
     - port: {{ .Values.elasticsearch.port_for_cluster_communication }}
-      targetPort: transport
+      name: node-to-node
+      protocol: TCP
   selector:
     app: elasticsearch-data
     component: "{{ .Values.elasticsearch.data.name }}"
@@ -42,7 +46,11 @@ metadata:
 spec:
   ports:
     - port: {{ .Values.elasticsearch.port }}
-      targetPort: http
+      name: http
+      protocol: TCP
+    - port: {{ .Values.elasticsearch.port_for_cluster_communication }}
+      name: node-to-node
+      protocol: TCP
   selector:
     app: elasticsearch-master
     component: "{{ .Values.elasticsearch.master.name }}"
index 7a8b559..214c5f3 100644 (file)
@@ -81,84 +81,6 @@ data:
         action: keep
         regex: default;kubernetes;https
 
-    # Scrape config for nodes (kubelet).
-    #
-    # Rather than connecting directly to the node, the scrape is proxied though the
-    # Kubernetes apiserver.  This means it will work if Prometheus is running out of
-    # cluster, or can't connect to nodes for some other reason (e.g. because of
-    # firewalling).
-    - job_name: 'kubernetes-nodes'
-
-      # Default to scraping over https. If required, just disable this or change to
-      # `http`.
-      scheme: https
-
-      # This TLS & bearer token file config is used to connect to the actual scrape
-      # endpoints for cluster components. This is separate to discovery auth
-      # configuration because discovery & scraping are two separate concerns in
-      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
-      # the cluster. Otherwise, more config options have to be provided within the
-      # <kubernetes_sd_config>.
-      tls_config:
-        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-      kubernetes_sd_configs:
-      - role: node
-
-      relabel_configs:
-      - action: labelmap
-        regex: __meta_kubernetes_node_label_(.+)
-      - target_label: __address__
-        replacement: kubernetes.default.svc:443
-      - source_labels: [__meta_kubernetes_node_name]
-        regex: (.+)
-        target_label: __metrics_path__
-        replacement: /api/v1/nodes/${1}/proxy/metrics
-
-    # Scrape config for Kubelet cAdvisor.
-    #
-    # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
-    # (those whose names begin with 'container_') have been removed from the
-    # Kubelet metrics endpoint.  This job scrapes the cAdvisor endpoint to
-    # retrieve those metrics.
-    #
-    # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
-    # HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
-    # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
-    # the --cadvisor-port=0 Kubelet flag).
-    #
-    # This job is not necessary and should be removed in Kubernetes 1.6 and
-    # earlier versions, or it will cause the metrics to be scraped twice.
-    - job_name: 'kubernetes-cadvisor'
-
-      # Default to scraping over https. If required, just disable this or change to
-      # `http`.
-      scheme: https
-
-      # This TLS & bearer token file config is used to connect to the actual scrape
-      # endpoints for cluster components. This is separate to discovery auth
-      # configuration because discovery & scraping are two separate concerns in
-      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
-      # the cluster. Otherwise, more config options have to be provided within the
-      # <kubernetes_sd_config>.
-      tls_config:
-        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-      kubernetes_sd_configs:
-      - role: node
-
-      relabel_configs:
-      - action: labelmap
-        regex: __meta_kubernetes_node_label_(.+)
-      - target_label: __address__
-        replacement: kubernetes.default.svc:443
-      - source_labels: [__meta_kubernetes_node_name]
-        regex: (.+)
-        target_label: __metrics_path__
-        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
-
     # Scrape config for service endpoints.
     #
     # The relabeling allows the actual service scrape endpoint to be configured
index e03794c..d87b6f6 100644 (file)
@@ -60,7 +60,13 @@ cpupooler:
     {{ ansible_local['cpu_pooler_config'] | default([]) | to_nice_yaml | indent(4) }}
 
 elasticsearch:
+{% set clustermasternodes = [] -%}
+{%- for masternumber in range(groups['caas_master']|length|int) -%}
+{%- if clustermasternodes.append('elasticsearch-master-' + (masternumber|string)) -%}{%- endif -%}
+{%- endfor %}
+  clustermasternodes: '{{ clustermasternodes|join(", ") }}'
   required: true
+  url: {{ caas.elasticsearch_url }}
   port: {{ caas.elasticsearch_port }}
   image_name: {{ container_image_names | select('search', '/elasticsearch') | list | last }}
   memory_limit: {{ caas.elasticsearch_memory_limit }}
@@ -73,13 +79,14 @@ elasticsearch:
   cluster:
     name: "elasticsearch"
   env:
-    MINIMUM_MASTER_NODES: "3"
+{% set minimummasternodes = ((((groups['caas_master']|length|int)/2)+1)|round(0, 'floor'))|int %}
+    MINIMUM_MASTER_NODES: {{ minimummasternodes }}
   master:
     name: elasticsearch-master
-    replicas: 3
+    replicas: "{{ groups['caas_master']|length|int }}"
   data:
     name: elasticsearch-data
-    replicas: 3
+    replicas: "{{ groups['caas_master']|length|int }}"
     terminationGracePeriodSeconds: 3600
 
 sriovdp: