%define COMPONENT infra-charts
%define RPM_NAME caas-%{COMPONENT}
%define RPM_MAJOR_VERSION 1.0.0
-%define RPM_MINOR_VERSION 5
+%define RPM_MINOR_VERSION 6
Name: %{RPM_NAME}
Version: %{RPM_MAJOR_VERSION}
data:
elasticsearch.yml: |-
cluster.name: {{ .Values.elasticsearch.cluster.name }}
-
node.data: ${NODE_DATA:true}
node.master: ${NODE_MASTER:true}
node.ingest: ${NODE_INGEST:true}
- node.name: ${HOSTNAME}
-
+ cluster.initial_master_nodes: ${CLUSTER_INITIAL_MASTER_NODES}
network.host: 0.0.0.0
bootstrap.memory_lock: ${BOOTSTRAP_MEMORY_LOCK:false}
-
discovery:
zen:
- ping.unicast.hosts: ${DISCOVERY_SERVICE:}
- minimum_master_nodes: ${MINIMUM_MASTER_NODES:2}
-
+ minimum_master_nodes: ${MINIMUM_MASTER_NODES}
+ seed_hosts: ${DISCOVERY_SERVICE}
processors: ${PROCESSORS:}
-
- # avoid split-brain w/ a minimum consensus of 3 masters plus 3 data nodes
gateway.expected_master_nodes: ${EXPECTED_MASTER_NODES:3}
gateway.expected_data_nodes: ${EXPECTED_DATA_NODES:3}
gateway.recover_after_time: ${RECOVER_AFTER_TIME:5m}
containers:
- name: elasticsearch-data
env:
+ - name: node.name
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: CLUSTER_INITIAL_MASTER_NODES
+ value: '{{ .Values.elasticsearch.clustermasternodes }}'
- name: NODE_MASTER
value: "false"
+ - name: NODE_INGEST
+ value: "true"
+ - name: NODE_DATA
+ value: "true"
- name: DISCOVERY_SERVICE
- value: elasticsearch-data.kube-system.svc.{{ .Values.domain }}
+ value: "{{ .Values.elasticsearch.url }}"
- name: PROCESSORS
valueFrom:
resourceFieldRef:
resource: limits.cpu
- name: ES_JAVA_OPTS
value: "-Xms{{ .Values.elasticsearch.java_heap_request }} -Xmx{{ .Values.elasticsearch.java_heap_max }} -Dmapper.allow_dots_in_name=true"
- {{- range $key, $value := .Values.elasticsearch.cluster.env }}
+ {{- range $key, $value := .Values.elasticsearch.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
image: {{ .Values.elasticsearch.image_name }}
command: ["/usr/bin/supervisord","-n","-c","/etc/supervisord.conf"]
- ports:
- - containerPort: {{ .Values.elasticsearch.port_for_cluster_communication }}
- name: transport
resources:
limits:
cpu: "{{ .Values.elasticsearch.cpu_limit }}"
containers:
- name: elasticsearch-master
env:
+ - name: node.name
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: CLUSTER_INITIAL_MASTER_NODES
+ value: '{{ .Values.elasticsearch.clustermasternodes }}'
+ - name: NODE_MASTER
+ value: "true"
+ - name: NODE_INGEST
+ value: "false"
- name: NODE_DATA
value: "false"
- name: DISCOVERY_SERVICE
- value: elasticsearch-data.kube-system.svc.{{ .Values.domain }}
+ value: "{{ .Values.elasticsearch.url }}"
- name: ES_JAVA_OPTS
value: "-Xms{{ .Values.elasticsearch.java_heap_request }} -Xmx{{ .Values.elasticsearch.java_heap_max }} -Dmapper.allow_dots_in_name=true"
- {{- range $key, $value := .Values.elasticsearch.cluster.env }}
+ {{- range $key, $value := .Values.elasticsearch.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
initialDelaySeconds: 5
image: {{ .Values.elasticsearch.image_name }}
command: ["/usr/bin/supervisord","-n","-c","/etc/supervisord.conf"]
- ports:
- - containerPort: {{ .Values.elasticsearch.port_for_cluster_communication }}
- name: transport
- - containerPort: {{ .Values.elasticsearch.port }}
- name: http
volumeMounts:
- name: time-mount
mountPath: /etc/localtime
namespace: kube-system
spec:
ports:
+ - port: {{ .Values.elasticsearch.port }}
+ name: http
+ protocol: TCP
- port: {{ .Values.elasticsearch.port_for_cluster_communication }}
- targetPort: transport
+ name: node-to-node
+ protocol: TCP
selector:
app: elasticsearch-data
component: "{{ .Values.elasticsearch.data.name }}"
spec:
ports:
- port: {{ .Values.elasticsearch.port }}
- targetPort: http
+ name: http
+ protocol: TCP
+ - port: {{ .Values.elasticsearch.port_for_cluster_communication }}
+ name: node-to-node
+ protocol: TCP
selector:
app: elasticsearch-master
component: "{{ .Values.elasticsearch.master.name }}"
action: keep
regex: default;kubernetes;https
- # Scrape config for nodes (kubelet).
- #
- # Rather than connecting directly to the node, the scrape is proxied though the
- # Kubernetes apiserver. This means it will work if Prometheus is running out of
- # cluster, or can't connect to nodes for some other reason (e.g. because of
- # firewalling).
- - job_name: 'kubernetes-nodes'
-
- # Default to scraping over https. If required, just disable this or change to
- # `http`.
- scheme: https
-
- # This TLS & bearer token file config is used to connect to the actual scrape
- # endpoints for cluster components. This is separate to discovery auth
- # configuration because discovery & scraping are two separate concerns in
- # Prometheus. The discovery auth config is automatic if Prometheus runs inside
- # the cluster. Otherwise, more config options have to be provided within the
- # <kubernetes_sd_config>.
- tls_config:
- ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
- kubernetes_sd_configs:
- - role: node
-
- relabel_configs:
- - action: labelmap
- regex: __meta_kubernetes_node_label_(.+)
- - target_label: __address__
- replacement: kubernetes.default.svc:443
- - source_labels: [__meta_kubernetes_node_name]
- regex: (.+)
- target_label: __metrics_path__
- replacement: /api/v1/nodes/${1}/proxy/metrics
-
- # Scrape config for Kubelet cAdvisor.
- #
- # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
- # (those whose names begin with 'container_') have been removed from the
- # Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
- # retrieve those metrics.
- #
- # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
- # HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
- # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
- # the --cadvisor-port=0 Kubelet flag).
- #
- # This job is not necessary and should be removed in Kubernetes 1.6 and
- # earlier versions, or it will cause the metrics to be scraped twice.
- - job_name: 'kubernetes-cadvisor'
-
- # Default to scraping over https. If required, just disable this or change to
- # `http`.
- scheme: https
-
- # This TLS & bearer token file config is used to connect to the actual scrape
- # endpoints for cluster components. This is separate to discovery auth
- # configuration because discovery & scraping are two separate concerns in
- # Prometheus. The discovery auth config is automatic if Prometheus runs inside
- # the cluster. Otherwise, more config options have to be provided within the
- # <kubernetes_sd_config>.
- tls_config:
- ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
- kubernetes_sd_configs:
- - role: node
-
- relabel_configs:
- - action: labelmap
- regex: __meta_kubernetes_node_label_(.+)
- - target_label: __address__
- replacement: kubernetes.default.svc:443
- - source_labels: [__meta_kubernetes_node_name]
- regex: (.+)
- target_label: __metrics_path__
- replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
-
# Scrape config for service endpoints.
#
# The relabeling allows the actual service scrape endpoint to be configured
{{ ansible_local['cpu_pooler_config'] | default([]) | to_nice_yaml | indent(4) }}
elasticsearch:
+{% set clustermasternodes = [] -%}
+{%- for masternumber in range(groups['caas_master']|length|int) -%}
+{%- if clustermasternodes.append('elasticsearch-master-' + (masternumber|string)) -%}{%- endif -%}
+{%- endfor %}
+ clustermasternodes: '{{ clustermasternodes|join(",") }}'
required: true
+ url: {{ caas.elasticsearch_url }}
port: {{ caas.elasticsearch_port }}
image_name: {{ container_image_names | select('search', '/elasticsearch') | list | last }}
memory_limit: {{ caas.elasticsearch_memory_limit }}
cluster:
name: "elasticsearch"
env:
- MINIMUM_MASTER_NODES: "3"
+{% set minimummasternodes = ((((groups['caas_master']|length|int)/2)+1)|round(0, 'floor'))|int %}
+ MINIMUM_MASTER_NODES: {{ minimummasternodes }}
master:
name: elasticsearch-master
- replicas: 3
+ replicas: "{{ groups['caas_master']|length|int }}"
data:
name: elasticsearch-data
- replicas: 3
+ replicas: "{{ groups['caas_master']|length|int }}"
terminationGracePeriodSeconds: 3600
sriovdp: