Commit 8dc1e72e authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

Parent 0df696c5
@@ -662,7 +662,7 @@ Crossplane runs inside your Kubernetes cluster and supports secure connectivity
  :uninstall-successful="applications.elastic_stack.uninstallSuccessful"
  :uninstall-failed="applications.elastic_stack.uninstallFailed"
  :disabled="!helmInstalled"
- title-link="https://github.com/helm/charts/tree/master/stable/elastic-stack"
+ title-link="https://gitlab.com/gitlab-org/charts/elastic-stack"
  >
  <div slot="description">
  <p>
......
@@ -14,4 +14,8 @@ export default [
  path: '/success',
  component: Success,
  },
+ {
+ path: '*',
+ redirect: HOME_ROUTE,
+ },
  ];
@@ -3,7 +3,7 @@
  module Clusters
  module Applications
  class ElasticStack < ApplicationRecord
- VERSION = '2.0.0'
+ VERSION = '3.0.0'
  ELASTICSEARCH_PORT = 9200
@@ -18,7 +18,11 @@ module Clusters
  default_value_for :version, VERSION
  def chart
- 'stable/elastic-stack'
+ 'elastic-stack/elastic-stack'
+ end
+
+ def repository
+ 'https://charts.gitlab.io'
  end
  def install_command
@@ -27,8 +31,9 @@ module Clusters
  version: VERSION,
  rbac: cluster.platform_kubernetes_rbac?,
  chart: chart,
+ repository: repository,
  files: files,
- preinstall: migrate_to_2_script,
+ preinstall: migrate_to_3_script,
  postinstall: post_install_script
  )
  end
@@ -50,7 +55,7 @@ module Clusters
  strong_memoize(:elasticsearch_client) do
  next unless kube_client
- proxy_url = kube_client.proxy_url('service', 'elastic-stack-elasticsearch-client', ::Clusters::Applications::ElasticStack::ELASTICSEARCH_PORT, Gitlab::Kubernetes::Helm::NAMESPACE)
+ proxy_url = kube_client.proxy_url('service', service_name, ::Clusters::Applications::ElasticStack::ELASTICSEARCH_PORT, Gitlab::Kubernetes::Helm::NAMESPACE)
  Elasticsearch::Client.new(url: proxy_url) do |faraday|
  # ensures headers containing auth data are appended to original client options
@@ -70,21 +75,33 @@ module Clusters
  end
  end
- def filebeat7?
+ def chart_above_v2?
  Gem::Version.new(version) >= Gem::Version.new('2.0.0')
  end
+ def chart_above_v3?
+ Gem::Version.new(version) >= Gem::Version.new('3.0.0')
+ end
  private
+ def service_name
+ chart_above_v3? ? 'elastic-stack-elasticsearch-master' : 'elastic-stack-elasticsearch-client'
+ end
+ def pvc_selector
+ chart_above_v3? ? "app=elastic-stack-elasticsearch-master" : "release=elastic-stack"
+ end
  def post_install_script
  [
- "timeout -t60 sh /data/helm/elastic-stack/config/wait-for-elasticsearch.sh http://elastic-stack-elasticsearch-client:9200"
+ "timeout -t60 sh /data/helm/elastic-stack/config/wait-for-elasticsearch.sh http://elastic-stack-elasticsearch-master:9200"
  ]
  end
  def post_delete_script
  [
- Gitlab::Kubernetes::KubectlCmd.delete("pvc", "--selector", "release=elastic-stack")
+ Gitlab::Kubernetes::KubectlCmd.delete("pvc", "--selector", pvc_selector, "--namespace", Gitlab::Kubernetes::Helm::NAMESPACE)
  ]
  end
@@ -92,25 +109,19 @@
  cluster&.kubeclient&.core_client
  end
- def migrate_to_2_script
- # Updating the chart to 2.0.0 includes an update of the filebeat chart from 1.7.0 to 3.1.1 https://github.com/helm/charts/pull/21640
- # This includes the following commit that changes labels on the filebeat deployment https://github.com/helm/charts/commit/9b009170686c6f4b202c36ceb1da4bb9ba15ddd0
- # Unfortunately those fields are immutable, and we can't use `helm upgrade` to change them. We first have to delete the associated filebeat resources
- # The following pre-install command runs before updating to 2.0.0 and sets filebeat.enable=false so the filebeat deployment is deleted.
- # Then the main install command re-creates them properly
- if updating? && !filebeat7?
- [
- Gitlab::Kubernetes::Helm::InstallCommand.new(
- name: 'elastic-stack',
- version: version,
- rbac: cluster.platform_kubernetes_rbac?,
- chart: chart,
- files: files
- ).install_command + ' --set filebeat.enabled\\=false'
- ]
- else
- []
- end
+ def migrate_to_3_script
+ return [] if !updating? || chart_above_v3?
+ # Chart version 3.0.0 moves to our own chart at https://gitlab.com/gitlab-org/charts/elastic-stack
+ # and is not compatible with pre-existing resources. We first remove them.
+ [
+ Gitlab::Kubernetes::Helm::DeleteCommand.new(
+ name: 'elastic-stack',
+ rbac: cluster.platform_kubernetes_rbac?,
+ files: files
+ ).delete_command,
+ Gitlab::Kubernetes::KubectlCmd.delete("pvc", "--selector", "release=elastic-stack", "--namespace", Gitlab::Kubernetes::Helm::NAMESPACE)
+ ]
  end
  end
  end
  end
......
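The new `chart_above_v2?`/`chart_above_v3?` predicates drive the service name, the PVC selector, and whether the migration script runs. A minimal sketch of the version comparison they rely on, using a hypothetical standalone helper rather than the model itself:

```ruby
# Sketch only: `chart_at_least?` is an illustrative helper, not part of the model above.
def chart_at_least?(installed_version, minimum)
  # Gem::Version compares semantic versions correctly, e.g. "3.0.0" > "2.9.0" > "2.10.0" is false
  Gem::Version.new(installed_version) >= Gem::Version.new(minimum)
end

chart_at_least?('2.9.0', '3.0.0') # => false, so migrate_to_3_script would still run on update
chart_at_least?('3.0.0', '3.0.0') # => true
```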
@@ -11,7 +11,7 @@ class PlanLimits < ApplicationRecord
  else
  # object.count >= limit value is slower than checking
  # if a record exists at the limit value - 1 position.
- object.limit(1).offset(read_attribute(limit_name) - 1).exists?
+ object.offset(read_attribute(limit_name) - 1).exists?
  end
  end
......
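The dropped `limit(1)` is redundant because ActiveRecord's `exists?` already constrains the query to a single row; the offset trick only asks whether a record sits at position `limit - 1`. A small sketch of the idea, where `hooks` and `limit_value` are hypothetical stand-ins rather than the real PlanLimits attributes:

```ruby
# Sketch of the reasoning behind the changed line, not the actual PlanLimits code.
def exceeded?(hooks, limit_value)
  # `exists?` emits `SELECT 1 ... LIMIT 1`, so an explicit `limit(1)` adds nothing.
  # A record at offset (limit - 1) exists only when the relation holds at least
  # `limit_value` records, which avoids a full COUNT(*) over the table.
  hooks.offset(limit_value - 1).exists?
end

# exceeded?(ProjectHook.all, 2) # => true once a second hook exists
```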
@@ -70,7 +70,7 @@ module PodLogs
  client = cluster&.application_elastic_stack&.elasticsearch_client
  return error(_('Unable to connect to Elasticsearch')) unless client
- filebeat7 = cluster.application_elastic_stack.filebeat7?
+ chart_above_v2 = cluster.application_elastic_stack.chart_above_v2?
  response = ::Gitlab::Elasticsearch::Logs::Lines.new(client).pod_logs(
  namespace,
@@ -80,7 +80,7 @@
  start_time: result[:start_time],
  end_time: result[:end_time],
  cursor: result[:cursor],
- filebeat7: filebeat7
+ chart_above_v2: chart_above_v2
  )
  result.merge!(response)
......
@@ -17,8 +17,7 @@ module Prometheus
  steps :validate_variables,
  :add_params_to_result,
  :substitute_params,
- :substitute_ruby_variables,
- :substitute_liquid_variables
+ :substitute_variables
  def initialize(environment, params = {})
  @environment, @params = environment, params.deep_dup
@@ -56,7 +55,7 @@
  success(result)
  end
- def substitute_liquid_variables(result)
+ def substitute_variables(result)
  return success(result) unless query(result)
  result[:params][:query] = gsub(query(result), full_context)
@@ -64,24 +63,6 @@
  success(result)
  end
- def substitute_ruby_variables(result)
- return success(result) unless query(result)
- # The % operator doesn't replace variables if the hash contains string
- # keys.
- result[:params][:query] = query(result) % predefined_context.symbolize_keys
- success(result)
- rescue TypeError, ArgumentError => exception
- log_error(exception.message)
- Gitlab::ErrorTracking.track_exception(exception, {
- template_string: query(result),
- variables: predefined_context
- })
- error(_('Malformed string'))
- end
  def gsub(string, context)
  # Search for variables of the form `{{variable}}` in the string and replace
  # them with their value.
@@ -95,11 +76,11 @@
  end
  def predefined_context
- @predefined_context ||= Gitlab::Prometheus::QueryVariables.call(@environment)
+ Gitlab::Prometheus::QueryVariables.call(@environment).stringify_keys
  end
  def full_context
- @full_context ||= predefined_context.stringify_keys.reverse_merge(variables_hash)
+ @full_context ||= predefined_context.reverse_merge(variables_hash)
  end
  def variables
......
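With the Ruby `%{...}` path removed, only the `{{variable}}` form is substituted, via a gsub over the merged context. A rough sketch of that substitution; the regexp and helper name here are illustrative, not GitLab's exact implementation:

```ruby
# Sketch of `{{variable}}` substitution, assuming a string-keyed context hash.
VARIABLE_PATTERN = /\{\{\s*(\w+)\s*\}\}/.freeze

def substitute(query, context)
  # Replace each `{{name}}` with its value from the context; unknown names are left untouched.
  query.gsub(VARIABLE_PATTERN) do |match|
    context.fetch(Regexp.last_match(1), match)
  end
end

substitute('up{environment="{{ci_environment_slug}}"}', 'ci_environment_slug' => 'production')
# => up{environment="production"}
```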
---
title: Remove support for Ruby format variable interpolation (`%{variable}`) in custom
dashboards
merge_request: 31581
author:
type: removed
---
title: Move to supported Elastic helm charts
merge_request: 30528
author:
type: changed
---
title: Add expunge deleted messages option to mailroom
merge_request: 31531
author: Diego Louzán
type: added
@@ -202,6 +202,9 @@ production: &base
  #
  # log_path: log/mail_room_json.log
+ # Whether to expunge (permanently remove) messages from the mailbox when they are deleted after delivery
+ expunge_deleted: false
  ## Build Artifacts
  artifacts:
  enabled: true
......
@@ -17,6 +17,7 @@
  :name: <%= config[:mailbox].to_json %>
  :delete_after_delivery: true
+ :expunge_deleted: <%= config[:expunge_deleted].to_json %>
  :delivery_method: sidekiq
  :delivery_options:
......
@@ -182,6 +182,9 @@ gitlab_rails['incoming_email_start_tls'] = false
  gitlab_rails['incoming_email_mailbox_name'] = "inbox"
  # The IDLE command timeout.
  gitlab_rails['incoming_email_idle_timeout'] = 60
+ # Whether to expunge (permanently remove) messages from the mailbox when they are deleted after delivery
+ gitlab_rails['incoming_email_expunge_deleted'] = true
  ```

Example for source installs:

@@ -214,6 +217,9 @@ incoming_email:
  mailbox: "inbox"
  # The IDLE command timeout.
  idle_timeout: 60
+ # Whether to expunge (permanently remove) messages from the mailbox when they are deleted after delivery
+ expunge_deleted: true
  ```

#### Gmail

@@ -249,6 +255,9 @@ gitlab_rails['incoming_email_start_tls'] = false
  gitlab_rails['incoming_email_mailbox_name'] = "inbox"
  # The IDLE command timeout.
  gitlab_rails['incoming_email_idle_timeout'] = 60
+ # Whether to expunge (permanently remove) messages from the mailbox when they are deleted after delivery
+ gitlab_rails['incoming_email_expunge_deleted'] = true
  ```

Example for source installs:

@@ -281,6 +290,9 @@ incoming_email:
  mailbox: "inbox"
  # The IDLE command timeout.
  idle_timeout: 60
+ # Whether to expunge (permanently remove) messages from the mailbox when they are deleted after delivery
+ expunge_deleted: true
  ```

#### Microsoft Exchange Server
......
@@ -4,6 +4,9 @@
  The SCIM API implements the [the RFC7644 protocol](https://tools.ietf.org/html/rfc7644).
+ CAUTION: **Caution:**
+ This API is for internal system use for connecting with a SCIM provider. While it can be used directly, it is subject to change without notice.
  NOTE: **Note:**
  [Group SSO](../user/group/saml_sso/index.md) must be enabled for the group. For more information, see [SCIM setup documentation](../user/group/saml_sso/scim_setup.md#requirements).
......
@@ -57,6 +57,9 @@ See the [Rails guides](https://guides.rubyonrails.org/action_mailer_basics.html#
  mailbox: "inbox"
  # The IDLE command timeout.
  idle_timeout: 60
+ # Whether to expunge (permanently remove) messages from the mailbox when they are deleted after delivery
+ expunge_deleted: false
  ```

As mentioned, the part after `+` is ignored, and this will end up in the mailbox for `gitlab-incoming@gmail.com`.
......
@@ -505,20 +505,23 @@ Log data is automatically deleted after 30 days using [Curator](https://www.elas
To enable log shipping:

+ 1. Ensure your cluster contains at least 3 nodes of instance types larger than
+    `f1-micro`, `g1-small`, or `n1-standard-1`.
  1. Navigate to **{cloud-gear}** **Operations > Kubernetes**.
  1. In **Kubernetes Cluster**, select a cluster.
  1. In the **Applications** section, find **Elastic Stack** and click **Install**.

NOTE: **Note:**
- The [`stable/elastic-stack`](https://github.com/helm/charts/tree/master/stable/elastic-stack)
+ The [`gitlab/elastic-stack`](https://gitlab.com/gitlab-org/charts/elastic-stack)
  chart is used to install this application with a
  [`values.yaml`](https://gitlab.com/gitlab-org/gitlab/blob/master/vendor/elastic_stack/values.yaml)
  file.

NOTE: **Note:**
- The chart will deploy 5 Elasticsearch nodes: 2 masters, 2 data and 1 client node,
- with resource requests totalling 0.125 CPU and 4.5GB RAM. Each data node requests 1.5GB of memory,
- which makes it incompatible with clusters of `f1-micro` and `g1-small` instance types.
+ The chart deploys 3 identical Elasticsearch pods which can't be colocated, and each
+ require 1 CPU and 2 GB of RAM, making them incompatible with clusters containing
+ fewer than 3 nodes or consisting of `f1-micro`, `g1-small`, `n1-standard-1`, or
+ `*-highcpu-2` instance types.

NOTE: **Note:**
The Elastic Stack cluster application is intended as a log aggregation solution and is not related to our

@@ -542,20 +545,23 @@ logstash:
  kibana:
  enabled: true
- env:
-   ELASTICSEARCH_HOSTS: http://elastic-stack-elasticsearch-client.gitlab-managed-apps.svc.cluster.local:9200
+ elasticsearchHosts: http://elastic-stack-elasticsearch-master.gitlab-managed-apps.svc.cluster.local:9200
+ elasticseach-curator:
+   enabled: false
  ```

Then install it on your cluster:

```shell
- helm install --name kibana stable/elastic-stack --values kibana.yml
+ helm repo add gitlab https://charts.gitlab.io
+ helm install --name kibana gitlab/elastic-stack --values kibana.yml
```

To access Kibana, forward the port to your local machine:

```shell
- kubectl port-forward svc/kibana 5601:443
+ kubectl port-forward svc/kibana 5601:5601
```

Then, you can visit Kibana at `http://localhost:5601`.

@@ -1069,7 +1075,7 @@ You can check the default [`values.yaml`](https://gitlab.com/gitlab-org/gitlab/-
  You can customize the installation of Elastic Stack by defining
  `.gitlab/managed-apps/elastic-stack/values.yaml` file in your cluster
  management project. Refer to the
- [chart](https://github.com/helm/charts/blob/master/stable/elastic-stack/values.yaml) for the
+ [chart](https://gitlab.com/gitlab-org/charts/elastic-stack) for the
  available configuration options.

NOTE: **Note:**
......
@@ -192,10 +192,11 @@ GitLab supports a limited set of [CI variables](../../../ci/variables/README.md)
  NOTE: **Note:**
  Variables for Prometheus queries must be lowercase.
- There are 2 methods to specify a variable in a query or dashboard:
- 1. Variables can be specified using double curly braces, such as `{{ci_environment_slug}}` ([added](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20793) in GitLab 12.7).
- 1. You can also enclose it in quotation marks with curly braces with a leading percent, for example `"%{ci_environment_slug}"`. This method is deprecated though and support will be [removed in the next major release](https://gitlab.com/gitlab-org/gitlab/issues/37990).
+ Variables can be specified using double curly braces, such as `"{{ci_environment_slug}}"` ([added](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20793) in GitLab 12.7).
+ Support for the `"%{ci_environment_slug}"` format was
+ [removed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/31581) in GitLab 13.0.
+ Queries that continue to use the old format will show no data.

#### Query Variables from URL
......
@@ -151,6 +151,7 @@ service_desk_email:
  log_path: "log/mailroom.log"
  mailbox: "inbox"
  idle_timeout: 60
+ expunge_deleted: true
  ```

In this case, suppose the `mygroup/myproject` project Service Desk settings has the project name
......
@@ -7,30 +7,4 @@ stages:
  include:
    - template: Jobs/Build.gitlab-ci.yml
+   - template: Jobs/Deploy/ECS.gitlab-ci.yml
- .deploy_to_ecs:
-   image: registry.gitlab.com/gitlab-org/cloud-deploy/aws-ecs:latest
-   script:
-     - ecs update-task-definition
- review:
-   extends: .deploy_to_ecs
-   stage: review
-   environment:
-     name: review/$CI_COMMIT_REF_NAME
-   only:
-     refs:
-       - branches
-       - tags
-   except:
-     refs:
-       - master
- production:
-   extends: .deploy_to_ecs
-   stage: production
-   environment:
-     name: production
-   only:
-     refs:
-       - master
@@ -45,7 +45,8 @@ module Gitlab
  def ==(other)
  other.is_a?(self.class) &&
  new_line == other.new_line &&
- old_line == other.old_line
+ old_line == other.old_line &&
+ line_range == other.line_range
  end
  end
  end
......
@@ -13,7 +13,7 @@ module Gitlab
  @client = client
  end
- def pod_logs(namespace, pod_name: nil, container_name: nil, search: nil, start_time: nil, end_time: nil, cursor: nil, filebeat7: true)
+ def pod_logs(namespace, pod_name: nil, container_name: nil, search: nil, start_time: nil, end_time: nil, cursor: nil, chart_above_v2: true)
  query = { bool: { must: [] } }.tap do |q|
  filter_pod_name(q, pod_name)
  filter_namespace(q, namespace)
@@ -22,7 +22,7 @@
  filter_times(q, start_time, end_time)
  end
- body = build_body(query, cursor, filebeat7)
+ body = build_body(query, cursor, chart_above_v2)
  response = @client.search body: body
  format_response(response)
@@ -30,8 +30,8 @@
  private
- def build_body(query, cursor = nil, filebeat7 = true)
- offset_field = filebeat7 ? "log.offset" : "offset"
+ def build_body(query, cursor = nil, chart_above_v2 = true)
+ offset_field = chart_above_v2 ? "log.offset" : "offset"
  body = {
  query: query,
  # reverse order so we can query N-most recent records
......
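The renamed `chart_above_v2` flag only changes which Elasticsearch field is used as the tie-breaking sort key, since filebeat 7 nests the file offset under `log.offset`. A sketch of that choice; the sort shape below is an assumption for illustration, not the exact request body GitLab builds:

```ruby
# Illustrative sketch only, not the actual build_body implementation.
def sort_fields(chart_above_v2)
  # Filebeat 7 (chart >= 2.0.0) nests the byte offset under "log.offset";
  # older chart versions index it as a top-level "offset" field.
  offset_field = chart_above_v2 ? 'log.offset' : 'offset'

  # Newest entries first, with the offset breaking ties for lines sharing a timestamp.
  [{ '@timestamp' => { order: :desc } }, { offset_field => { order: :desc } }]
end

sort_fields(false) # => [{"@timestamp"=>{:order=>:desc}}, {"offset"=>{:order=>:desc}}]
```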
@@ -36,8 +36,6 @@ module Gitlab
  @rbac
  end
- private
  def delete_command
  command = ['helm', 'delete', '--purge', name] + tls_flags_if_remote_tiller
......
@@ -37,6 +37,8 @@ module Gitlab
  @rbac
  end
+ private
  # Uses `helm upgrade --install` which means we can use this for both
  # installation and uprade of applications
  def install_command
@@ -53,8 +55,6 @@
  command.shelljoin
  end
- private
  def install_flag
  ['--install']
  end
......
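Moving `private` below `delete_command` makes the raw CLI string callable from outside the class, which is what `migrate_to_3_script` above relies on. A hedged usage sketch; the argument values are placeholders, and the output string mirrors the DeleteCommand spec further down:

```ruby
# Sketch of why delete_command is now public; values here are placeholders.
delete = Gitlab::Kubernetes::Helm::DeleteCommand.new(
  name: 'elastic-stack',
  rbac: true,
  files: {}
)

# Callers such as the ElasticStack model can now compose the raw CLI string
# into a preinstall step:
delete.delete_command # => "helm delete --purge elastic-stack" (plus TLS flags for a remote Tiller)
```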
@@ -19,7 +19,8 @@ module Gitlab
  start_tls: false,
  mailbox: 'inbox',
  idle_timeout: 60,
- log_path: RAILS_ROOT_DIR.join('log', 'mail_room_json.log')
+ log_path: RAILS_ROOT_DIR.join('log', 'mail_room_json.log'),
+ expunge_deleted: false
  }.freeze
  # Email specific configuration which is merged with configuration
......
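A minimal sketch of how a default such as `expunge_deleted: false` is typically overridden by values from gitlab.yml; the `config_for` helper and the merge order here are assumptions for illustration, not the actual Gitlab::MailRoom code:

```ruby
# Sketch only: illustrates defaults being overridden by per-mailbox settings.
DEFAULTS = {
  mailbox: 'inbox',
  idle_timeout: 60,
  expunge_deleted: false
}.freeze

def config_for(mailbox_settings)
  # Keys present in the YAML win over the defaults above.
  DEFAULTS.merge(mailbox_settings.transform_keys(&:to_sym))
end

config_for('expunge_deleted' => true)
# => { mailbox: 'inbox', idle_timeout: 60, expunge_deleted: true }
```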
@@ -12763,9 +12763,6 @@
  msgid "Makes this issue confidential."
  msgstr ""
- msgid "Malformed string"
- msgstr ""
  msgid "Manage"
  msgstr ""
@@ -23697,9 +23694,6 @@
  msgid "VulnerabilityManagement|Confirmed %{timeago} by %{user}"
  msgstr ""
- msgid "VulnerabilityManagement|Create issue"
- msgstr ""
  msgid "VulnerabilityManagement|Detected %{timeago} in pipeline %{pipelineLink}"
  msgstr ""
......
@@ -53,7 +53,8 @@ describe 'mail_room.yml' do
  email: 'gitlab-incoming@gmail.com',
  password: '[REDACTED]',
  name: 'inbox',
- idle_timeout: 60
+ idle_timeout: 60,
+ expunge_deleted: true
  }
  expected_options = {
  redis_url: gitlab_redis_queues.url,
......
@@ -55,7 +55,7 @@ describe Projects::Environments::PrometheusApiController do
  end
  it 'replaces variables with values' do
- get :proxy, params: environment_params.merge(query: 'up{environment="%{ci_environment_slug}"}')
+ get :proxy, params: environment_params.merge(query: 'up{environment="{{ci_environment_slug}}"}')
  expect(Prometheus::ProxyService).to have_received(:new)
  .with(environment, 'GET', 'query', expected_params)
......
...@@ -3,7 +3,8 @@ ...@@ -3,7 +3,8 @@
require 'spec_helper' require 'spec_helper'
describe Projects::StaticSiteEditorController do describe Projects::StaticSiteEditorController do
let(:project) { create(:project, :public, :repository) } let_it_be(:project) { create(:project, :public, :repository) }
let_it_be(:user) { create(:user) }
describe 'GET show' do describe 'GET show' do
let(:default_params) do let(:default_params) do
...@@ -27,8 +28,6 @@ describe Projects::StaticSiteEditorController do ...@@ -27,8 +28,6 @@ describe Projects::StaticSiteEditorController do
end end
context 'as guest' do context 'as guest' do
let(:user) { create(:user) }
before do before do
project.add_guest(user) project.add_guest(user)
sign_in(user) sign_in(user)
...@@ -42,10 +41,11 @@ describe Projects::StaticSiteEditorController do ...@@ -42,10 +41,11 @@ describe Projects::StaticSiteEditorController do
%w[developer maintainer].each do |role| %w[developer maintainer].each do |role|
context "as #{role}" do context "as #{role}" do
let(:user) { create(:user) } before_all do
project.add_role(user, role)
end
before do before do
project.add_role(user, role)
sign_in(user) sign_in(user)
get :show, params: default_params get :show, params: default_params
end end
...@@ -54,11 +54,8 @@ describe Projects::StaticSiteEditorController do ...@@ -54,11 +54,8 @@ describe Projects::StaticSiteEditorController do
expect(response).to render_template(:show) expect(response).to render_template(:show)
end end
it 'assigns a config variable' do it 'assigns a required variables' do
expect(assigns(:config)).to be_a(Gitlab::StaticSiteEditor::Config) expect(assigns(:config)).to be_a(Gitlab::StaticSiteEditor::Config)
end
it 'correctly assigns ref and path' do
expect(assigns(:ref)).to eq('master') expect(assigns(:ref)).to eq('master')
expect(assigns(:path)).to eq('README.md') expect(assigns(:path)).to eq('README.md')
end end
......
@@ -3,8 +3,8 @@
  require 'spec_helper'
  describe 'Static Site Editor' do
- let(:user) { create(:user) }
- let(:project) { create(:project, :public, :repository) }
+ let_it_be(:user) { create(:user) }
+ let_it_be(:project) { create(:project, :public, :repository) }
  before do
  project.add_maintainer(user)
......
@@ -9,6 +9,7 @@ test:
  ssl: true
  start_tls: false
  mailbox: "inbox"
+ expunge_deleted: true
  service_desk_email:
  enabled: true
@@ -20,3 +21,4 @@ test:
  ssl: true
  start_tls: false
  mailbox: "inbox"
+ expunge_deleted: true
...@@ -26,6 +26,7 @@ describe Gitlab::Diff::Formatters::TextFormatter do ...@@ -26,6 +26,7 @@ describe Gitlab::Diff::Formatters::TextFormatter do
# Specific text formatter examples # Specific text formatter examples
let!(:formatter) { described_class.new(attrs) } let!(:formatter) { described_class.new(attrs) }
let(:attrs) { base }
describe '#line_age' do describe '#line_age' do
subject { formatter.line_age } subject { formatter.line_age }
...@@ -42,4 +43,21 @@ describe Gitlab::Diff::Formatters::TextFormatter do ...@@ -42,4 +43,21 @@ describe Gitlab::Diff::Formatters::TextFormatter do
it { is_expected.to eq('old') } it { is_expected.to eq('old') }
end end
end end
describe "#==" do
it "is false when the line_range changes" do
formatter_1 = described_class.new(base.merge(line_range: { start_line_code: "foo", end_line_code: "bar" }))
formatter_2 = described_class.new(base.merge(line_range: { start_line_code: "foo", end_line_code: "baz" }))
expect(formatter_1).not_to eq(formatter_2)
end
it "is true when the line_range doesn't change" do
attrs = base.merge({ line_range: { start_line_code: "foo", end_line_code: "baz" } })
formatter_1 = described_class.new(attrs)
formatter_2 = described_class.new(attrs)
expect(formatter_1).to eq(formatter_2)
end
end
end end
@@ -90,7 +90,7 @@ describe Gitlab::Elasticsearch::Logs::Lines do
  it 'can search on filebeat 6' do
  expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_filebeat_6)).and_return(es_response)
- result = subject.pod_logs(namespace, pod_name: pod_name, filebeat7: false)
+ result = subject.pod_logs(namespace, pod_name: pod_name, chart_above_v2: false)
  expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
  end
  end
......
@@ -75,4 +75,10 @@ describe Gitlab::Kubernetes::Helm::DeleteCommand do
  it_behaves_like 'helm command' do
  let(:command) { delete_command }
  end
+ describe '#delete_command' do
+ it 'deletes the release' do
+ expect(subject.delete_command).to eq('helm delete --purge app-name')
+ end
+ end
  end
@@ -13,7 +13,8 @@ describe Gitlab::MailRoom do
  start_tls: false,
  mailbox: 'inbox',
  idle_timeout: 60,
- log_path: Rails.root.join('log', 'mail_room_json.log').to_s
+ log_path: Rails.root.join('log', 'mail_room_json.log').to_s,
+ expunge_deleted: false
  }
  end
......
...@@ -19,8 +19,9 @@ describe Clusters::Applications::ElasticStack do ...@@ -19,8 +19,9 @@ describe Clusters::Applications::ElasticStack do
it 'is initialized with elastic stack arguments' do it 'is initialized with elastic stack arguments' do
expect(subject.name).to eq('elastic-stack') expect(subject.name).to eq('elastic-stack')
expect(subject.chart).to eq('stable/elastic-stack') expect(subject.chart).to eq('elastic-stack/elastic-stack')
expect(subject.version).to eq('2.0.0') expect(subject.version).to eq('3.0.0')
expect(subject.repository).to eq('https://charts.gitlab.io')
expect(subject).to be_rbac expect(subject).to be_rbac
expect(subject.files).to eq(elastic_stack.files) expect(subject.files).to eq(elastic_stack.files)
expect(subject.preinstall).to be_empty expect(subject.preinstall).to be_empty
...@@ -42,7 +43,19 @@ describe Clusters::Applications::ElasticStack do ...@@ -42,7 +43,19 @@ describe Clusters::Applications::ElasticStack do
it 'includes a preinstall script' do it 'includes a preinstall script' do
expect(subject.preinstall).not_to be_empty expect(subject.preinstall).not_to be_empty
expect(subject.preinstall.first).to include("filebeat.enable") expect(subject.preinstall.first).to include("delete")
end
end
context 'on versions older than 3' do
before do
elastic_stack.status = elastic_stack.status_states[:updating]
elastic_stack.version = "2.9.0"
end
it 'includes a preinstall script' do
expect(subject.preinstall).not_to be_empty
expect(subject.preinstall.first).to include("delete")
end end
end end
...@@ -50,11 +63,47 @@ describe Clusters::Applications::ElasticStack do ...@@ -50,11 +63,47 @@ describe Clusters::Applications::ElasticStack do
let(:elastic_stack) { create(:clusters_applications_elastic_stack, :errored, version: '0.0.1') } let(:elastic_stack) { create(:clusters_applications_elastic_stack, :errored, version: '0.0.1') }
it 'is initialized with the locked version' do it 'is initialized with the locked version' do
expect(subject.version).to eq('2.0.0') expect(subject.version).to eq('3.0.0')
end end
end end
end end
describe '#chart_above_v2?' do
let(:elastic_stack) { create(:clusters_applications_elastic_stack, version: version) }
subject { elastic_stack.chart_above_v2? }
context 'on v1.9.0' do
let(:version) { '1.9.0' }
it { is_expected.to be_falsy }
end
context 'on v2.0.0' do
let(:version) { '2.0.0' }
it { is_expected.to be_truthy }
end
end
describe '#chart_above_v3?' do
let(:elastic_stack) { create(:clusters_applications_elastic_stack, version: version) }
subject { elastic_stack.chart_above_v3? }
context 'on v1.9.0' do
let(:version) { '1.9.0' }
it { is_expected.to be_falsy }
end
context 'on v3.0.0' do
let(:version) { '3.0.0' }
it { is_expected.to be_truthy }
end
end
describe '#uninstall_command' do describe '#uninstall_command' do
let!(:elastic_stack) { create(:clusters_applications_elastic_stack) } let!(:elastic_stack) { create(:clusters_applications_elastic_stack) }
...@@ -70,7 +119,7 @@ describe Clusters::Applications::ElasticStack do ...@@ -70,7 +119,7 @@ describe Clusters::Applications::ElasticStack do
it 'specifies a post delete command to remove custom resource definitions' do it 'specifies a post delete command to remove custom resource definitions' do
expect(subject.postdelete).to eq([ expect(subject.postdelete).to eq([
'kubectl delete pvc --selector release\\=elastic-stack' 'kubectl delete pvc --selector app\\=elastic-stack-elasticsearch-master --namespace gitlab-managed-apps'
]) ])
end end
end end
......
# frozen_string_literal: true
require 'spec_helper'
describe PlanLimits do
let(:plan_limits) { create(:plan_limits) }
let(:model) { ProjectHook }
let(:count) { model.count }
before do
create(:project_hook)
end
context 'without plan limits configured' do
describe '#exceeded?' do
it 'does not exceed any relation offset' do
expect(plan_limits.exceeded?(:project_hooks, model)).to be false
expect(plan_limits.exceeded?(:project_hooks, count)).to be false
end
end
end
context 'with plan limits configured' do
before do
plan_limits.update!(project_hooks: 2)
end
describe '#exceeded?' do
it 'does not exceed the relation offset' do
expect(plan_limits.exceeded?(:project_hooks, model)).to be false
expect(plan_limits.exceeded?(:project_hooks, count)).to be false
end
end
context 'with boundary values' do
before do
create(:project_hook)
end
describe '#exceeded?' do
it 'does exceed the relation offset' do
expect(plan_limits.exceeded?(:project_hooks, model)).to be true
expect(plan_limits.exceeded?(:project_hooks, count)).to be true
end
end
end
end
context 'validates default values' do
let(:columns_with_zero) do
%w[
ci_active_pipelines
ci_pipeline_size
ci_active_jobs
]
end
it "has positive values for enabled limits" do
attributes = plan_limits.attributes
attributes = attributes.except(described_class.primary_key)
attributes = attributes.except(described_class.reflections.values.map(&:foreign_key))
attributes = attributes.except(*columns_with_zero)
expect(attributes).to all(include(be_positive))
end
it "has zero values for disabled limits" do
attributes = plan_limits.attributes
attributes = attributes.slice(*columns_with_zero)
expect(attributes).to all(include(be_zero))
end
end
end
@@ -6,7 +6,7 @@ describe 'Creation of a new branch' do
  include GraphqlHelpers
  let_it_be(:current_user) { create(:user) }
- let(:project) { create(:project, :public, :repository) }
+ let_it_be(:project) { create(:project, :public, :empty_repo) }
  let(:input) { { project_path: project.full_path, name: new_branch, ref: ref } }
  let(:new_branch) { 'new_branch' }
  let(:ref) { 'master' }
@@ -35,6 +35,7 @@
  end
  context 'when ref is not correct' do
+ let(:new_branch) { 'another_branch' }
  let(:ref) { 'unknown' }
  it_behaves_like 'a mutation that returns errors in the response',
......
@@ -255,7 +255,7 @@ describe ::PodLogs::ElasticsearchService do
  .and_return(Elasticsearch::Transport::Client.new)
  allow_any_instance_of(::Gitlab::Elasticsearch::Logs::Lines)
  .to receive(:pod_logs)
- .with(namespace, pod_name: pod_name, container_name: container_name, search: search, start_time: start_time, end_time: end_time, cursor: cursor, filebeat7: true)
+ .with(namespace, pod_name: pod_name, container_name: container_name, search: search, start_time: start_time, end_time: end_time, cursor: cursor, chart_above_v2: true)
  .and_return({ logs: expected_logs, cursor: expected_cursor })
  result = subject.send(:pod_logs, result_arg)
......
...@@ -6,7 +6,7 @@ describe Prometheus::ProxyVariableSubstitutionService do ...@@ -6,7 +6,7 @@ describe Prometheus::ProxyVariableSubstitutionService do
describe '#execute' do describe '#execute' do
let_it_be(:environment) { create(:environment) } let_it_be(:environment) { create(:environment) }
let(:params_keys) { { query: 'up{environment="%{ci_environment_slug}"}' } } let(:params_keys) { { query: 'up{environment="{{ci_environment_slug}}"}' } }
let(:params) { ActionController::Parameters.new(params_keys).permit! } let(:params) { ActionController::Parameters.new(params_keys).permit! }
let(:result) { subject.execute } let(:result) { subject.execute }
...@@ -32,21 +32,13 @@ describe Prometheus::ProxyVariableSubstitutionService do ...@@ -32,21 +32,13 @@ describe Prometheus::ProxyVariableSubstitutionService do
expect(params).to eq( expect(params).to eq(
ActionController::Parameters.new( ActionController::Parameters.new(
query: 'up{environment="%{ci_environment_slug}"}' query: 'up{environment="{{ci_environment_slug}}"}'
).permit! ).permit!
) )
end end
end end
context 'with predefined variables' do context 'with predefined variables' do
let(:params_keys) { { query: 'up{%{environment_filter}}' } }
it_behaves_like 'success' do
let(:expected_query) do
%Q[up{container_name!="POD",environment="#{environment.slug}"}]
end
end
context 'with nil query' do context 'with nil query' do
let(:params_keys) { {} } let(:params_keys) { {} }
...@@ -64,18 +56,6 @@ describe Prometheus::ProxyVariableSubstitutionService do ...@@ -64,18 +56,6 @@ describe Prometheus::ProxyVariableSubstitutionService do
let(:expected_query) { %Q[up{environment="#{environment.slug}"}] } let(:expected_query) { %Q[up{environment="#{environment.slug}"}] }
end end
end end
context 'with ruby and liquid formats' do
let(:params_keys) do
{ query: 'up{%{environment_filter},env2="{{ci_environment_slug}}"}' }
end
it_behaves_like 'success' do
let(:expected_query) do
%Q[up{container_name!="POD",environment="#{environment.slug}",env2="#{environment.slug}"}]
end
end
end
end end
context 'with custom variables' do context 'with custom variables' do
...@@ -92,20 +72,6 @@ describe Prometheus::ProxyVariableSubstitutionService do ...@@ -92,20 +72,6 @@ describe Prometheus::ProxyVariableSubstitutionService do
let(:expected_query) { %q[up{pod_name="pod1"}] } let(:expected_query) { %q[up{pod_name="pod1"}] }
end end
context 'with ruby variable interpolation format' do
let(:params_keys) do
{
query: 'up{pod_name="%{pod_name}"}',
variables: ['pod_name', pod_name]
}
end
it_behaves_like 'success' do
# Custom variables cannot be used with the Ruby interpolation format.
let(:expected_query) { "up{pod_name=\"%{pod_name}\"}" }
end
end
context 'with predefined variables in variables parameter' do context 'with predefined variables in variables parameter' do
let(:params_keys) do let(:params_keys) do
{ {
...@@ -145,108 +111,6 @@ describe Prometheus::ProxyVariableSubstitutionService do ...@@ -145,108 +111,6 @@ describe Prometheus::ProxyVariableSubstitutionService do
let(:expected_query) { 'up{pod_name="{{pod_name}}"}' } let(:expected_query) { 'up{pod_name="{{pod_name}}"}' }
end end
end end
context 'with ruby and liquid variables' do
let(:params_keys) do
{
query: 'up{env1="%{ruby_variable}",env2="{{ liquid_variable }}"}',
variables: %w(ruby_variable value liquid_variable env_slug)
}
end
it_behaves_like 'success' do
# It should replace only liquid variables with their values
let(:expected_query) { %q[up{env1="%{ruby_variable}",env2="env_slug"}] }
end
end
end
context 'ruby template rendering' do
let(:params_keys) do
{ query: 'up{env=%{ci_environment_slug},%{environment_filter}}' }
end
it_behaves_like 'success' do
let(:expected_query) do
"up{env=#{environment.slug},container_name!=\"POD\"," \
"environment=\"#{environment.slug}\"}"
end
end
context 'with multiple occurrences of variable in string' do
let(:params_keys) do
{ query: 'up{env1=%{ci_environment_slug},env2=%{ci_environment_slug}}' }
end
it_behaves_like 'success' do
let(:expected_query) { "up{env1=#{environment.slug},env2=#{environment.slug}}" }
end
end
context 'with multiple variables in string' do
let(:params_keys) do
{ query: 'up{env=%{ci_environment_slug},%{environment_filter}}' }
end
it_behaves_like 'success' do
let(:expected_query) do
"up{env=#{environment.slug}," \
"container_name!=\"POD\",environment=\"#{environment.slug}\"}"
end
end
end
context 'with unknown variables in string' do
let(:params_keys) { { query: 'up{env=%{env_slug}}' } }
it_behaves_like 'success' do
let(:expected_query) { 'up{env=%{env_slug}}' }
end
end
# This spec is needed if there are multiple keys in the context provided
# by `Gitlab::Prometheus::QueryVariables.call(environment)` which is
# passed to the Ruby `%` operator.
# If the number of keys in the context is one, there is no need for
# this spec.
context 'with extra variables in context' do
let(:params_keys) { { query: 'up{env=%{ci_environment_slug}}' } }
it_behaves_like 'success' do
let(:expected_query) { "up{env=#{environment.slug}}" }
end
it 'has more than one variable in context' do
expect(Gitlab::Prometheus::QueryVariables.call(environment).size).to be > 1
end
end
# The ruby % operator will not replace known variables if there are unknown
# variables also in the string. It doesn't raise an error
# (though the `sprintf` and `format` methods do).
context 'with unknown and known variables in string' do
let(:params_keys) do
{ query: 'up{env=%{ci_environment_slug},other_env=%{env_slug}}' }
end
it_behaves_like 'success' do
let(:expected_query) { 'up{env=%{ci_environment_slug},other_env=%{env_slug}}' }
end
end
context 'when rendering raises error' do
context 'when TypeError is raised' do
let(:params_keys) { { query: '{% a %}' } }
it_behaves_like 'error', 'Malformed string'
end
context 'when ArgumentError is raised' do
let(:params_keys) { { query: '%<' } }
it_behaves_like 'error', 'Malformed string'
end
end
end end
context 'gsub variable substitution tolerance for weirdness' do context 'gsub variable substitution tolerance for weirdness' do
......
# Default values for elastic-stack.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
elasticsearch: elasticsearch:
enabled: true enabled: true
cluster: # prefix elasticsearch resources with the name of the releases
env: # looks like we can't use {{ .Release.Name }}-elasticsearch
MINIMUM_MASTER_NODES: "1" # https://github.com/helm/helm/issues/2133
master: clusterName: "elastic-stack-elasticsearch"
replicas: 2
client:
replicas: 1
data:
replicas: 2
kibana:
enabled: false
logstash:
enabled: false
filebeat: filebeat:
enabled: true enabled: true
config: filebeatConfig:
output.file.enabled: false filebeat.yml: |
output.elasticsearch: output.file.enabled: false
enabled: true output.elasticsearch:
hosts: ["http://elastic-stack-elasticsearch-client:9200"] hosts: ["http://elastic-stack-elasticsearch-master:9200"]
filebeat.inputs: filebeat.inputs:
- type: log - type: container
enabled: true paths:
paths: - '/var/lib/docker/containers/*/*.log'
- /var/log/*.log json.keys_under_root: true
- /var/log/messages json.ignore_decoding_error: true
- /var/log/syslog processors:
- type: docker - add_id:
containers.ids: target_field: tie_breaker_id
- "*" - add_cloud_metadata: ~
json.keys_under_root: true - add_kubernetes_metadata: ~
json.ignore_decoding_error: true - decode_json_fields:
processors: fields: ["message"]
- add_kubernetes_metadata: when:
- drop_event: equals:
when: kubernetes.container.namespace: "gitlab-managed-apps"
equals: kubernetes.container.name: "modsecurity-log"
kubernetes.container.name: "filebeat" kibana:
- decode_json_fields:
fields: ["message"]
when:
equals:
kubernetes.container.name: "modsecurity-log"
fluentd:
enabled: false
fluent-bit:
enabled: false
nginx-ldapauth-proxy:
enabled: false enabled: false
elasticsearchHosts: "http://elastic-stack-elasticsearch-master:9200"
elasticsearch-curator: elasticsearch-curator:
enabled: true enabled: true
...@@ -63,7 +44,7 @@ elasticsearch-curator: ...@@ -63,7 +44,7 @@ elasticsearch-curator:
--- ---
client: client:
hosts: hosts:
- elastic-stack-elasticsearch-client - elastic-stack-elasticsearch-master
port: 9200 port: 9200
action_file_yml: |- action_file_yml: |-
--- ---
...@@ -76,6 +57,7 @@ elasticsearch-curator: ...@@ -76,6 +57,7 @@ elasticsearch-curator:
actionable list of indices (ignore_empty_list) and exit cleanly. actionable list of indices (ignore_empty_list) and exit cleanly.
options: options:
ignore_empty_list: True ignore_empty_list: True
allow_ilm_indices: True
filters: filters:
- filtertype: pattern - filtertype: pattern
kind: prefix kind: prefix
...@@ -86,17 +68,3 @@ elasticsearch-curator: ...@@ -86,17 +68,3 @@ elasticsearch-curator:
timestring: '%Y.%m.%d' timestring: '%Y.%m.%d'
unit: days unit: days
unit_count: 30 unit_count: 30
2:
action: delete_indices
description: >-
Indices created by filebeat 6.7.0 are incompatible with filebeat 7,
so they will be deleted.
options:
ignore_empty_list: True
filters:
- filtertype: pattern
kind: prefix
value: filebeat-6.7.0-
elasticsearch-exporter:
enabled: false