Commit c66aadd2 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

Parent 640e72ca
......@@ -250,8 +250,7 @@ GitlabSecurity/PublicSend:
- 'ee/spec/**/*'
Gitlab/DuplicateSpecLocation:
Exclude:
- ee/spec/lib/gitlab/gl_repository_spec.rb
Enabled: true
Cop/InjectEnterpriseEditionModule:
Enabled: true
......
......@@ -58,7 +58,7 @@ module Prometheus
def substitute_variables(result)
return success(result) unless query(result)
result[:params][:query] = gsub(query(result), full_context)
result[:params][:query] = gsub(query(result), full_context(result))
success(result)
end
......@@ -75,12 +75,16 @@ module Prometheus
end
end
def predefined_context
Gitlab::Prometheus::QueryVariables.call(@environment).stringify_keys
def predefined_context(result)
Gitlab::Prometheus::QueryVariables.call(
@environment,
start_time: start_timestamp(result),
end_time: end_timestamp(result)
).stringify_keys
end
def full_context
@full_context ||= predefined_context.reverse_merge(variables_hash)
def full_context(result)
@full_context ||= predefined_context(result).reverse_merge(variables_hash)
end
def variables
......@@ -91,6 +95,16 @@ module Prometheus
variables.to_h
end
def start_timestamp(result)
Time.rfc3339(result[:params][:start])
rescue ArgumentError
end
def end_timestamp(result)
Time.rfc3339(result[:params][:end])
rescue ArgumentError
end
def query(result)
result[:params][:query]
end
......
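For reference, a standalone sketch of the timestamp handling introduced above; the `params` hash below is a hypothetical stand-in for the service's `result[:params]`:

```ruby
require 'time'

# Hypothetical stand-in for result[:params] in the service above.
params = { start: '2020-05-29T07:23:05.008Z', end: 'not-a-timestamp' }

def parse_rfc3339(value)
  Time.rfc3339(value.to_s)
rescue ArgumentError
  nil # mirrors start_timestamp/end_timestamp: invalid or missing input yields no timestamp
end

parse_rfc3339(params[:start]) # => Time for 2020-05-29 07:23:05 UTC
parse_rfc3339(params[:end])   # => nil
```

A `nil` timestamp simply leaves `__range` unset, as shown in the query variables change further down.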
......@@ -14,10 +14,10 @@ module Snippets
@snippets = snippets
end
def execute
def execute(options = {})
return ServiceResponse.success(message: 'No snippets found.') if snippets.empty?
user_can_delete_snippets!
user_can_delete_snippets! unless options[:hard_delete]
attempt_delete_repositories!
snippets.destroy_all # rubocop: disable Cop/DestroyAll
......
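A toy stand-in (not the real service) illustrating how the new `hard_delete` option bypasses the permission check:

```ruby
# Toy model of the behavior above; the real service also destroys repositories and records.
class ToyBulkDestroyService
  def execute(options = {})
    check_permissions! unless options[:hard_delete] # mirrors `user_can_delete_snippets!`
    :destroyed
  end

  private

  def check_permissions!
    raise "You don't have access to delete these snippets."
  end
end

service = ToyBulkDestroyService.new
service.execute(hard_delete: true) # => :destroyed, permission check skipped

begin
  service.execute # without the option, the permission check runs and may raise
rescue RuntimeError => e
  e.message # => "You don't have access to delete these snippets."
end
```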
......@@ -56,7 +56,7 @@ module Users
MigrateToGhostUserService.new(user).execute unless options[:hard_delete]
response = Snippets::BulkDestroyService.new(current_user, user.snippets).execute
response = Snippets::BulkDestroyService.new(current_user, user.snippets).execute(options)
raise DestroyError, response.message if response.error?
# Rails attempts to load all related records into memory before
......
---
title: Provide `__range` variable for Prometheus queries
merge_request: 33521
author:
type: added
---
title: Pass hard delete option to snippets bulk destroy
merge_request: 33520
author:
type: fixed
Akismet
Alertmanager
Algolia
Alibaba
allowlist
allowlisting
allowlists
......@@ -48,6 +49,7 @@ blockquoted
blockquotes
blockquoting
boolean
Bootsnap
browsable
Bugzilla
Buildkite
......@@ -60,6 +62,8 @@ cacheable
CAS
CentOS
Chatops
checksummed
checksumming
Citrix
Citus
clonable
......@@ -72,6 +76,7 @@ compilable
composable
Conda
Consul
Corosync
cron
crons
crontab
......@@ -91,6 +96,7 @@ deduplication
denylist
denylisting
denylists
deployers
deprovision
deprovisioned
deprovisioning
......@@ -115,6 +121,7 @@ failovers
failsafe
fastlane
favicon
Fio
firewalled
Flawfinder
Flowdock
......@@ -157,6 +164,7 @@ hotfixing
http
https
idempotence
idmapper
Ingress
initializer
initializers
......@@ -185,6 +193,7 @@ Kubernetes
Kubesec
Laravel
LDAP
ldapsearch
Libravatar
Lograge
Logstash
......@@ -193,6 +202,7 @@ lookaheads
lookbehind
lookbehinds
lookups
loopback
Lucene
Maildir
Makefile
......@@ -247,6 +257,7 @@ OpenShift
Packagist
parallelization
parallelizations
passwordless
performant
Pipfile
Pipfiles
......@@ -273,8 +284,8 @@ Prometheus
proxied
proxies
proxying
Pseudonymized
Pseudonymizer
pseudonymized
pseudonymizer
Puma
Python
Qualys
......@@ -298,8 +309,8 @@ reindexes
reindexing
relicensing
remediations
Repmgr
Repmgrd
repmgr
repmgrd
repurposing
requeue
requeued
......@@ -320,6 +331,7 @@ reverifies
reverify
Rubix
Rubocop
Rubular
runbook
runbooks
runit
......@@ -355,6 +367,7 @@ SpotBugs
SSH
Stackdriver
storable
storages
strace
strikethrough
strikethroughs
......@@ -379,6 +392,7 @@ subquerying
substring
substrings
syslog
tcpdump
Tiller
todos
tokenizer
......@@ -433,6 +447,9 @@ unpublished
unpublishes
unpublishing
unreferenced
unregister
unregistered
unregisters
unreplicated
unresolve
unresolved
......
......@@ -93,7 +93,7 @@ NOTE: **Note:**
The `encryption` value `simple_tls` corresponds to 'Simple TLS' in the LDAP
library. `start_tls` corresponds to StartTLS, not to be confused with regular TLS.
Normally, if you specify `simple_tls` it will be on port 636, while `start_tls` (StartTLS)
would be on port 389. `plain` also operates on port 389. Removed values: "tls" was replaced with "start_tls" and "ssl" was replaced with "simple_tls".
would be on port 389. `plain` also operates on port 389. Removed values: `tls` was replaced with `start_tls` and `ssl` was replaced with `simple_tls`.
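To make the port pairing concrete, here is a minimal Omnibus-style sketch; the hostname and values are illustrative, and the full set of keys is described in the settings table below.

```ruby
# /etc/gitlab/gitlab.rb: illustrative values only; see the settings table below for all keys.
gitlab_rails['ldap_servers'] = {
  'main' => {
    'label' => 'LDAP',
    'host' => 'ldap.example.com',
    'port' => 636,                # simple_tls is normally served on port 636
    'encryption' => 'simple_tls', # use 'start_tls' or 'plain' with port 389 instead
    'verify_certificates' => true
  }
}
```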
NOTE: **Note:**
LDAP users must have an email address set, regardless of whether it is used to log in.
......@@ -173,7 +173,7 @@ production:
| `bind_dn` | The full DN of the user you will bind with. | no | `'america\momo'` or `'CN=Gitlab,OU=Users,DC=domain,DC=com'` |
| `password` | The password of the bind user. | no | `'your_great_password'` |
| `encryption` | Encryption method. The `method` key is deprecated in favor of `encryption`. | yes | `'start_tls'` or `'simple_tls'` or `'plain'` |
| `verify_certificates` | Enables SSL certificate verification if encryption method is "start_tls" or "simple_tls". Defaults to true. | no | boolean |
| `verify_certificates` | Enables SSL certificate verification if encryption method is `start_tls` or `simple_tls`. Defaults to true. | no | boolean |
| `timeout` | Set a timeout, in seconds, for LDAP queries. This helps avoid blocking a request if the LDAP server becomes unresponsive. A value of 0 means there is no timeout. | no | `10` or `30` |
| `active_directory` | This setting specifies if LDAP server is Active Directory LDAP server. For non-AD servers it skips the AD specific queries. If your LDAP server is not AD, set this to false. | no | boolean |
| `allow_username_or_email_login` | If enabled, GitLab will ignore everything after the first `@` in the LDAP username submitted by the user on login. If you are using `uid: 'userPrincipalName'` on ActiveDirectory you need to disable this setting, because the userPrincipalName contains an `@`. | no | boolean |
......@@ -595,7 +595,7 @@ When enabled, the following applies:
NOTE: **Note:**
These are cron formatted values. You can use a crontab generator to create
these values, for example <http://www.crontabgenerator.com/>.
these values, for example [Crontab Generator](http://www.crontabgenerator.com/).
By default, GitLab runs a group sync process every hour, on the hour.
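As a concrete example, "every hour, on the hour" is the cron expression `0 * * * *`. In an Omnibus install the schedule is set in `/etc/gitlab/gitlab.rb`; the setting name below is an assumption for illustration, so check the group sync documentation for the exact key.

```ruby
# /etc/gitlab/gitlab.rb: assumed setting name, shown only to illustrate the cron format.
gitlab_rails['ldap_group_sync_worker_cron'] = "0 * * * *" # every hour, on the hour
```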
......
......@@ -17,7 +17,7 @@ The following documentation enables Okta as a SAML provider.
1. Now, very important, add a logo
(you can choose it from <https://about.gitlab.com/press/>). You'll have to
crop and resize it.
1. Next, you'll need the to fill in the SAML general config. Here's an example
1. Next, you'll need to fill in the SAML general configuration. Here's an example
image.
![Okta admin panel view](img/okta_admin_panel.png)
......@@ -25,14 +25,14 @@ The following documentation enables Okta as a SAML provider.
1. The last part of the configuration is the feedback section where you can
just say you're a customer and are creating an app for internal use.
1. When you have your app you'll have a few tabs on the top of the app's
profile. Click on the SAML 2.0 config instructions button which should
profile. Click on the SAML 2.0 configuration instructions button which should
look like the following:
![Okta SAML settings](img/okta_saml_settings.png)
1. On the screen that comes up take note of the
**Identity Provider Single Sign-On URL** which you'll use for the
`idp_sso_target_url` on your GitLab config file.
`idp_sso_target_url` on your GitLab configuration file.
1. **Before you leave Okta make sure you add your user and groups if any.**
......
......@@ -159,7 +159,7 @@ attribute. As a prerequisite, you must use an LDAP server that:
```
For example, the following is an example server context in an NGINX
configuration file (eg. in `/etc/nginx/sites-available/gitlab-ssl`):
configuration file (such as in `/etc/nginx/sites-available/gitlab-ssl`):
```plaintext
server {
......
......@@ -67,7 +67,7 @@ must disable the **primary** node.
NOTE: **Note:**
(**Ubuntu 14.04 LTS**) If you are using an older version of Ubuntu
or any other distro based on the Upstart init system, you can prevent GitLab
or any other distribution based on the Upstart init system, you can prevent GitLab
from starting if the machine reboots by doing the following:
```shell
......
......@@ -324,7 +324,7 @@ There is an [issue where support is being discussed](https://gitlab.com/gitlab-o
to the private key, which is **only** present on the **primary** node.
1. Test that the `gitlab-psql` user can connect to the **primary** node's database
(the default Omnibus database name is gitlabhq_production):
(the default Omnibus database name is `gitlabhq_production`):
```shell
sudo \
......
......@@ -12,7 +12,7 @@ You can set up a [Docker Registry](https://docs.docker.com/registry/) on your
## Storage support
Docker Registry currently supports a few types of storages. If you choose a
Docker Registry currently supports a few types of storage. If you choose a
distributed storage (`azure`, `gcs`, `s3`, `swift`, or `oss`) for your Docker
Registry on the **primary** node, you can use the same storage for a **secondary**
Docker Registry as well. For more information, read the
......@@ -23,7 +23,7 @@ integrated [Container Registry](../../packages/container_registry.md#container-r
## Replicating Docker Registry
You can enable a storage-agnostic replication so it
can be used for cloud or local storages. Whenever a new image is pushed to the
can be used for cloud or local storage. Whenever a new image is pushed to the
**primary** node, each **secondary** node will pull it to its own container
repository.
......
......@@ -69,7 +69,7 @@ Once your read-only replica is set up, you can skip to [configure you secondary
#### Manually configure the primary database for replication
The [geo_primary_role](https://docs.gitlab.com/omnibus/roles/#gitlab-geo-roles)
The [`geo_primary_role`](https://docs.gitlab.com/omnibus/roles/#gitlab-geo-roles)
configures the **primary** node's database to be replicated by making changes to
`pg_hba.conf` and `postgresql.conf`. Make the following configuration changes
manually to your external database configuration and ensure that you restart PostgreSQL
......@@ -130,7 +130,7 @@ hot_standby = on
### Configure **secondary** application nodes to use the external read-replica
With Omnibus, the
[geo_secondary_role](https://docs.gitlab.com/omnibus/roles/#gitlab-geo-roles)
[`geo_secondary_role`](https://docs.gitlab.com/omnibus/roles/#gitlab-geo-roles)
has three main functions:
1. Configure the replica database.
......
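For context, both Geo roles mentioned above are enabled with a single `roles` line in `/etc/gitlab/gitlab.rb` on the corresponding node (sketch only; reconfigure after editing):

```ruby
# /etc/gitlab/gitlab.rb on the Geo primary database node
roles ['geo_primary_role']

# /etc/gitlab/gitlab.rb on a Geo secondary application node
# roles ['geo_secondary_role']
```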
......@@ -15,7 +15,7 @@ The configuration file is passed as an argument to the `gitaly`
executable. This is usually done by either Omnibus GitLab or your
[init](https://en.wikipedia.org/wiki/Init) script.
An [example config file](https://gitlab.com/gitlab-org/gitaly/blob/master/config.toml.example)
An [example configuration file](https://gitlab.com/gitlab-org/gitaly/blob/master/config.toml.example)
can be found in the Gitaly project.
## Format
......@@ -160,7 +160,7 @@ sum(rate(gitaly_catfile_cache_total{type="hit"}[5m])) / sum(rate(gitaly_catfile_
A Gitaly process uses one or more `gitaly-ruby` helper processes to
execute RPC's implemented in Ruby instead of Go. The `[gitaly-ruby]`
section of the config file contains settings for these helper processes.
section of the configuration file contains settings for these helper processes.
These processes are known to occasionally suffer from memory leaks.
Gitaly restarts its `gitaly-ruby` helpers when their memory exceeds the
......
......@@ -343,7 +343,7 @@ When installing the GitLab package, do not supply `EXTERNAL_URL` value.
> Please note:
>
> - If you want your database to listen on a specific interface, change the config:
> - If you want your database to listen on a specific interface, change the configuration:
> `postgresql['listen_address'] = '0.0.0.0'`.
> - If your PgBouncer service runs under a different user account,
> you also need to specify: `postgresql['pgbouncer_user'] = PGBOUNCER_USERNAME` in
......@@ -902,7 +902,7 @@ after it has been restored to service.
##### Add a node as a standby server
From the stnadby node, run:
From the standby node, run:
```shell
gitlab-ctl repmgr standby follow NEW_MASTER
......@@ -917,7 +917,7 @@ after it has been restored to service.
##### Add a failed master back into the cluster as a standby node
Once `repmgrd` and PostgreSQL are runnning, the node will need to follow the new
Once `repmgrd` and PostgreSQL are running, the node will need to follow the new master
as a standby node.
```shell
......@@ -934,7 +934,7 @@ after it has been restored to service.
gitlab-ctl repmgr standby setup NEW_MASTER
```
Failure to unregister and readd the old master node can lead to subsequent failovers
Failure to unregister and re-add the old master node can lead to subsequent failovers
not working.
#### Alternate configurations
......
......@@ -104,7 +104,7 @@ for a real-world example of this exploit.
### Omnibus package installations
1. Find the `incoming_email` section in `/etc/gitlab/gitlab.rb`, enable the feature
and fill in the details for your specific IMAP server and email account (see [examples](#config-examples) below).
and fill in the details for your specific IMAP server and email account (see [examples](#configuration-examples) below).
1. Reconfigure GitLab for the changes to take effect:
......@@ -130,7 +130,7 @@ Reply by email should now be working.
```
1. Find the `incoming_email` section in `config/gitlab.yml`, enable the feature
and fill in the details for your specific IMAP server and email account (see [examples](#config-examples) below).
and fill in the details for your specific IMAP server and email account (see [examples](#configuration-examples) below).
1. Enable `mail_room` in the init script at `/etc/default/gitlab`:
......@@ -153,7 +153,7 @@ Reply by email should now be working.
Reply by email should now be working.
### Config examples
### Configuration examples
#### Postfix
......
......@@ -110,7 +110,7 @@ Here is a configuration example with GCS.
| `provider` | The provider name | `Google` |
| `google_project` | GCP project name | `gcp-project-12345` |
| `google_client_email` | The email address of the service account | `foo@gcp-project-12345.iam.gserviceaccount.com` |
| `google_json_key_location` | The json key path | `/path/to/gcp-project-12345-abcde.json` |
| `google_json_key_location` | The JSON key path | `/path/to/gcp-project-12345-abcde.json` |
NOTE: **Note:**
The service account must have permission to access the bucket.
......
......@@ -37,7 +37,7 @@ the configuration options as follows:
### Your own Libravatar server
If you are [running your own libravatar service](https://wiki.libravatar.org/running_your_own/),
If you are [running your own Libravatar service](https://wiki.libravatar.org/running_your_own/),
the URL will be different in the configuration, but you must provide the same
placeholders so GitLab can parse the URL correctly.
......
......@@ -790,7 +790,7 @@ Omnibus GitLab packages or in `/home/git/gitlab/log/service_measurement.log` for
installations from source.
It contains only a single structured log with measurements for each service execution.
It will contain measurement such as: number of sql calls, execution_time, gc_stats, memory usage, etc...
It will contain measurements such as: number of SQL calls, `execution_time`, `gc_stats`, memory usage, etc.
For example:
......
......@@ -59,7 +59,7 @@ you should
## Taking action on Prometheus alerts **(ULTIMATE)**
You can [add a webhook](../../../user/project/integrations/prometheus.md#external-prometheus-instances)
to the Prometheus config in order for GitLab to receive notifications of any alerts.
to the Prometheus configuration in order for GitLab to receive notifications of any alerts.
Once the webhook is set up, you can
[take action on incoming alerts](../../../user/project/integrations/prometheus.md#taking-action-on-incidents-ultimate).
......
......@@ -51,14 +51,14 @@ The following metrics are available:
| `gitlab_page_out_of_bounds` | Counter | 12.8 | Counter for the PageLimiter pagination limit being hit | `controller`, `action`, `bot` |
| `gitlab_rails_queue_duration_seconds` | Histogram | 9.4 | Measures latency between GitLab Workhorse forwarding a request to Rails | |
| `gitlab_sql_duration_seconds` | Histogram | 10.2 | SQL execution time, excluding SCHEMA operations and BEGIN / COMMIT | |
| `gitlab_transaction_allocated_memory_bytes` | Histogram | 10.2 | Allocated memory for all transactions (gitlab_transaction_* metrics) | |
| `gitlab_transaction_allocated_memory_bytes` | Histogram | 10.2 | Allocated memory for all transactions (`gitlab_transaction_*` metrics) | |
| `gitlab_transaction_cache_<key>_count_total` | Counter | 10.2 | Counter for total Rails cache calls (per key) | |
| `gitlab_transaction_cache_<key>_duration_total` | Counter | 10.2 | Counter for total time (seconds) spent in Rails cache calls (per key) | |
| `gitlab_transaction_cache_count_total` | Counter | 10.2 | Counter for total Rails cache calls (aggregate) | |
| `gitlab_transaction_cache_duration_total` | Counter | 10.2 | Counter for total time (seconds) spent in Rails cache calls (aggregate) | |
| `gitlab_transaction_cache_read_hit_count_total` | Counter | 10.2 | Counter for cache hits for Rails cache calls | `controller`, `action` |
| `gitlab_transaction_cache_read_miss_count_total` | Counter | 10.2 | Counter for cache misses for Rails cache calls | `controller`, `action` |
| `gitlab_transaction_duration_seconds` | Histogram | 10.2 | Duration for all transactions (gitlab_transaction_* metrics) | `controller`, `action` |
| `gitlab_transaction_duration_seconds` | Histogram | 10.2 | Duration for all transactions (`gitlab_transaction_*` metrics) | `controller`, `action` |
| `gitlab_transaction_event_build_found_total` | Counter | 9.4 | Counter for build found for API /jobs/request | |
| `gitlab_transaction_event_build_invalid_total` | Counter | 9.4 | Counter for build invalid due to concurrency conflict for API /jobs/request | |
| `gitlab_transaction_event_build_not_found_cached_total` | Counter | 9.4 | Counter for cached response of build not found for API /jobs/request | |
......@@ -124,7 +124,7 @@ configuration option in `gitlab.yml`. These metrics are served from the
| Metric | Type | Since | Description | Labels |
|:---------------------------------------------- |:------- |:----- |:----------- |:------ |
| `sidekiq_jobs_cpu_seconds` | Histogram | 12.4 | Seconds of cpu time to run Sidekiq job | `queue`, `boundary`, `external_dependencies`, `feature_category`, `job_status`, `urgency` |
| `sidekiq_jobs_cpu_seconds` | Histogram | 12.4 | Seconds of CPU time to run Sidekiq job | `queue`, `boundary`, `external_dependencies`, `feature_category`, `job_status`, `urgency` |
| `sidekiq_jobs_completion_seconds` | Histogram | 12.2 | Seconds to complete Sidekiq job | `queue`, `boundary`, `external_dependencies`, `feature_category`, `job_status`, `urgency` |
| `sidekiq_jobs_db_seconds` | Histogram | 12.9 | Seconds of DB time to run Sidekiq job | `queue`, `boundary`, `external_dependencies`, `feature_category`, `job_status`, `urgency` |
| `sidekiq_jobs_gitaly_seconds` | Histogram | 12.9 | Seconds of Gitaly time to run Sidekiq job | `queue`, `boundary`, `external_dependencies`, `feature_category`, `job_status`, `urgency` |
......
......@@ -84,7 +84,7 @@ To change the address/port that Prometheus listens on:
1. Save the file and [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to
take effect
### Adding custom scrape configs
### Adding custom scrape configurations
You can configure additional scrape targets for the Omnibus GitLab-bundled
Prometheus by editing `prometheus['scrape_configs']` in `/etc/gitlab/gitlab.rb`
......
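A sketch of a single additional scrape target in that setting; the job name and port below are made up for illustration:

```ruby
# /etc/gitlab/gitlab.rb: hypothetical extra scrape target for the bundled Prometheus
prometheus['scrape_configs'] = [
  {
    'job_name' => 'example-exporter',       # made-up job name
    'static_configs' => [
      { 'targets' => ['localhost:9123'] }   # made-up host:port
    ]
  }
]
```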
......@@ -14,7 +14,7 @@ I/O. The information on this page can be used for either scenario.
### Benchmarking with `fio`
We recommend using
[fio](https://fio.readthedocs.io/en/latest/fio_doc.html) to test I/O
[Fio](https://fio.readthedocs.io/en/latest/fio_doc.html) to test I/O
performance. This test should be run both on the NFS server and on the
application nodes that talk to the NFS server.
......
......@@ -40,9 +40,9 @@ master process has PID 56227 below.
[2015-06-05T10:58:08.708824 #62538] INFO -- : worker=10 ready
```
### Tunables
### Tunable options
The main tunables for Unicorn are the number of worker processes and the
The main tunable options for Unicorn are the number of worker processes and the
request timeout after which the Unicorn master terminates a worker process.
See the [Omnibus GitLab Unicorn settings
documentation](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/unicorn.md)
......
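In an Omnibus install, those two tunable options map to the following `gitlab.rb` settings (the values here are purely illustrative, not recommendations):

```ruby
# /etc/gitlab/gitlab.rb: illustrative values for the two main Unicorn tunable options
unicorn['worker_processes'] = 4  # number of Unicorn worker processes
unicorn['worker_timeout'] = 60   # seconds before the master terminates a stuck worker
```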
......@@ -376,7 +376,7 @@ The different supported drivers are:
| swift | OpenStack Swift Object Storage |
| oss | Aliyun OSS |
Read more about the individual driver's config options in the
Read more about the individual driver's configuration options in the
[Docker Registry docs](https://docs.docker.com/registry/configuration/#storage).
[Read more about using object storage with GitLab](../object_storage.md).
......@@ -413,7 +413,7 @@ NOTE: **Note:**
**Installations from source**
Configuring the storage driver is done in your registry config YML file created
Configuring the storage driver is done in your registry configuration YML file created
when you [deployed your Docker registry](https://docs.docker.com/registry/deploying/).
`s3` storage driver example:
......@@ -610,7 +610,7 @@ You can use GitLab as an auth endpoint with an external container registry.
You can configure the Container Registry to send webhook notifications in
response to events happening within the registry.
Read more about the Container Registry notifications config options in the
Read more about the Container Registry notifications configuration options in the
[Docker Registry notifications documentation](https://docs.docker.com/registry/notifications/).
NOTE: **Note:**
......@@ -641,7 +641,7 @@ To configure a notification endpoint in Omnibus:
**Installations from source**
Configuring the notification endpoint is done in your registry config YML file created
Configuring the notification endpoint is done in your registry configuration YML file created
when you [deployed your Docker registry](https://docs.docker.com/registry/deploying/).
Example:
......
......@@ -272,7 +272,7 @@ world. Custom domains are supported, but no TLS.
sudo ln -sf /etc/nginx/sites-{available,enabled}/gitlab-pages.conf
```
1. Edit all GitLab related configs in `/etc/nginx/site-available/` and replace
1. Edit all GitLab related configurations in `/etc/nginx/site-available/` and replace
`0.0.0.0` with `192.0.2.1`, where `192.0.2.1` is the primary IP address on which GitLab
listens.
1. Restart NGINX
......@@ -341,7 +341,7 @@ world. Custom domains and TLS are supported.
sudo ln -sf /etc/nginx/sites-{available,enabled}/gitlab-pages-ssl.conf
```
1. Edit all GitLab related configs in `/etc/nginx/site-available/` and replace
1. Edit all GitLab related configurations in `/etc/nginx/site-available/` and replace
`0.0.0.0` with `192.0.2.1`, where `192.0.2.1` is the primary IP address on which GitLab
listens.
1. Restart NGINX
......@@ -352,7 +352,7 @@ world. Custom domains and TLS are supported.
>**Note:**
The following information applies only for installations from source.
Be extra careful when setting up the domain name in the NGINX config. You must
Be extra careful when setting up the domain name in the NGINX configuration. You must
not remove the backslashes.
If your GitLab Pages domain is `example.io`, replace:
......
......@@ -7,11 +7,11 @@ For a full list of reference architectures, see
> - **Supported users (approximate):** 1,000
> - **High Availability:** False
| Users | Configuration([8](#footnotes)) | GCP | AWS | Azure |
|-------|--------------------------------|---------------|----------------------|------------------------|
| 100 | 2 vCPU, 7.2GB Memory | n1-standard-2 | m5.large | D2s v3 |
| 500 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| 1000 | 8 vCPU, 30GB Memory | n1-standard-8 | m5.2xlarge | D8s v3 |
| Users | Configuration([8](#footnotes)) | GCP | AWS | Azure |
|-------|--------------------------------|-----------------|----------------------|------------------------|
| 100 | 2 vCPU, 7.2GB Memory | `n1-standard-2` | `m5.large` | D2s v3 |
| 500 | 4 vCPU, 15GB Memory | `n1-standard-4` | `m5.xlarge` | D4s v3 |
| 1000 | 8 vCPU, 30GB Memory | `n1-standard-8` | `m5.2xlarge` | D8s v3 |
For situations where you need to serve up to 1,000 users, a single-node
solution with [frequent backups](index.md#automated-backups-core-only) is appropriate
......
......@@ -8,23 +8,23 @@ For a full list of reference architectures, see
> - **High Availability:** True
> - **Test RPS rates:** API: 500 RPS, Web: 50 RPS, Git: 50 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS | Azure |
|--------------------------------------------------------------|-------|---------------------------------|----------------|-----------------------|----------------|
| GitLab Rails ([1](#footnotes)) | 5 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | c5.9xlarge | F32s v2 |
| PostgreSQL | 3 | 8 vCPU, 30GB Memory | n1-standard-8 | m5.2xlarge | D8s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 32 vCPU, 120GB Memory | n1-standard-32 | m5.8xlarge | D32s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS | Azure |
|--------------------------------------------------------------|-------|---------------------------------|------------------|-----------------------|----------------|
| GitLab Rails ([1](#footnotes)) | 5 | 32 vCPU, 28.8GB Memory | `n1-highcpu-32` | `c5.9xlarge` | F32s v2 |
| PostgreSQL | 3 | 8 vCPU, 30GB Memory | `n1-standard-8` | `m5.2xlarge` | D8s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 32 vCPU, 120GB Memory | `n1-standard-32` | `m5.8xlarge` | D32s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | `n1-standard-4` | `m5.xlarge` | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | `n1-standard-4` | `m5.xlarge` | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | `g1-small` | `t2.small` | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | `g1-small` | `t2.small` | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | `n1-standard-4` | `m5.xlarge` | D4s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | `n1-highcpu-4` | `c5.xlarge` | F4s v2 |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | `n1-highcpu-4` | `c5.xlarge` | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | `n1-highcpu-4` | `c5.xlarge` | F4s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | `n1-highcpu-4` | `c5.xlarge` | F4s v2 |
## Footnotes
......
......@@ -10,14 +10,14 @@ For a full list of reference architectures, see
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS | Azure |
|--------------------------------------------------------------|-------|---------------------------------|---------------|-----------------------|----------------|
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| PostgreSQL | 1 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Redis ([3](#footnotes)) | 1 | 1 vCPU, 3.75GB Memory | n1-standard-1 | m5.large | D2s v3 |
| Gitaly ([5](#footnotes)) ([7](#footnotes)) | X ([2](#footnotes)) | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| GitLab Rails ([1](#footnotes)), Sidekiq | 2 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | `n1-highcpu-4` | `c5.xlarge` | F4s v2 |
| PostgreSQL | 1 | 2 vCPU, 7.5GB Memory | `n1-standard-2` | `m5.large` | D2s v3 |
| Redis ([3](#footnotes)) | 1 | 1 vCPU, 3.75GB Memory | `n1-standard-1` | `m5.large` | D2s v3 |
| Gitaly ([5](#footnotes)) ([7](#footnotes)) | X ([2](#footnotes)) | 4 vCPU, 15GB Memory | `n1-standard-4` | `m5.xlarge` | D4s v3 |
| GitLab Rails ([1](#footnotes)), Sidekiq | 2 | 8 vCPU, 7.2GB Memory | `n1-highcpu-8` | `c5.2xlarge` | F8s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
## Setup instructions
......
......@@ -16,18 +16,18 @@ following the [2,000-user reference architecture](2k_users.md).
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS | Azure |
|--------------------------------------------------------------|-------|---------------------------------|---------------|-----------------------|----------------|
| GitLab Rails ([1](#footnotes)) | 3 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Consul + Sentinel ([3](#footnotes)) | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| GitLab Rails ([1](#footnotes)) | 3 | 8 vCPU, 7.2GB Memory | `n1-highcpu-8` | `c5.2xlarge` | F8s v2 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | `n1-standard-2` | `m5.large` | D2s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 4 vCPU, 15GB Memory | `n1-standard-4` | `m5.xlarge` | D4s v3 |
| Redis ([3](#footnotes)) | 3 | 2 vCPU, 7.5GB Memory | `n1-standard-2` | `m5.large` | D2s v3 |
| Consul + Sentinel ([3](#footnotes)) | 3 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | `n1-standard-2` | `m5.large` | D2s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | `n1-highcpu-4` | `c5.xlarge` | F4s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
## Footnotes
......
......@@ -10,21 +10,21 @@ For a full list of reference architectures, see
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS | Azure |
|--------------------------------------------------------------|-------|---------------------------------|----------------|-----------------------|----------------|
| GitLab Rails ([1](#footnotes)) | 12 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | c5.9xlarge | F32s v2 |
| PostgreSQL | 3 | 16 vCPU, 60GB Memory | n1-standard-16 | m5.4xlarge | D16s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 64 vCPU, 240GB Memory | n1-standard-64 | m5.16xlarge | D64s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| GitLab Rails ([1](#footnotes)) | 12 | 32 vCPU, 28.8GB Memory | `n1-highcpu-32` | `c5.9xlarge` | F32s v2 |
| PostgreSQL | 3 | 16 vCPU, 60GB Memory | `n1-standard-16` | `m5.4xlarge` | D16s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 64 vCPU, 240GB Memory | `n1-standard-64` | `m5.16xlarge` | D64s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | `n1-standard-4` | `m5.xlarge` | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | `n1-standard-4` | `m5.xlarge` | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | `g1-small` | `t2.small` | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | `g1-small` | `t2.small` | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | `n1-standard-4` | `m5.xlarge` | D4s v3 |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | `n1-highcpu-4` | `c5.xlarge` | F4s v2 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | `n1-highcpu-4` | `c5.xlarge` | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 8 vCPU, 7.2GB Memory | `n1-highcpu-8` | `c5.2xlarge` | F8s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 8 vCPU, 7.2GB Memory | `n1-highcpu-8` | `c5.2xlarge` | F8s v2 |
## Footnotes
......
......@@ -10,18 +10,18 @@ For a full list of reference architectures, see
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS | Azure |
|--------------------------------------------------------------|-------|---------------------------------|---------------|-----------------------|----------------|
| GitLab Rails ([1](#footnotes)) | 3 | 16 vCPU, 14.4GB Memory | n1-highcpu-16 | c5.4xlarge | F16s v2 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 8 vCPU, 30GB Memory | n1-standard-8 | m5.2xlarge | D8s v3 |
| Redis ([3](#footnotes)) | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Consul + Sentinel ([3](#footnotes)) | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| GitLab Rails ([1](#footnotes)) | 3 | 16 vCPU, 14.4GB Memory | `n1-highcpu-16` | `c5.4xlarge` | F16s v2 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | `n1-standard-2` | `m5.large` | D2s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 8 vCPU, 30GB Memory | `n1-standard-8` | `m5.2xlarge` | D8s v3 |
| Redis ([3](#footnotes)) | 3 | 2 vCPU, 7.5GB Memory | `n1-standard-2` | `m5.large` | D2s v3 |
| Consul + Sentinel ([3](#footnotes)) | 3 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | `n1-standard-2` | `m5.large` | D2s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | `n1-highcpu-4` | `c5.xlarge` | F4s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | `n1-highcpu-2` | `c5.large` | F2s v2 |
## Footnotes
......
......@@ -10,7 +10,7 @@ storage shards) to distribute the storage load between several mount points.
> - You must have at least one storage path called `default`.
> - The paths are defined in key-value pairs. The key is an arbitrary name you
> can pick to name the file path.
> - The target directories and any of its subpaths must not be a symlink.
> - The target directories and any of its sub-paths must not be a symlink.
> - No target directory may be a sub-directory of another; no nesting.
Example: this is OK:
......@@ -57,7 +57,7 @@ storage2:
Now that you've read that big fat warning above, let's edit the configuration
files and add the full paths of the alternative repository storage paths. In
the example below, we add two more mountpoints that are named `nfs_1` and `nfs_2`
the example below, we add two more mount points that are named `nfs_1` and `nfs_2`
respectively.
NOTE: **Note:** This example uses NFS. We do not recommend using EFS for storage as it may impact GitLab's performance. See the [relevant documentation](high_availability/nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
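In Omnibus installations, the mount points described above are declared with the `git_data_dirs` helper; a sketch with example paths (the `default` storage must always be present):

```ruby
# /etc/gitlab/gitlab.rb: example paths only; `default` must always exist
git_data_dirs({
  "default" => { "path" => "/var/opt/gitlab/git-data" },
  "nfs_1"   => { "path" => "/mnt/nfs1/git-data" },
  "nfs_2"   => { "path" => "/mnt/nfs2/git-data" }
})
```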
......
......@@ -6,8 +6,8 @@ to contribute to GitLab in a way that is easy for everyone.
For a first-time step-by-step guide to the contribution process, see our
[Contributing to GitLab](https://about.gitlab.com/community/contribute/) page.
Looking for something to work on? Look for issues with the label
[`~Accepting merge requests`](#how-to-contribute).
Looking for something to work on? See the
[How to contribute](#how-to-contribute) section for more information.
GitLab comes in two flavors:
......@@ -76,9 +76,12 @@ Sign up for the mailing list, answer GitLab questions on StackOverflow or respon
## How to contribute
If you want to contribute to GitLab,
[issues with the `~Accepting merge requests` label](issue_workflow.md#label-for-community-contributors)
are a great place to start.
If you would like to contribute to GitLab:
- Issues with the
[`~Accepting merge requests` label](issue_workflow.md#label-for-community-contributors)
are a great place to start.
- Consult the [Contribution Flow](#contribution-flow) section to learn the process.
If you have any questions or need help visit [Getting Help](https://about.gitlab.com/get-help/) to
learn how to communicate with GitLab. We have a [Gitter channel for contributors](https://gitter.im/gitlab/contributors),
......@@ -96,25 +99,77 @@ a Merge Request.
For more information, see the [`gitlab-development-kit`](https://gitlab.com/gitlab-org/gitlab-development-kit)
project.
## Contribution Flow
When contributing to GitLab, your merge request is subject to review by merge request maintainers of a particular specialty.
When you submit code to GitLab, we really want it to get merged, but there will be times when it will not be merged.
When maintainers are reading through a merge request they may request guidance from other maintainers. If merge request maintainers conclude that the code should not be merged, our reasons will be fully disclosed. If it has been decided that the code quality is not up to GitLab’s standards, the merge request maintainer will refer the author to our docs and code style guides, and provide some guidance.
Sometimes style guides will be followed but the code will lack structural integrity, or the maintainer will have reservations about the code’s overall quality. When there is a reservation the maintainer will inform the author and provide some guidance. The author may then choose to update the merge request. Once the merge request has been updated and reassigned to the maintainer, they will review the code again. Once the code has been resubmitted any number of times, the maintainer may choose to close the merge request with a summary of why it will not be merged, as well as some guidance. If the merge request is closed the maintainer will be open to discussion as to how to improve the code so it can be approved in the future.
GitLab will do its best to review community contributions as quickly as possible. Specially appointed developers review community contributions daily. You may take a look at the [team page](https://about.gitlab.com/company/team/) for the merge request coach who specializes in the type of code you have written and mention them in the merge request. For example, if you have written some JavaScript in your code then you should mention the frontend merge request coach. If your code has multiple disciplines you may mention multiple merge request coaches.
GitLab receives a lot of community contributions, so if your code has not been reviewed within two days (excluding weekend and public holidays) of its initial submission feel free to re-mention the appropriate merge request coach.
When submitting code to GitLab, you may feel that your contribution requires the aid of an external library. If your code includes an external library please provide a link to the library, as well as reasons for including it.
When your code contains more than 500 changes, any major breaking changes, or an external library, `@mention` a maintainer in the merge request. If you are not sure who to mention, the reviewer will add one early in the merge request process.
### Issues workflow
### Contribution flow
The general flow of contributing to GitLab is:
1. [Create a fork](../../user/project/repository/forking_workflow.md#creating-a-fork)
of GitLab. In some cases, you will want to set up the
[GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit) to
[develop against your fork](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/doc/index.md#develop-in-your-own-gitlab-fork).
1. Make your changes in your fork.
1. When you're ready, [create a new merge request](../../user/project/merge_requests/creating_merge_requests.md).
1. In the merge request's description:
- Ensure you provide complete and accurate information.
- Review the provided checklist.
1. Assign the merge request (if possible) to, or `@mention`, one of the
[code owners](../../user/project/code_owners.md) for the relevant project,
and explain that you are ready for review.
When you submit code to GitLab, we really want it to get merged! However, we always review
submissions carefully, and this takes time. Code submissions will usually be reviewed by two
[domain experts](../code_review.md#domain-experts) before being merged:
- A [reviewer](../code_review.md#the-responsibility-of-the-reviewer).
- A [maintainer](../code_review.md#the-responsibility-of-the-maintainer).
Keep the following in mind when submitting merge requests:
- When reviewers are reading through a merge request they may request guidance from other
reviewers.
- If the code quality is found to not meet GitLab’s standards, the merge request reviewer will
provide guidance and refer the author to our:
- [Documentation](../documentation/styleguide.md) style guide.
- Code style guides.
- Sometimes style guides will be followed but the code will lack structural integrity, or the
reviewer will have reservations about the code’s overall quality. When there is a reservation,
the reviewer will inform the author and provide some guidance.
- Though GitLab generally allows anyone to indicate
[approval](../../user/project/merge_requests/merge_request_approvals.md) of merge requests, the
maintainer may require [approvals from certain reviewers](../code_review.md#approval-guidelines)
before merging a merge request.
- After review, the author may be asked to update the merge request. Once the merge request has been
updated and reassigned to the reviewer, they will review the code again. This process may repeat
any number of times before merge, to help make the contribution the best it can be.
Sometimes a maintainer may choose to close a merge request. They will fully disclose why it will not
be merged, as well as some guidance. The maintainers will be open to discussion about how to change
the code so it can be approved and merged in the future.
GitLab will do its best to review community contributions as quickly as possible. Specially
appointed developers review community contributions daily. Look at the
[team page](https://about.gitlab.com/company/team/) for the merge request coach who specializes in
the type of code you have written and mention them in the merge request. For example, if you have
written some front-end code, you should `@mention` the frontend merge request coach. If
your code has multiple disciplines, you may `@mention` multiple merge request coaches.
GitLab receives a lot of community contributions. If your code has not been reviewed within two
working days of its initial submission, feel free to `@mention` all merge request coaches with
`@gitlab-org/coaches` to get their attention.
When submitting code to GitLab, you may feel that your contribution requires the aid of an external
library. If your code includes an external library, please provide a link to the library, as well as
reasons for including it.
`@mention` a maintainer in merge requests that contain:
- More than 500 changes.
- Any major breaking changes.
- External libraries.
If you are not sure who to mention, the reviewer will do this for you early in the merge request process.
#### Issues workflow
This [documentation](issue_workflow.md) outlines the current issue workflow:
......@@ -127,7 +182,7 @@ This [documentation](issue_workflow.md) outlines the current issue workflow:
- [Technical and UX debt](issue_workflow.md#technical-and-ux-debt)
- [Technical debt in follow-up issues](issue_workflow.md#technical-debt-in-follow-up-issues)
### Merge requests workflow
#### Merge requests workflow
This [documentation](merge_request_workflow.md) outlines the current merge request process.
......
......@@ -209,10 +209,19 @@ GitLab supports a limited set of [CI variables](../../../ci/variables/README.md)
- `ci_project_namespace`
- `ci_project_path`
- `ci_environment_name`
- `__range`
NOTE: **Note:**
Variables for Prometheus queries must be lowercase.
###### __range
The `__range` variable is useful in Prometheus
[range vector selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#range-vector-selectors).
Its value is the total number of seconds in the dashboard's time range.
For example, if the dashboard time range is set to 8 hours, the value of
`__range` is `28800s`.
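That value is simply the window length in whole seconds with an `s` suffix, mirroring the `range` helper added to `Gitlab::Prometheus::QueryVariables` further down in this commit. For example:

```ruby
require 'time'

start_time = Time.rfc3339('2020-05-29T08:00:00Z')
end_time   = Time.rfc3339('2020-05-29T16:00:00Z')

"#{(end_time - start_time).to_i}s" # => "28800s" for an 8-hour dashboard window
```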
##### User-defined variables
[Variables can be defined](#templating-templating-properties) in a custom dashboard YAML file.
......
......@@ -3,8 +3,10 @@
module Gitlab
module Prometheus
module QueryVariables
def self.call(environment)
# start_time and end_time should be Time objects.
def self.call(environment, start_time: nil, end_time: nil)
{
__range: range(start_time, end_time),
ci_environment_slug: environment.slug,
kube_namespace: environment.deployment_namespace || '',
environment_filter: %{container_name!="POD",environment="#{environment.slug}"},
......@@ -14,6 +16,16 @@ module Gitlab
ci_environment_name: environment.name
}
end
private
def self.range(start_time, end_time)
if start_time && end_time
range_seconds = (end_time - start_time).to_i
"#{range_seconds}s"
end
end
private_class_method :range
end
end
end
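A sketch of how the extended interface is called; here `environment` is a placeholder for an `Environment` record, and the timestamps span a one-hour window:

```ruby
# Sketch only; `environment` stands in for a real Environment record.
variables = Gitlab::Prometheus::QueryVariables.call(
  environment,
  start_time: Time.rfc3339('2020-05-29T15:00:00Z'),
  end_time: Time.rfc3339('2020-05-29T16:00:00Z')
)

variables[:__range]             # => "3600s"
variables[:ci_environment_slug] # => environment.slug
```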
......@@ -15332,6 +15332,9 @@ msgstr ""
msgid "Owner"
msgstr ""
msgid "Package Files"
msgstr ""
msgid "Package Registry"
msgstr ""
......@@ -22080,7 +22083,7 @@ msgstr ""
msgid "The vulnerability is no longer detected. Verify the vulnerability has been remediated before changing its status."
msgstr ""
msgid "There are no %{replicableType} to show"
msgid "There are no %{replicableTypeName} to show"
msgstr ""
msgid "There are no GPG keys associated with this account."
......
......@@ -7,8 +7,9 @@ describe Gitlab::Prometheus::QueryVariables do
let(:project) { environment.project }
let(:environment) { create(:environment) }
let(:slug) { environment.slug }
let(:params) { {} }
subject { described_class.call(environment) }
subject { described_class.call(environment, params) }
it { is_expected.to include(ci_environment_slug: slug) }
it { is_expected.to include(ci_project_name: project.name) }
......@@ -53,5 +54,42 @@ describe Gitlab::Prometheus::QueryVariables do
it { is_expected.to include(kube_namespace: kube_namespace) }
end
end
context '__range' do
context 'when start_time and end_time are present' do
let(:params) do
{
start_time: Time.rfc3339('2020-05-29T07:23:05.008Z'),
end_time: Time.rfc3339('2020-05-29T15:23:05.008Z')
}
end
it { is_expected.to include(__range: "#{8.hours.to_i}s") }
end
context 'when start_time and end_time are not present' do
it { is_expected.to include(__range: nil) }
end
context 'when end_time is not present' do
let(:params) do
{
start_time: Time.rfc3339('2020-05-29T07:23:05.008Z')
}
end
it { is_expected.to include(__range: nil) }
end
context 'when start_time is not present' do
let(:params) do
{
end_time: Time.rfc3339('2020-05-29T07:23:05.008Z')
}
end
it { is_expected.to include(__range: nil) }
end
end
end
end
......@@ -186,5 +186,19 @@ describe Prometheus::ProxyVariableSubstitutionService do
end
end
end
context '__range' do
let(:params_keys) do
{
query: 'topk(5, sum by (method) (rate(rest_client_requests_total[{{__range}}])))',
start_time: '2020-05-29T08:19:07.142Z',
end_time: '2020-05-29T16:19:07.142Z'
}
end
it_behaves_like 'success' do
let(:expected_query) { "topk(5, sum by (method) (rate(rest_client_requests_total[#{8.hours.to_i}s])))" }
end
end
end
end
......@@ -69,6 +69,18 @@ describe Snippets::BulkDestroyService do
it_behaves_like 'error is raised' do
let(:error_message) { "You don't have access to delete these snippets." }
end
context 'when hard_delete option is passed' do
subject { described_class.new(service_user, snippets).execute(hard_delete: true) }
it 'returns a ServiceResponse success response' do
expect(subject).to be_success
end
it 'deletes all the snippets that belong to the user' do
expect { subject }.to change(Snippet, :count).by(-2)
end
end
end
context 'when an error is raised deleting the repository' do
......
......@@ -67,6 +67,18 @@ describe Users::DestroyService do
end
end
it 'calls the bulk snippet destroy service with hard delete option if it is present' do
# this avoids getting into Projects::DestroyService as it would
# call Snippets::BulkDestroyService first!
allow(user).to receive(:personal_projects).and_return([])
expect_next_instance_of(Snippets::BulkDestroyService) do |bulk_destroy_service|
expect(bulk_destroy_service).to receive(:execute).with(hard_delete: true).and_call_original
end
service.execute(user, hard_delete: true)
end
it 'does not delete project snippets that the user is the author of' do
repo = create(:project_snippet, :repository, author: user).snippet_repository
service.execute(user)
......