Commit 0a22eaad authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

Parent 53758cb2
```diff
 <script>
-import { GlDrawer, GlLabel, GlAvatarLink, GlAvatarLabeled, GlLink } from '@gitlab/ui';
+import { GlDrawer, GlLabel } from '@gitlab/ui';
 import { mapActions, mapState } from 'vuex';
 import { __ } from '~/locale';
 import boardsStore from '~/boards/stores/boards_store';
@@ -15,16 +15,13 @@ export default {
   milestone: 'milestone',
   label: 'label',
   labelListText: __('Label'),
-  labelMilestoneText: __('Milestone'),
-  labelAssigneeText: __('Assignee'),
   components: {
     GlDrawer,
     GlLabel,
-    GlAvatarLink,
-    GlAvatarLabeled,
-    GlLink,
     BoardSettingsSidebarWipLimit: () =>
       import('ee_component/boards/components/board_settings_wip_limit.vue'),
+    BoardSettingsListTypes: () =>
+      import('ee_component/boards/components/board_settings_list_types.vue'),
   },
   computed: {
     ...mapState(['activeId']),
@@ -41,30 +38,11 @@ export default {
     activeListLabel() {
       return this.activeList.label;
     },
-    activeListMilestone() {
-      return this.activeList.milestone;
-    },
-    activeListAssignee() {
-      return this.activeList.assignee;
-    },
     boardListType() {
       return this.activeList.type || null;
     },
     listTypeTitle() {
-      switch (this.boardListType) {
-        case this.$options.milestone: {
-          return this.$options.labelMilestoneText;
-        }
-        case this.$options.label: {
-          return this.$options.labelListText;
-        }
-        case this.$options.assignee: {
-          return this.$options.labelAssigneeText;
-        }
-        default: {
-          return '';
-        }
-      }
+      return this.$options.labelListText;
     },
   },
   created() {
@@ -94,32 +72,20 @@ export default {
   >
     <template #header>{{ $options.listSettingsText }}</template>
     <template v-if="isSidebarOpen">
-      <div class="d-flex flex-column align-items-start">
-        <label class="js-list-label">{{ listTypeTitle }}</label>
-        <template v-if="boardListType === $options.label">
-          <gl-label
-            :title="activeListLabel.title"
-            :background-color="activeListLabel.color"
-            :scoped="showScopedLabels(activeListLabel)"
-          />
-        </template>
-        <template v-else-if="boardListType === $options.assignee">
-          <gl-avatar-link class="js-assignee" :href="activeListAssignee.webUrl">
-            <gl-avatar-labeled
-              :size="32"
-              :label="activeListAssignee.name"
-              :sub-label="`@${activeListAssignee.username}`"
-              :src="activeListAssignee.avatar"
-            />
-          </gl-avatar-link>
-        </template>
-        <template v-else-if="boardListType === $options.milestone">
-          <gl-link class="js-milestone" :href="activeListMilestone.webUrl">
-            {{ activeListMilestone.title }}
-          </gl-link>
-        </template>
+      <div v-if="boardListType === $options.label">
+        <label class="js-list-label gl-display-block">{{ listTypeTitle }}</label>
+        <gl-label
+          :title="activeListLabel.title"
+          :background-color="activeListLabel.color"
+          :scoped="showScopedLabels(activeListLabel)"
+        />
       </div>
+      <board-settings-list-types
+        v-else
+        :active-list="activeList"
+        :board-list-type="boardListType"
+      />
       <board-settings-sidebar-wip-limit :max-issue-count="activeList.maxIssueCount" />
     </template>
   </gl-drawer>
```
---
type: reference
---
# How to set up Consul **(PREMIUM ONLY)**
A Consul cluster consists of both
[server and client agents](https://www.consul.io/docs/agent).
The servers run on their own nodes and the clients run on other nodes that in
turn communicate with the servers.
GitLab Premium includes a bundled version of [Consul](https://www.consul.io/),
a service networking solution that you can manage by using `/etc/gitlab/gitlab.rb`.
## Configure the Consul nodes
NOTE: **Important:**
Before proceeding, refer to the
[available reference architectures](reference_architectures/index.md#available-reference-architectures)
to find out how many Consul server nodes you should have.
On **each** Consul server node perform the following:
1. Follow the instructions to [install](https://about.gitlab.com/install/)
GitLab by choosing your preferred platform, but do not supply the
`EXTERNAL_URL` value when asked.
1. Edit `/etc/gitlab/gitlab.rb`, and add the following, replacing the values
   noted in the `retry_join` section. In the example below, there are three
   nodes: two denoted by their IP addresses and one by its FQDN. You can use
   either notation:
```ruby
# Disable all components except Consul
roles ['consul_role']
# Consul nodes: can be FQDN or IP, separated by a whitespace
consul['configuration'] = {
server: true,
retry_join: %w(10.10.10.1 consul1.gitlab.example.com 10.10.10.2)
}
# Disable auto migrations
gitlab_rails['auto_migrate'] = false
```
1. [Reconfigure GitLab](restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes
to take effect.
1. Run the following command to ensure that Consul is configured correctly and
   that all server nodes are communicating:
```shell
sudo /opt/gitlab/embedded/bin/consul members
```
The output should be similar to:
```plaintext
Node Address Status Type Build Protocol DC
CONSUL_NODE_ONE XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
CONSUL_NODE_TWO XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
CONSUL_NODE_THREE XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
```
If the results display any nodes with a status that isn't `alive`, or if any
of the three nodes are missing, see the [Troubleshooting section](#troubleshooting-consul).
## Upgrade the Consul nodes
To upgrade your Consul nodes, upgrade the GitLab package.
Nodes should be:
- Members of a healthy cluster prior to upgrading the Omnibus GitLab package.
- Upgraded one node at a time.
Identify any existing health issues in the cluster by running the following command
on each node. The command returns an empty array if the cluster is healthy:
```shell
curl http://127.0.0.1:8500/v1/health/state/critical
```
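If any checks are failing, the endpoint instead returns a JSON array describing them. The entry below is illustrative only; the node and check names will match your own cluster:

```plaintext
[
  {
    "Node": "CONSUL_NODE_TWO",
    "CheckID": "service:postgresql",
    "Name": "Service 'postgresql' check",
    "Status": "critical",
    "ServiceName": "postgresql"
  }
]
```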
Consul nodes communicate using the Raft protocol. If the current leader goes
offline, there needs to be a leader election. A leader node must exist to facilitate
synchronization across the cluster. If too many nodes go offline at the same time,
the cluster will lose quorum and not elect a leader due to
[broken consensus](https://www.consul.io/docs/internals/consensus.html).
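To confirm which node is the current Raft leader, you can also query the operator API from any server node. This is a read-only check; on older bundled Consul versions the flag form `consul operator raft -list-peers` may be required:

```shell
sudo /opt/gitlab/embedded/bin/consul operator raft list-peers
```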
Consult the [troubleshooting section](#troubleshooting-consul) if the cluster is not
able to recover after the upgrade. The [outage recovery](#outage-recovery) may
be of particular interest.
NOTE: **Note:**
GitLab uses Consul to store only transient data that is easily regenerated. If
the bundled Consul was not used by any process other than GitLab itself, then
[rebuilding the cluster from scratch](#recreate-from-scratch) is fine.
## Troubleshooting Consul
Below are some useful operations should you need to debug any issues.
You can see any error logs by running:
```shell
sudo gitlab-ctl tail consul
```
### Check the cluster membership
To determine which nodes are part of the cluster, run the following on any member in the cluster:
```shell
sudo /opt/gitlab/embedded/bin/consul members
```
The output should be similar to:
```plaintext
Node Address Status Type Build Protocol DC
consul-a XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
consul-b XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
consul-c XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
db-a XX.XX.X.Y:8301 alive client 0.9.0 2 gitlab_consul
db-b XX.XX.X.Y:8301 alive client 0.9.0 2 gitlab_consul
```
Ideally all nodes will have a `Status` of `alive`.
### Restart Consul
If it is necessary to restart Consul, it is important to do this in
a controlled manner to maintain quorum. If quorum is lost, you will need to follow
the Consul [outage recovery](#outage-recovery) process to recover the cluster.
To be safe, it's recommended that you only restart Consul on one node at a time to
ensure the cluster remains intact. For larger clusters, it is possible to restart
multiple nodes at a time. See the
[Consul consensus document](https://www.consul.io/docs/internals/consensus.html#deployment-table)
for how many failures it can tolerate. This will be the number of simultaneous
restarts it can sustain.
To restart Consul:
```shell
sudo gitlab-ctl restart consul
```
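A cautious rolling restart could therefore look like the following on each server node in turn, waiting for the cluster to report healthy before moving to the next node (a sketch; adapt it to your own process):

```shell
sudo gitlab-ctl restart consul

# Wait until the restarted node reports as alive again.
sudo /opt/gitlab/embedded/bin/consul members

# An empty array means no critical health checks remain.
curl http://127.0.0.1:8500/v1/health/state/critical
```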
### Consul nodes unable to communicate
By default, Consul will attempt to
[bind](https://www.consul.io/docs/agent/options.html#_bind) to `0.0.0.0`, but
it will advertise the first private IP address on the node for other Consul nodes
to communicate with it. If the other nodes cannot communicate with a node on
this address, then the cluster will have a failed status.
If you are running into this issue, you will see messages like the following in `gitlab-ctl tail consul` output:
```plaintext
2017-09-25_19:53:39.90821 2017/09/25 19:53:39 [WARN] raft: no known peers, aborting election
2017-09-25_19:53:41.74356 2017/09/25 19:53:41 [ERR] agent: failed to sync remote state: No cluster leader
```
To fix this:
1. Pick an address on each node that all of the other nodes can reach.
1. Update your `/etc/gitlab/gitlab.rb`:
```ruby
consul['configuration'] = {
...
bind_addr: 'IP ADDRESS'
}
```
1. Reconfigure GitLab:
```shell
gitlab-ctl reconfigure
```
If you still see the errors, you may have to
[erase the Consul database and reinitialize](#recreate-from-scratch) on the affected node.
### Consul does not start - multiple private IPs
If a node has multiple private IP addresses, Consul cannot determine which
address to advertise, and it will immediately exit on start.
You will see messages like the following in `gitlab-ctl tail consul` output:
```plaintext
2017-11-09_17:41:45.52876 ==> Starting Consul agent...
2017-11-09_17:41:45.53057 ==> Error creating agent: Failed to get advertise address: Multiple private IPs found. Please configure one.
```
To fix this:
1. Pick an address on the node that all of the other nodes can reach.
1. Update your `/etc/gitlab/gitlab.rb`:
```ruby
consul['configuration'] = {
...
bind_addr: 'IP ADDRESS'
}
```
1. Reconfigure GitLab:
```shell
gitlab-ctl reconfigure
```
### Outage recovery
If you lost enough Consul nodes in the cluster to break quorum, then the cluster
is considered failed, and it will not function without manual intervention.
In that case, you can either recreate the nodes from scratch or attempt a
recovery.
#### Recreate from scratch
By default, GitLab does not store anything in the Consul node that cannot be
recreated. To erase the Consul database and reinitialize:
```shell
sudo gitlab-ctl stop consul
sudo rm -rf /var/opt/gitlab/consul/data
sudo gitlab-ctl start consul
```
After this, the node should start back up, and the rest of the server agents should rejoin.
Shortly after that, the client agents should rejoin as well.
#### Recover a failed node
If you have taken advantage of Consul to store other data and want to restore
the failed node, follow the
[Consul guide](https://learn.hashicorp.com/consul/day-2-operations/outage)
to recover a failed cluster.
```diff
@@ -137,7 +137,7 @@ documentation:
    synchronized from the **primary** node.
 
 NOTE: **Note:**
-[NFS](../../high_availability/nfs.md) can be used in place of Gitaly but is not
+[NFS](../../nfs.md) can be used in place of Gitaly but is not
 recommended.
 
 ### Step 2: Configure the main read-only replica PostgreSQL database on the **secondary** node
@@ -304,9 +304,9 @@ In the architecture overview, there are two machines running the GitLab
 application services. These services are enabled selectively in the
 configuration.
 
-Configure the application servers following
-[Configuring GitLab for multiple nodes](../../high_availability/gitlab.md), then make the
-following modifications:
+Configure the GitLab Rails application servers following the relevant steps
+outlined in the [reference architectures](../../reference_architectures/index.md),
+then make the following modifications:
 
 1. Edit `/etc/gitlab/gitlab.rb` on each application server in the **secondary**
    cluster, and add the following:
@@ -393,7 +393,7 @@ application servers.
 In this topology, a load balancer is required at each geographic location to
 route traffic to the application servers.
 
-See [Load Balancer for GitLab with multiple nodes](../../high_availability/load_balancer.md) for
+See [Load Balancer for GitLab with multiple nodes](../../load_balancer.md) for
 more information.
 
 ### Step 6: Configure the backend application servers on the **secondary** node
@@ -376,7 +376,7 @@ This can be risky because anything that prevents your Gitaly clients from reachi
 servers will cause all Gitaly requests to fail. For example, any sort of network, firewall, or name
 resolution problems.
 
-Additionally, you must [disable Rugged](../high_availability/nfs.md#improving-nfs-performance-with-gitlab)
+Additionally, you must [disable Rugged](../nfs.md#improving-nfs-performance-with-gitlab)
 if previously enabled manually.
 
 Gitaly makes the following assumptions:
@@ -2,4 +2,4 @@
 redirect_to: 'database.md'
 ---
 
-This document was moved to [another location](database.md).
+This document was moved to [another location](../postgresql/index.md).
```
---
redirect_to: ../consul.md
---

This document was moved to [another location](../consul.md).

---
type: reference
---

# Working with the bundled Consul service **(PREMIUM ONLY)**
As part of its High Availability stack, GitLab Premium includes a bundled version of [Consul](https://www.consul.io/) that can be managed through `/etc/gitlab/gitlab.rb`. Consul is a service networking solution. Within the [GitLab architecture](../../development/architecture.md), Consul is supported for configuring:
1. [Monitoring in Scaled and Highly Available environments](monitoring_node.md)
1. [PostgreSQL High Availability with Omnibus](../postgresql/replication_and_failover.md)
A Consul cluster consists of multiple server agents, as well as client agents that run on other nodes which need to talk to the Consul cluster.
## Prerequisites
First, make sure to [download/install](https://about.gitlab.com/install/)
Omnibus GitLab **on each node**.
Choose an installation method, then make sure you complete the following steps:
1. Install and configure the necessary dependencies.
1. Add the GitLab package repository and install the package.
When installing the GitLab package, do not supply the `EXTERNAL_URL` value.
## Configuring the Consul nodes
On each Consul node perform the following:
1. Make sure you collect [`CONSUL_SERVER_NODES`](../postgresql/replication_and_failover.md#consul-information), which are the IP addresses or DNS records of the Consul server nodes, before executing the next step.
1. Edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
```ruby
# Disable all components except Consul
roles ['consul_role']
# START user configuration
# Replace placeholders:
#
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses gathered for CONSUL_SERVER_NODES
consul['configuration'] = {
server: true,
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z)
}
# Disable auto migrations
gitlab_rails['auto_migrate'] = false
#
# END user configuration
```
> `consul_role` was introduced with GitLab 10.3
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes
to take effect.
### Consul checkpoint
Before moving on, make sure Consul is configured correctly. Run the following
command to verify all server nodes are communicating:
```shell
/opt/gitlab/embedded/bin/consul members
```
The output should be similar to:
```plaintext
Node Address Status Type Build Protocol DC
CONSUL_NODE_ONE XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
CONSUL_NODE_TWO XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
CONSUL_NODE_THREE XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
```
If any of the nodes isn't `alive` or if any of the three nodes are missing,
check the [Troubleshooting section](#troubleshooting) before proceeding.
## Operations
### Checking cluster membership
To see which nodes are part of the cluster, run the following on any member in the cluster:
```shell
$ /opt/gitlab/embedded/bin/consul members
Node Address Status Type Build Protocol DC
consul-a XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
consul-b XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
consul-c XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
db-a XX.XX.X.Y:8301 alive client 0.9.0 2 gitlab_consul
db-b XX.XX.X.Y:8301 alive client 0.9.0 2 gitlab_consul
```
Ideally all nodes will have a `Status` of `alive`.
### Restarting the server cluster
NOTE: **Note:**
This section only applies to server agents. It is safe to restart client agents whenever needed.
If it is necessary to restart the server cluster, it is important to do this in a controlled fashion in order to maintain quorum. If quorum is lost, you will need to follow the Consul [outage recovery](#outage-recovery) process to recover the cluster.
To be safe, we recommend you only restart one server agent at a time to ensure the cluster remains intact.
For larger clusters, it is possible to restart multiple agents at a time. See the [Consul consensus document](https://www.consul.io/docs/internals/consensus.html#deployment-table) for how many failures it can tolerate. This will be the number of simultaneous restarts it can sustain.
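For reference, the deployment table follows from quorum being a majority of servers: with `n` servers, quorum is `(n / 2) + 1`, and the failure tolerance is `n` minus quorum:

```plaintext
Servers  Quorum  Failure tolerance
3        2       1
5        3       2
7        4       3
```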
## Upgrades for bundled Consul
Nodes running GitLab-bundled Consul should be:
- Members of a healthy cluster prior to upgrading the Omnibus GitLab package.
- Upgraded one node at a time.
NOTE: **Note:**
Running `curl http://127.0.0.1:8500/v1/health/state/critical` from any Consul node will identify existing health issues in the cluster. The command will return an empty array if the cluster is healthy.
Consul clusters communicate using the raft protocol. If the current leader goes offline, there needs to be a leader election. A leader node must exist to facilitate synchronization across the cluster. If too many nodes go offline at the same time, the cluster will lose quorum and not elect a leader due to [broken consensus](https://www.consul.io/docs/internals/consensus.html).
Consult the [troubleshooting section](#troubleshooting) if the cluster is not able to recover after the upgrade. The [outage recovery](#outage-recovery) may be of particular interest.
NOTE: **Note:**
GitLab only uses Consul to store transient data that is easily regenerated. If the bundled Consul was not used by any process other than GitLab itself, then [rebuilding the cluster from scratch](#recreate-from-scratch) is fine.
## Troubleshooting
### Consul server agents unable to communicate
By default, the server agents will attempt to [bind](https://www.consul.io/docs/agent/options.html#_bind) to '0.0.0.0', but they will advertise the first private IP address on the node for other agents to communicate with them. If the other nodes cannot communicate with a node on this address, then the cluster will have a failed status.
You will see messages like the following in `gitlab-ctl tail consul` output if you are running into this issue:
```plaintext
2017-09-25_19:53:39.90821 2017/09/25 19:53:39 [WARN] raft: no known peers, aborting election
2017-09-25_19:53:41.74356 2017/09/25 19:53:41 [ERR] agent: failed to sync remote state: No cluster leader
```
To fix this:
1. Pick an address on each node that all of the other nodes can reach.
1. Update your `/etc/gitlab/gitlab.rb`:
```ruby
consul['configuration'] = {
...
bind_addr: 'IP ADDRESS'
}
```
1. Run `gitlab-ctl reconfigure`
If you still see the errors, you may have to [erase the Consul database and reinitialize](#recreate-from-scratch) on the affected node.
### Consul agents do not start - Multiple private IPs
If a node has multiple private IP addresses, the agent cannot determine which address to advertise, and it will immediately exit on start.
You will see messages like the following in `gitlab-ctl tail consul` output if you are running into this issue:
```plaintext
2017-11-09_17:41:45.52876 ==> Starting Consul agent...
2017-11-09_17:41:45.53057 ==> Error creating agent: Failed to get advertise address: Multiple private IPs found. Please configure one.
```
To fix this:
1. Pick an address on the node that all of the other nodes can reach.
1. Update your `/etc/gitlab/gitlab.rb`:
```ruby
consul['configuration'] = {
...
bind_addr: 'IP ADDRESS'
}
```
1. Run `gitlab-ctl reconfigure`
### Outage recovery
If you lost enough server agents in the cluster to break quorum, then the cluster is considered failed, and it will not function without manual intervention.
#### Recreate from scratch
By default, GitLab does not store anything in the Consul cluster that cannot be recreated. To erase the Consul database and reinitialize:
```shell
gitlab-ctl stop consul
rm -rf /var/opt/gitlab/consul/data
gitlab-ctl start consul
```
After this, the cluster should start back up, and the server agents rejoin. Shortly after that, the client agents should rejoin as well.
#### Recover a failed cluster
If you have taken advantage of Consul to store other data and want to restore the failed cluster, follow the [Consul guide](https://learn.hashicorp.com/consul/day-2-operations/outage) to recover a failed cluster.
---
redirect_to: ../gitaly/index.md
---

This document was moved to [another location](../gitaly/index.md).

---
stage: Create
group: Gitaly
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers
type: reference
---

# Configuring Gitaly for Scaled and High Availability
A [Gitaly Cluster](../gitaly/praefect.md) can be used to increase the fault
tolerance of Gitaly in high availability configurations.
This document is relevant for [scalable and highly available setups](../reference_architectures/index.md).
## Running Gitaly on its own server
See [Run Gitaly on its own server](../gitaly/index.md#run-gitaly-on-its-own-server)
in Gitaly documentation.
Continue configuration of other components by going back to the
[reference architecture](../reference_architectures/index.md#configure-gitlab-to-scale) page.
## Enable Monitoring
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
1. Make sure to collect [`CONSUL_SERVER_NODES`](../postgresql/replication_and_failover.md#consul-information), which are the IP addresses or DNS records of the Consul server nodes, for the next step. Note they are presented as `Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z`
1. Create/edit `/etc/gitlab/gitlab.rb` and add the following configuration:
```ruby
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '0.0.0.0:9100'
gitaly['prometheus_listen_addr'] = "0.0.0.0:9236"
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
---
redirect_to: ../reference_architectures/index.md
---

This document was moved to [another location](../reference_architectures/index.md).

---
type: reference
---

# Configuring GitLab application (Rails)
This section describes how to configure the GitLab application (Rails) component.
NOTE: **Note:**
There is some additional configuration near the bottom for
additional GitLab application servers. It's important to read and understand
these additional steps before proceeding with GitLab installation.
NOTE: **Note:**
[Cloud Object Storage service](object_storage.md) with [Gitaly](gitaly.md)
is recommended over [NFS](nfs.md) wherever possible for improved performance.
1. If necessary, install the NFS client utility packages using the following
commands:
```shell
# Ubuntu/Debian
apt-get install nfs-common
# CentOS/Red Hat
yum install nfs-utils nfs-utils-lib
```
1. Specify the necessary NFS exports in `/etc/fstab`.
The exact contents of `/etc/fstab` will depend on how you chose
to configure your NFS server. See [NFS documentation](nfs.md#nfs-client-mount-options)
for examples and the various options.
1. Create the shared directories. These may be different depending on your NFS
mount locations.
```shell
mkdir -p /var/opt/gitlab/.ssh /var/opt/gitlab/gitlab-rails/uploads /var/opt/gitlab/gitlab-rails/shared /var/opt/gitlab/gitlab-ci/builds /var/opt/gitlab/git-data
```
1. Download/install Omnibus GitLab using **steps 1 and 2** from
[GitLab downloads](https://about.gitlab.com/install/). Do not complete other
steps on the download page.
1. Create/edit `/etc/gitlab/gitlab.rb` and use the following configuration.
Be sure to change the `external_url` to match your eventual GitLab front-end
URL. Depending on your NFS configuration, you may need to change some GitLab
data locations. See [NFS documentation](nfs.md) for `/etc/gitlab/gitlab.rb`
configuration values for various scenarios. The example below assumes you've
added NFS mounts in the default data locations. Additionally the UID and GIDs
given are just examples and you should configure with your preferred values.
```ruby
external_url 'https://gitlab.example.com'
# Prevent GitLab from starting if NFS data mounts are not available
high_availability['mountpoint'] = '/var/opt/gitlab/git-data'
# Disable components that will not be on the GitLab application server
roles ['application_role']
nginx['enable'] = true
# PostgreSQL connection details
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_encoding'] = 'unicode'
gitlab_rails['db_host'] = '10.1.0.5' # IP/hostname of database server
gitlab_rails['db_password'] = 'DB password'
# Redis connection details
gitlab_rails['redis_port'] = '6379'
gitlab_rails['redis_host'] = '10.1.0.6' # IP/hostname of Redis server
gitlab_rails['redis_password'] = 'Redis Password'
# Ensure UIDs and GIDs match between servers for permissions via NFS
user['uid'] = 9000
user['gid'] = 9000
web_server['uid'] = 9001
web_server['gid'] = 9001
registry['uid'] = 9002
registry['gid'] = 9002
```
1. [Enable monitoring](#enable-monitoring)
NOTE: **Note:**
To maintain uniformity of links across HA clusters, the `external_url`
on the first application server as well as the additional application
servers should point to the external URL that users will use to access GitLab.
In a typical HA setup, this will be the URL of the load balancer which will
route traffic to all GitLab application servers in the HA cluster.
NOTE: **Note:**
When you specify `https` in the `external_url`, as in the example
above, GitLab assumes you have SSL certificates in `/etc/gitlab/ssl/`. If
certificates are not present, NGINX will fail to start. See
[NGINX documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for more information.
NOTE: **Note:**
It is best to set the `uid` and `gid` values prior to the initial reconfigure
of GitLab. Omnibus will not recursively `chown` directories if they are set after the initial reconfigure.
## First GitLab application server
On the first application server, run:
```shell
sudo gitlab-ctl reconfigure
```
This should compile the configuration and initialize the database. Do
not run this on additional application servers until the next step.
## Extra configuration for additional GitLab application servers
Additional GitLab servers (servers configured **after** the first GitLab server)
need some extra configuration.
1. Configure shared secrets. These values can be obtained from the primary
GitLab server in `/etc/gitlab/gitlab-secrets.json`. Copy this file to the
secondary servers **prior to** running the first `reconfigure` in the steps
above.
```ruby
gitlab_shell['secret_token'] = 'fbfb19c355066a9afb030992231c4a363357f77345edd0f2e772359e5be59b02538e1fa6cae8f93f7d23355341cea2b93600dab6d6c3edcdced558fc6d739860'
gitlab_rails['otp_key_base'] = 'b719fe119132c7810908bba18315259ed12888d4f5ee5430c42a776d840a396799b0a5ef0a801348c8a357f07aa72bbd58e25a84b8f247a25c72f539c7a6c5fa'
gitlab_rails['secret_key_base'] = '6e657410d57c71b4fc3ed0d694e7842b1895a8b401d812c17fe61caf95b48a6d703cb53c112bc01ebd197a85da81b18e29682040e99b4f26594772a4a2c98c6d'
gitlab_rails['db_key_base'] = 'bf2e47b68d6cafaef1d767e628b619365becf27571e10f196f98dc85e7771042b9203199d39aff91fcb6837c8ed83f2a912b278da50999bb11a2fbc0fba52964'
```
1. Run `touch /etc/gitlab/skip-auto-reconfigure` to prevent database migrations
from running on upgrade. Only the primary GitLab application server should
handle migrations.
1. **Recommended** Configure host keys. Copy the contents (private and public keys) of `/etc/ssh/` on
the primary application server to `/etc/ssh` on all secondary servers. This
prevents false man-in-the-middle-attack alerts when accessing servers in your
High Availability cluster behind a load balancer (see the sketch after this list).
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
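As an illustration of the secrets and host key steps above, copying from the primary to one secondary could look like this (a sketch only; `app2.gitlab.example.com` is a placeholder for each additional server):

```shell
# Copy shared secrets before the first reconfigure on the secondary server.
scp /etc/gitlab/gitlab-secrets.json root@app2.gitlab.example.com:/etc/gitlab/

# Copy SSH host keys (private and public) to prevent false
# man-in-the-middle-attack alerts behind the load balancer.
scp /etc/ssh/ssh_host_*_key* root@app2.gitlab.example.com:/etc/ssh/
```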
NOTE: **Note:**
You will need to restart the GitLab application nodes after an update has occurred and database
migrations performed.
## Enable Monitoring
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
If you enable Monitoring, it must be enabled on **all** GitLab servers.
1. Make sure to collect [`CONSUL_SERVER_NODES`](../postgresql/replication_and_failover.md#consul-information), which are the IP addresses or DNS records of the Consul server nodes, for the next step. Note they are presented as `Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z`
1. Create/edit `/etc/gitlab/gitlab.rb` and add the following configuration:
```ruby
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '0.0.0.0:9100'
gitlab_workhorse['prometheus_listen_addr'] = '0.0.0.0:9229'
sidekiq['listen_address'] = "0.0.0.0"
puma['listen'] = '0.0.0.0'
# Add the monitoring node's IP address to the monitoring list that allows it to
# scrape the NGINX metrics. Replace placeholder `monitoring.gitlab.example.com` with
# the address and/or subnets gathered from the monitoring node(s).
gitlab_rails['monitoring_whitelist'] = ['monitoring.gitlab.example.com', '127.0.0.0/8']
nginx['status']['options']['allow'] = ['monitoring.gitlab.example.com', '127.0.0.0/8']
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
CAUTION: **Warning:**
If running Unicorn, after changing `unicorn['listen']` in `gitlab.rb`, and
running `sudo gitlab-ctl reconfigure`, it can take an extended period of time
for Unicorn to complete reloading after receiving a `HUP`. For more
information, see the
[issue](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/4401).
## Troubleshooting
- `mount: wrong fs type, bad option, bad superblock on`
You have not installed the necessary NFS client utilities. See step 1 above.
- `mount: mount point /var/opt/gitlab/... does not exist`
This particular directory does not exist on the NFS server. Ensure
the share is exported and exists on the NFS server and try to remount.
---
## Upgrading GitLab HA
GitLab HA installations can be upgraded with no downtime, but the
upgrade process must be carefully coordinated to avoid failures. See the
[Omnibus GitLab multi-node upgrade
document](https://docs.gitlab.com/omnibus/update/#multi-node--ha-deployment)
for more details.
Read more on high-availability configuration:
1. [Configure the database](../postgresql/replication_and_failover.md)
1. [Configure Redis](redis.md)
1. [Configure NFS](nfs.md)
1. [Configure the load balancers](load_balancer.md)
---
redirect_to: ../load_balancer.md
---

This document was moved to [another location](../load_balancer.md).

---
type: reference
---

# Load Balancer for multi-node GitLab
In a multi-node GitLab configuration, you will need a load balancer to route
traffic to the application servers. The specifics on which load balancer to use
or the exact configuration is beyond the scope of GitLab documentation. We hope
that if you're managing HA systems like GitLab you have a load balancer of
choice already. Some examples include HAProxy (open-source), F5 Big-IP LTM,
and Citrix NetScaler. This documentation will outline what ports and protocols
you need to use with GitLab.
## SSL
How will you handle SSL in your multi-node environment? There are several different
options:
- Each application node terminates SSL
- The load balancer(s) terminate SSL and communication is not secure between
the load balancer(s) and the application nodes
- The load balancer(s) terminate SSL and communication is *secure* between the
load balancer(s) and the application nodes
### Application nodes terminate SSL
Configure your load balancer(s) to pass connections on port 443 as 'TCP' rather
than 'HTTP(S)' protocol. This will pass the connection to the application node's
NGINX service untouched. NGINX will have the SSL certificate and listen on port 443.
See [NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
### Load Balancer(s) terminate SSL without backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will then be responsible for managing SSL certificates and
terminating SSL.
Since communication between the load balancer(s) and GitLab will not be secure,
there is some additional configuration needed. See
[NGINX Proxied SSL documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl)
for details.
### Load Balancer(s) terminate SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will be responsible for managing SSL certificates that
end users will see.
Traffic will also be secure between the load balancer(s) and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection will be secure all the way. However, configuration will need to be
added to GitLab to configure SSL certificates. See
[NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
## Ports
### Basic ports
| LB Port | Backend Port | Protocol |
| ------- | ------------ | ------------------------ |
| 80 | 80 | HTTP (*1*) |
| 443 | 443 | TCP or HTTPS (*1*) (*2*) |
| 22 | 22 | TCP |
- (*1*): [Web terminal](../../ci/environments/index.md#web-terminals) support requires
your load balancer to correctly handle WebSocket connections. When using
HTTP or HTTPS proxying, this means your load balancer must be configured
to pass through the `Connection` and `Upgrade` hop-by-hop headers. See the
[web terminal](../integration/terminal.md) integration guide for
more details.
- (*2*): When using HTTPS protocol for port 443, you will need to add an SSL
certificate to the load balancers. If you wish to terminate SSL at the
GitLab application server instead, use TCP protocol.
### GitLab Pages Ports
If you're using GitLab Pages with custom domain support you will need some
additional port configurations.
GitLab Pages requires a separate virtual IP address. Configure DNS to point the
`pages_external_url` from `/etc/gitlab/gitlab.rb` at the new virtual IP address. See the
[GitLab Pages documentation](../pages/index.md) for more information.
| LB Port | Backend Port | Protocol |
| ------- | ------------- | --------- |
| 80 | Varies (*1*) | HTTP |
| 443 | Varies (*1*) | TCP (*2*) |
- (*1*): The backend port for GitLab Pages depends on the
`gitlab_pages['external_http']` and `gitlab_pages['external_https']`
setting. See [GitLab Pages documentation](../pages/index.md) for more details.
- (*2*): Port 443 for GitLab Pages should always use the TCP protocol. Users can
configure custom domains with custom SSL, which would not be possible
if SSL was terminated at the load balancer.
### Alternate SSH Port
Some organizations have policies against opening SSH port 22. In this case,
it may be helpful to configure an alternate SSH hostname that allows users
to use SSH on port 443. An alternate SSH hostname will require a new virtual IP address
compared to the other GitLab HTTP configuration above.
Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
| LB Port | Backend Port | Protocol |
| ------- | ------------ | -------- |
| 443 | 22 | TCP |
## Readiness check
It is strongly recommended that multi-node deployments configure load balancers to use the [readiness check](../../user/admin_area/monitoring/health_check.md#readiness) to ensure a node is ready to accept traffic before routing traffic to it. This is especially important when utilizing Puma, as there is a brief period during a restart where Puma will not accept requests.
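A probe against the readiness endpoint might look like the following; `gitlab.example.com` is a placeholder, and the probing address must be included in the node's monitoring allowlist:

```shell
curl "https://gitlab.example.com/-/readiness"
# A node that is ready to serve traffic responds with HTTP 200 and a JSON
# body reporting the status of each subsystem.
```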
---
Read more on high-availability configuration:
1. [Configure the database](../postgresql/replication_and_failover.md)
1. [Configure Redis](redis.md)
1. [Configure NFS](nfs.md)
1. [Configure the GitLab application servers](gitlab.md)
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
---
redirect_to: ../monitoring/prometheus/index.md
---

This document was moved to [another location](../monitoring/prometheus/index.md).

---
type: reference
---

# Configuring a Monitoring node for Scaling and High Availability
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
You can configure a Prometheus node to monitor GitLab.
## Standalone Monitoring node using Omnibus GitLab
The Omnibus GitLab package can be used to configure a standalone Monitoring node running [Prometheus](../monitoring/prometheus/index.md) and [Grafana](../monitoring/performance/grafana_configuration.md).
The monitoring node is not highly available. See [Scaling and High Availability](../reference_architectures/index.md)
for an overview of GitLab scaling and high availability options.
The steps below are the minimum necessary to configure a Monitoring node running Prometheus and Grafana with
Omnibus:
1. SSH into the Monitoring node.
1. [Download/install](https://about.gitlab.com/install/) the Omnibus GitLab
package you want using **steps 1 and 2** from the GitLab downloads page.
- Do not complete any other steps on the download page.
1. Make sure to collect [`CONSUL_SERVER_NODES`](../postgresql/replication_and_failover.md#consul-information), which are the IP addresses or DNS records of the Consul server nodes, for the next step. Note they are presented as `Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z`
1. Edit `/etc/gitlab/gitlab.rb` and add the contents:
```ruby
external_url 'http://gitlab.example.com'
# Enable Prometheus
prometheus['enable'] = true
prometheus['listen_address'] = '0.0.0.0:9090'
prometheus['monitor_kubernetes'] = false
# Enable Login form
grafana['disable_login_form'] = false
# Enable Grafana
grafana['enable'] = true
grafana['admin_password'] = 'toomanysecrets'
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Disable all other services
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_exporter['enable'] = false
gitlab_workhorse['enable'] = false
nginx['enable'] = true
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
sidekiq['enable'] = false
puma['enable'] = false
node_exporter['enable'] = false
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
The next step is to tell all the other nodes where the monitoring node is:
1. Edit `/etc/gitlab/gitlab.rb`, and add, or find and uncomment the following line:
```ruby
gitlab_rails['prometheus_address'] = '10.0.0.1:9090'
```
Where `10.0.0.1:9090` is the IP address and port of the Prometheus node.
1. Save the file and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to
take effect.
## Migrating to Service Discovery
Once monitoring using Service Discovery is enabled with `consul['monitoring_service_discovery'] = true`,
ensure that `prometheus['scrape_configs']` is not set in `/etc/gitlab/gitlab.rb`. Setting both
`consul['monitoring_service_discovery'] = true` and `prometheus['scrape_configs']` in `/etc/gitlab/gitlab.rb`
will result in errors.
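In other words, the static scrape configuration must be removed rather than left alongside service discovery. A minimal sketch of the relevant part of `/etc/gitlab/gitlab.rb` after the migration:

```ruby
consul['enable'] = true
consul['monitoring_service_discovery'] = true

# Any static scrape configuration must be removed or commented out:
# prometheus['scrape_configs'] = [ ... ]
```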
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
---
redirect_to: ../nfs.md
---

This document was moved to [another location](../nfs.md).

---
type: reference
---

# NFS
You can view information and options set for each of the mounted NFS file
systems by running `nfsstat -m` and `cat /etc/fstab`.
CAUTION: **Caution:**
From GitLab 13.0, using NFS for Git repositories is deprecated. In GitLab 14.0,
support for NFS for Git repositories is scheduled to be removed. Upgrade to
[Gitaly Cluster](../gitaly/praefect.md) as soon as possible.
NOTE: **Note:**
Filesystem performance has a big impact on overall GitLab
performance, especially for actions that read or write to Git repositories. See
[Filesystem Performance Benchmarking](../operations/filesystem_benchmarking.md)
for steps to test filesystem performance.
## Known kernel version incompatibilities
RedHat Enterprise Linux (RHEL) and CentOS v7.7 and v7.8 ship with kernel
version `3.10.0-1127`, which [contains a
bug](https://bugzilla.redhat.com/show_bug.cgi?id=1783554) that causes
[uploads to fail to copy over NFS](https://gitlab.com/gitlab-org/gitlab/-/issues/218999). The
following GitLab versions include a fix to work properly with that
kernel version:
1. [12.10.12](https://about.gitlab.com/releases/2020/06/25/gitlab-12-10-12-released/)
1. [13.0.7](https://about.gitlab.com/releases/2020/06/25/gitlab-13-0-7-released/)
1. [13.1.1](https://about.gitlab.com/releases/2020/06/24/gitlab-13-1-1-released/)
1. 13.2 and up
If you are using that kernel version, be sure to upgrade GitLab to avoid
errors.
## Fast lookup of authorized SSH keys
The [fast SSH key lookup](../operations/fast_ssh_key_lookup.md) feature can improve
performance of GitLab instances even if they're using block storage.
[Fast SSH key lookup](../operations/fast_ssh_key_lookup.md) is a replacement for
`authorized_keys` (in `/var/opt/gitlab/.ssh`) using the GitLab database.
NFS increases latency, so fast lookup is recommended if `/var/opt/gitlab`
is moved to NFS.
We are investigating the use of
[fast lookup as the default](https://gitlab.com/groups/gitlab-org/-/epics/3104).
## NFS Server features
### Required features
**File locking**: GitLab **requires** advisory file locking, which is only
supported natively in NFS version 4. NFSv3 also supports locking as long as
Linux Kernel 2.6.5+ is used. We recommend using version 4 and do not
specifically test NFSv3.
### Recommended options
When you define your NFS exports, we recommend you also add the following
options:
- `no_root_squash` - NFS normally changes the `root` user to `nobody`. This is
a good security measure when NFS shares will be accessed by many different
users. However, in this case only GitLab will use the NFS share so it
is safe. GitLab recommends the `no_root_squash` setting because we need to
manage file permissions automatically. Without the setting you may receive
errors when the Omnibus package tries to alter permissions. Note that GitLab
and other bundled components do **not** run as `root` but as non-privileged
users. The recommendation for `no_root_squash` is to allow the Omnibus package
to set ownership and permissions on files, as needed. In some cases where the
`no_root_squash` option is not available, the `root` flag can achieve the same
result.
- `sync` - Force synchronous behavior. Default is asynchronous and under certain
circumstances it could lead to data loss if a failure occurs before data has
synced.
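As an illustration, an `/etc/exports` entry that combines the options above might look like this (the export path and client subnet are placeholders for your own values):

```plaintext
/gitlab-data 10.0.0.0/24(rw,sync,no_root_squash,no_subtree_check)
```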
Due to the complexities of running Omnibus with LDAP and the complexities of
maintaining ID mapping without LDAP, in most cases you should enable numeric UIDs
and GIDs (which is off by default in some cases) for simplified permission
management between systems:
- [NetApp instructions](https://library.netapp.com/ecmdocs/ECMP1401220/html/GUID-24367A9F-E17B-4725-ADC1-02D86F56F78E.html)
- For non-NetApp devices, disable NFSv4 `idmapping` by performing opposite of [enable NFSv4 idmapper](https://wiki.archlinux.org/index.php/NFS#Enabling_NFSv4_idmapping)
### Disable NFS server delegation
We recommend that all NFS users disable the NFS server delegation feature. This
is to avoid a [Linux kernel bug](https://bugzilla.redhat.com/show_bug.cgi?id=1552203)
which causes NFS clients to slow precipitously due to
[excessive network traffic from numerous `TEST_STATEID` NFS messages](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/52017).
To disable NFS server delegation, do the following:
1. On the NFS server, run:
```shell
echo 0 > /proc/sys/fs/leases-enable
sysctl -w fs.leases-enable=0
```
1. Restart the NFS server process. For example, on CentOS run `service nfs restart`.
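Note that changes made through `/proc` or `sysctl -w` do not survive a reboot. To persist the setting, you could also write it to a sysctl configuration file, assuming your distribution reads `/etc/sysctl.d/`:

```shell
echo 'fs.leases-enable = 0' | sudo tee /etc/sysctl.d/90-nfs-no-delegation.conf
sudo sysctl --system
```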
#### Important notes
The kernel bug may be fixed in
[more recent kernels with this commit](https://github.com/torvalds/linux/commit/95da1b3a5aded124dd1bda1e3cdb876184813140).
Red Hat Enterprise 7 [shipped a kernel update](https://access.redhat.com/errata/RHSA-2019:2029)
on August 6, 2019 that may also have resolved this problem.
You may not need to disable NFS server delegation if you know you are using a version of
the Linux kernel that has been fixed. That said, GitLab still encourages instance
administrators to keep NFS server delegation disabled.
### Improving NFS performance with GitLab
#### Improving NFS performance with Unicorn
NOTE: **Note:**
From GitLab 12.1, it will automatically be detected if Rugged can and should be used per storage.
If you previously enabled Rugged using the feature flag, you will need to unset the feature flag by using:
```shell
sudo gitlab-rake gitlab:features:unset_rugged
```
If the Rugged feature flag is explicitly set to either true or false, GitLab will use the value explicitly set.
#### Improving NFS performance with Puma
NOTE: **Note:**
From GitLab 12.7, Rugged auto-detection is disabled if Puma thread count is greater than 1.
If you want to use Rugged with Puma, it is recommended to [set Puma thread count to 1](https://docs.gitlab.com/omnibus/settings/puma.html#puma-settings).
If you want to use Rugged with a Puma thread count greater than 1, Rugged can be enabled using the [feature flag](../../development/gitaly.md#legacy-rugged-code).
If the Rugged feature flag is explicitly set to either true or false, GitLab will use the value explicitly set.
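The corresponding Rake task to explicitly enable Rugged is:

```shell
sudo gitlab-rake gitlab:features:enable_rugged
```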
### Known issues
#### Avoid using AWS's Elastic File System (EFS)
GitLab strongly recommends against using AWS Elastic File System (EFS).
Our support team will not be able to assist on performance issues related to
file system access.
Customers and users have reported that AWS EFS does not perform well for GitLab's
use-case. Workloads where many small files are written in a serialized manner, like `git`,
are not well-suited for EFS. EBS with an NFS server on top will perform much better.
If you do choose to use EFS, avoid storing GitLab log files (e.g. those in `/var/log/gitlab`)
there because this will also affect performance. We recommend that the log files be
stored on a local volume.
For more details on another person's experience with EFS, see this [Commit Brooklyn 2019 video](https://youtu.be/K6OS8WodRBQ?t=313).
#### Avoid using CephFS and GlusterFS
GitLab strongly recommends against using CephFS and GlusterFS.
These distributed file systems are not well-suited for GitLab's input/output access patterns because Git uses many small files, and the time taken for access and file locks to propagate makes Git activity very slow.
#### Avoid using PostgreSQL with NFS
GitLab strongly recommends against running your PostgreSQL database
across NFS. The GitLab support team will not be able to assist on performance issues related to
this configuration.
Additionally, this configuration is specifically warned against in the
[PostgreSQL Documentation](https://www.postgresql.org/docs/current/creating-cluster.html#CREATING-CLUSTER-NFS):
>PostgreSQL does nothing special for NFS file systems, meaning it assumes NFS behaves exactly like
>locally-connected drives. If the client or server NFS implementation does not provide standard file
>system semantics, this can cause reliability problems. Specifically, delayed (asynchronous) writes
>to the NFS server can cause data corruption problems.
For supported database architecture, please see our documentation on
[Configuring a Database for GitLab HA](../postgresql/replication_and_failover.md).
## NFS Client mount options
Here is an example snippet to add to `/etc/fstab`:
```plaintext
10.1.0.1:/var/opt/gitlab/.ssh /var/opt/gitlab/.ssh nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-rails/uploads /var/opt/gitlab/gitlab-rails/uploads nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-rails/shared /var/opt/gitlab/gitlab-rails/shared nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-ci/builds /var/opt/gitlab/gitlab-ci/builds nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/git-data /var/opt/gitlab/git-data nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
```
Note there are several options that you should consider using:
| Setting | Description |
| ------- | ----------- |
| `vers=4.1` |NFS v4.1 should be used instead of v4.0 because there is a Linux [NFS client bug in v4.0](https://gitlab.com/gitlab-org/gitaly/-/issues/1339) that can cause significant problems due to stale data.
| `nofail` | Don't halt boot process waiting for this mount to become available
| `lookupcache=positive` | Tells the NFS client to honor `positive` cache results but invalidates any `negative` cache results. Negative cache results cause problems with Git. Specifically, a `git push` can fail to register uniformly across all NFS clients. The negative cache causes the clients to 'remember' that the files did not exist previously.
| `hard` | Instead of `soft`. [Further details](#soft-mount-option).
### soft mount option
We recommend that you use `hard` in your mount options, unless you have a specific
reason to use `soft`.
On GitLab.com, we use `soft` because there were times when we had NFS servers
reboot and `soft` improved availability, but everyone's infrastructure is different.
If your NFS is provided by on-premise storage arrays with redundant controllers,
for example, you shouldn't need to worry about NFS server availability.
The NFS man page states:
> "soft" timeout can cause silent data corruption in certain cases
Read the [Linux man page](https://linux.die.net/man/5/nfs) to understand the difference,
and if you do use `soft`, ensure that you've taken steps to mitigate the risks.
If you experience behavior that might have been caused by
writes to disk on the NFS server not occurring, such as commits going missing,
use the `hard` option, because (from the man page):
> use the soft option only when client responsiveness is more important than data integrity
Other vendors make similar recommendations, including
[SAP](http://wiki.scn.sap.com/wiki/x/PARnFQ) and NetApp's
[knowledge base](https://kb.netapp.com/Advice_and_Troubleshooting/Data_Storage_Software/ONTAP_OS/What_are_the_differences_between_hard_mount_and_soft_mount),
they highlight that if the NFS client driver caches data, `soft` means there is no certainty if
writes by GitLab are actually on disk.
Mount points set with the option `hard` may not perform as well, and if the
NFS server goes down, `hard` will cause processes to hang when interacting with
the mount point. Use `SIGKILL` (`kill -9`) to deal with hung processes.
The `intr` option
[stopped working in the 2.6 kernel](https://access.redhat.com/solutions/157873).
## A single NFS mount
It's recommended to nest all GitLab data directories within a single mount; this
allows automatic restore of backups without manually moving existing data.
```plaintext
mountpoint
└── gitlab-data
├── builds
├── git-data
├── shared
└── uploads
```
To do so, we'll need to configure Omnibus with the paths to each directory nested
in the mount point as follows:
Mount `/gitlab-nfs` then use the following Omnibus
configuration to move each data location to a subdirectory:
```ruby
git_data_dirs({"default" => { "path" => "/gitlab-nfs/gitlab-data/git-data"} })
gitlab_rails['uploads_directory'] = '/gitlab-nfs/gitlab-data/uploads'
gitlab_rails['shared_path'] = '/gitlab-nfs/gitlab-data/shared'
gitlab_ci['builds_directory'] = '/gitlab-nfs/gitlab-data/builds'
```
Run `sudo gitlab-ctl reconfigure` to start using the central location. Please
be aware that if you had existing data, you will need to manually copy or `rsync` it
to these new locations, and then restart GitLab.
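As an illustration of the copy step, here is a hedged sketch for one data location (stop GitLab first so nothing writes during the copy, and repeat for each location):

```shell
sudo gitlab-ctl stop
# Copy existing repository data into the new nested location, preserving ownership
sudo rsync -av /var/opt/gitlab/git-data/ /gitlab-nfs/gitlab-data/git-data/
sudo gitlab-ctl reconfigure
sudo gitlab-ctl start
```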
## Bind mounts
As an alternative to changing the Omnibus configuration, bind mounts can be used
to store the data on an NFS mount.
Bind mounts provide a way to specify just one NFS mount and then
bind the default GitLab data locations to the NFS mount. Start by defining your
single NFS mount point as you normally would in `/etc/fstab`. Let's assume your
NFS mount point is `/gitlab-nfs`. Then, add the following bind mounts in
`/etc/fstab`:
```plaintext
/gitlab-nfs/gitlab-data/git-data /var/opt/gitlab/git-data none bind 0 0
/gitlab-nfs/gitlab-data/.ssh /var/opt/gitlab/.ssh none bind 0 0
/gitlab-nfs/gitlab-data/uploads /var/opt/gitlab/gitlab-rails/uploads none bind 0 0
/gitlab-nfs/gitlab-data/shared /var/opt/gitlab/gitlab-rails/shared none bind 0 0
/gitlab-nfs/gitlab-data/builds /var/opt/gitlab/gitlab-ci/builds none bind 0 0
```
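After adding the entries, you can apply and verify the bind mounts without rebooting. A quick check (not part of the original steps):

```shell
# Mount everything listed in /etc/fstab, then confirm one of the bind mounts
mount -a
findmnt /var/opt/gitlab/git-data
```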
Using bind mounts will require manually making sure the data directories
are empty before attempting a restore. Read more about the
[restore prerequisites](../../raketasks/backup_restore.md).
## Multiple NFS mounts
When using the default Omnibus configuration, you will need to share 4 data locations
between all GitLab cluster nodes. No other locations should be shared. The
following are the 4 locations that need to be shared:
| Location | Description | Default configuration |
| -------- | ----------- | --------------------- |
| `/var/opt/gitlab/git-data` | Git repository data. This will account for a large portion of your data | `git_data_dirs({"default" => { "path" => "/var/opt/gitlab/git-data"} })`
| `/var/opt/gitlab/gitlab-rails/uploads` | User uploaded attachments | `gitlab_rails['uploads_directory'] = '/var/opt/gitlab/gitlab-rails/uploads'`
| `/var/opt/gitlab/gitlab-rails/shared` | Build artifacts, GitLab Pages, LFS objects, temp files, etc. If you're using LFS this may also account for a large portion of your data | `gitlab_rails['shared_path'] = '/var/opt/gitlab/gitlab-rails/shared'`
| `/var/opt/gitlab/gitlab-ci/builds` | GitLab CI/CD build traces | `gitlab_ci['builds_directory'] = '/var/opt/gitlab/gitlab-ci/builds'`
Other GitLab directories should not be shared between nodes. They contain
node-specific files and GitLab code that does not need to be shared. To ship
logs to a central location consider using remote syslog. Omnibus GitLab packages
provide configuration for [UDP log shipping](https://docs.gitlab.com/omnibus/settings/logs.html#udp-log-shipping-gitlab-enterprise-edition-only).
Having multiple NFS mounts will require manually making sure the data directories
are empty before attempting a restore. Read more about the
[restore prerequisites](../../raketasks/backup_restore.md).
---
Read more on high-availability configuration:
1. [Configure the database](../postgresql/replication_and_failover.md)
1. [Configure Redis](redis.md)
1. [Configure the GitLab application servers](gitlab.md)
1. [Configure the load balancers](load_balancer.md)
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
--- ---
type: reference redirect_to: ../nfs.md
--- ---
# Configuring NFS for GitLab HA This document was moved to [another location](../nfs.md).
Setting up NFS for a GitLab HA setup allows all application nodes in a cluster
to share the same files and maintain data consistency. Application nodes in an HA
setup act as clients, while the NFS server plays host.
> Note: The instructions provided in this documentation allow for setting up a quick
proof of concept, but will leave NFS as a potential single point of failure and
are therefore not recommended for use in production. Explore options such as [Pacemaker
and Corosync](https://clusterlabs.org) for highly available NFS in production.
Below are instructions for setting up an application node (client) in an HA cluster
to read from and write to a central NFS server (host).
NOTE: **Note:**
Using EFS may negatively impact performance. Please review the [relevant documentation](nfs.md#avoid-using-awss-elastic-file-system-efs) for additional details.
## NFS Server Setup
> Follow the instructions below to set up and configure your NFS server.
### Step 1 - Install NFS Server on Host
Installing the `nfs-kernel-server` package allows you to share directories with the clients running the GitLab application.
```shell
apt-get update
apt-get install nfs-kernel-server
```
### Step 2 - Export Host's Home Directory to Client
In this setup, we will share the host's home directory with the client. Edit the exports file as below to do so. If you have multiple clients running GitLab, you must list each client's IP address on the same line in the `/etc/exports` file.
```plaintext
#/etc/exports for one client
/home <client_ip_address>(rw,sync,no_root_squash,no_subtree_check)
#/etc/exports for three clients
/home <client_ip_address>(rw,sync,no_root_squash,no_subtree_check) <client_2_ip_address>(rw,sync,no_root_squash,no_subtree_check) <client_3_ip_address>(rw,sync,no_root_squash,no_subtree_check)
```
Restart the NFS server after making changes to the `exports` file for the changes
to take effect.
```shell
systemctl restart nfs-kernel-server
```
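To confirm the directory is being exported, you can list the active exports. A quick check (not part of the original steps):

```shell
# Show the current export table with its options
exportfs -v
# List the exports as a client would see them
showmount -e localhost
```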
NOTE: **Note:**
You may need to update your server's firewall. See the [firewall section](#nfs-in-a-firewalled-environment) at the end of this guide.
## Client / GitLab application node Setup
> Follow the instructions below to connect any GitLab Rails application node running
inside your HA environment to the NFS server configured above.
### Step 1 - Install NFS Common on Client
The `nfs-common` package provides NFS functionality without installing the server components, which
we don't need running on the application nodes.
```shell
apt-get update
apt-get install nfs-common
```
### Step 2 - Create Mount Points on Client
Create a directory on the client where we can mount the shared directory from the host.
Note that if your mount point directory contains any files, they will be hidden
once the remote shares are mounted. An empty, new directory on the client is recommended
for this purpose.
```shell
mkdir -p /nfs/home
```
Confirm that the mount point works by mounting it on the client and checking that
it is mounted with the commands below:
```shell
mount <host_ip_address>:/home /nfs/home
df -h
```
### Step 3 - Set up Automatic Mounts on Boot
Edit `/etc/fstab` on the client as below to mount the remote shares automatically at boot.
Note that GitLab requires advisory file locking, which is only supported natively in
NFS version 4. NFSv3 also supports locking as long as Linux Kernel 2.6.5+ is used.
We recommend using version 4 and do not specifically test NFSv3.
See [NFS documentation](nfs.md#nfs-client-mount-options) for guidance on mount options.
```plaintext
#/etc/fstab
<host_ip_address>:/home /nfs/home nfs4 defaults,hard,vers=4.1,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
```
Reboot the client and confirm that the mount point is mounted automatically.
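If you'd rather verify the entry before rebooting, you can also mount everything from `/etc/fstab` in place (a quick sketch, not part of the original steps):

```shell
# Mount all fstab entries, then confirm the NFS share is attached
mount -a
df -h /nfs/home
```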
NOTE: **Note:**
If you followed our guide to [GitLab Pages on a separate server](../pages/index.md#running-gitlab-pages-on-a-separate-server),
please continue there with the Pages-specific NFS mounts.
The step below is for broader use cases than only sharing Pages data.
### Step 4 - Set up GitLab to Use NFS mounts
When using the default Omnibus configuration, you will need to share 4 data locations
between all GitLab cluster nodes. No other locations should be shared. Changing the
default file locations in `gitlab.rb` on the client allows you to have one main mount
point and keep all the required locations as subdirectories of it, rather than
mounting each location separately.
```ruby
git_data_dirs({"default" => {"path" => "/nfs/home/var/opt/gitlab-data/git-data"}})
gitlab_rails['uploads_directory'] = '/nfs/home/var/opt/gitlab-data/uploads'
gitlab_rails['shared_path'] = '/nfs/home/var/opt/gitlab-data/shared'
gitlab_ci['builds_directory'] = '/nfs/home/var/opt/gitlab-data/builds'
```
Save the changes in `gitlab.rb` and run `gitlab-ctl reconfigure`.
## NFS in a Firewalled Environment
If the traffic between your NFS server and NFS client(s) is subject to port filtering
by a firewall, then you will need to reconfigure that firewall to allow NFS communication.
[This guide from TLDP](http://tldp.org/HOWTO/NFS-HOWTO/security.html#FIREWALLS)
covers the basics of using NFS in a firewalled environment. Additionally, we encourage you to
search for and review the specific documentation for your operating system or distribution and your firewall software.
Example for Ubuntu:
Check that NFS traffic from the client is allowed by the firewall on the host by running
the command: `sudo ufw status`. If it's being blocked, then you can allow traffic from a specific
client with the command below.
```shell
sudo ufw allow from <client_ip_address> to any port nfs
```
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
--- ---
type: reference redirect_to: ../postgresql/pgbouncer.md
--- ---
# Working with the bundled PgBouncer service **(PREMIUM ONLY)** This document was moved to [another location](../postgresql/pgbouncer.md).
As part of its High Availability stack, GitLab Premium includes a bundled version of [PgBouncer](http://www.pgbouncer.org/) that can be managed through `/etc/gitlab/gitlab.rb`. PgBouncer is used to seamlessly migrate database connections between servers in a failover scenario. Additionally, it can be used in a non-HA setup to pool connections, speeding up response time while reducing resource usage.
In an HA setup, it's recommended to run a PgBouncer node separately for each database node, with an internal load balancer (TCP) serving each accordingly.
## Operations
### Running PgBouncer as part of an HA GitLab installation
This content has been moved to a [new location](../postgresql/replication_and_failover.md#configuring-the-pgbouncer-node).
### Running PgBouncer as part of a non-HA GitLab installation
1. Generate PGBOUNCER_USER_PASSWORD_HASH with the command `gitlab-ctl pg-password-md5 pgbouncer`
1. Generate SQL_USER_PASSWORD_HASH with the command `gitlab-ctl pg-password-md5 gitlab`. We'll also need to enter the plaintext SQL_USER_PASSWORD later
1. On your database node, ensure the following is set in your `/etc/gitlab/gitlab.rb`
```ruby
postgresql['pgbouncer_user_password'] = 'PGBOUNCER_USER_PASSWORD_HASH'
postgresql['sql_user_password'] = 'SQL_USER_PASSWORD_HASH'
postgresql['listen_address'] = 'XX.XX.XX.Y' # Where XX.XX.XX.Y is the ip address on the node postgresql should listen on
postgresql['md5_auth_cidr_addresses'] = %w(AA.AA.AA.B/32) # Where AA.AA.AA.B is the IP address of the pgbouncer node
```
1. Run `gitlab-ctl reconfigure`
NOTE: **Note:**
If the database was already running, it will need to be restarted after reconfigure by running `gitlab-ctl restart postgresql`.
1. On the node you are running PgBouncer on, make sure the following is set in `/etc/gitlab/gitlab.rb`
```ruby
pgbouncer['enable'] = true
pgbouncer['databases'] = {
gitlabhq_production: {
host: 'DATABASE_HOST',
user: 'pgbouncer',
password: 'PGBOUNCER_USER_PASSWORD_HASH'
}
}
```
1. Run `gitlab-ctl reconfigure`
1. On the node running Puma, make sure the following is set in `/etc/gitlab/gitlab.rb`
```ruby
gitlab_rails['db_host'] = 'PGBOUNCER_HOST'
gitlab_rails['db_port'] = '6432'
gitlab_rails['db_password'] = 'SQL_USER_PASSWORD'
```
1. Run `gitlab-ctl reconfigure`
1. At this point, your instance should connect to the database through PgBouncer. If you are having issues, see the [Troubleshooting](#troubleshooting) section.
## Enable Monitoring
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
If you enable Monitoring, it must be enabled on **all** PgBouncer servers.
1. Create/edit `/etc/gitlab/gitlab.rb` and add the following configuration:
```ruby
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '0.0.0.0:9100'
pgbouncer_exporter['listen_address'] = '0.0.0.0:9188'
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
### Interacting with PgBouncer
#### Administrative console
As part of Omnibus GitLab, we provide a command `gitlab-ctl pgb-console` to automatically connect to the PgBouncer administrative console. Please see the [PgBouncer documentation](https://www.pgbouncer.org/usage.html#admin-console) for detailed instructions on how to interact with the console.
To start a session, run:
```shell
# gitlab-ctl pgb-console
Password for user pgbouncer:
psql (11.7, server 1.7.2/bouncer)
Type "help" for help.
pgbouncer=#
```
The password you will be prompted for is the PGBOUNCER_USER_PASSWORD.
To get some basic information about the instance, run:
```shell
pgbouncer=# show databases; show clients; show servers;
name | host | port | database | force_user | pool_size | reserve_pool | pool_mode | max_connections | current_connections
---------------------+-----------+------+---------------------+------------+-----------+--------------+-----------+-----------------+---------------------
gitlabhq_production | 127.0.0.1 | 5432 | gitlabhq_production | | 100 | 5 | | 0 | 1
pgbouncer | | 6432 | pgbouncer | pgbouncer | 2 | 0 | statement | 0 | 0
(2 rows)
type | user | database | state | addr | port | local_addr | local_port | connect_time | request_time | ptr | link
| remote_pid | tls
------+-----------+---------------------+--------+-----------+-------+------------+------------+---------------------+---------------------+-----------+------
+------------+-----
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44590 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x12444c0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44592 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x12447c0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44594 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x1244940 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44706 | 127.0.0.1 | 6432 | 2018-04-24 22:14:22 | 2018-04-24 22:16:31 | 0x1244ac0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44708 | 127.0.0.1 | 6432 | 2018-04-24 22:14:22 | 2018-04-24 22:15:15 | 0x1244c40 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44794 | 127.0.0.1 | 6432 | 2018-04-24 22:15:15 | 2018-04-24 22:15:15 | 0x1244dc0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44798 | 127.0.0.1 | 6432 | 2018-04-24 22:15:15 | 2018-04-24 22:16:31 | 0x1244f40 |
| 0 |
C | pgbouncer | pgbouncer | active | 127.0.0.1 | 44660 | 127.0.0.1 | 6432 | 2018-04-24 22:13:51 | 2018-04-24 22:17:12 | 0x1244640 |
| 0 |
(8 rows)
type | user | database | state | addr | port | local_addr | local_port | connect_time | request_time | ptr | link | rem
ote_pid | tls
------+--------+---------------------+-------+-----------+------+------------+------------+---------------------+---------------------+-----------+------+----
--------+-----
S | gitlab | gitlabhq_production | idle | 127.0.0.1 | 5432 | 127.0.0.1 | 35646 | 2018-04-24 22:15:15 | 2018-04-24 22:17:10 | 0x124dca0 | |
19980 |
(1 row)
```
## Troubleshooting
In case you are experiencing any issues connecting through PgBouncer, the first place to check is always the logs:
```shell
# gitlab-ctl tail pgbouncer
```
Additionally, you can check the output from `show databases` in the [Administrative console](#administrative-console). In the output, you would expect to see values in the `host` field for the `gitlabhq_production` database. `current_connections` should also be greater than 1.
### Message: `LOG: invalid CIDR mask in address`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-cidr-mask-in-address).
### Message: `LOG: invalid IP mask "md5": Name or service not known`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-ip-mask-md5-name-or-service-not-known).
--- ---
type: reference redirect_to: ../sidekiq.md
--- ---
# Configuring Sidekiq This document was moved to [another location](../sidekiq.md).
This section discusses how to configure an external Sidekiq instance.
Sidekiq requires a connection to the Redis, PostgreSQL, and Gitaly instances.
To configure the Sidekiq node:
1. SSH into the Sidekiq server.
1. [Download/install](https://about.gitlab.com/install/) the Omnibus GitLab package
you want using steps 1 and 2 from the GitLab downloads page.
**Do not complete any other steps on the download page.**
1. Open `/etc/gitlab/gitlab.rb` with your editor.
1. Generate the Sidekiq configuration:
```ruby
sidekiq['listen_address'] = "10.10.1.48"
## Optional: Enable extra Sidekiq processes
sidekiq_cluster['enable'] = true
sidekiq_cluster['queue_groups'] = [
  "elastic_indexer"
]
```
1. Set up Sidekiq's connection to Redis:
```ruby
## Must be the same in every sentinel node
redis['master_name'] = 'gitlab-redis'
## The same password for Redis authentication you set up for the master node.
redis['master_password'] = 'YOUR_PASSWORD'
## A list of sentinels with `host` and `port`
gitlab_rails['redis_sentinels'] = [
{'host' => '10.10.1.34', 'port' => 26379},
{'host' => '10.10.1.35', 'port' => 26379},
{'host' => '10.10.1.36', 'port' => 26379},
]
```
1. Set up Sidekiq's connection to Gitaly:
```ruby
git_data_dirs({
'default' => { 'gitaly_address' => 'tcp://gitaly:8075' },
})
gitlab_rails['gitaly_token'] = 'YOUR_TOKEN'
```
1. Set up Sidekiq's connection to PostgreSQL:
```ruby
gitlab_rails['db_host'] = '10.10.1.30'
gitlab_rails['db_password'] = 'YOUR_PASSWORD'
gitlab_rails['db_port'] = '5432'
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_encoding'] = 'unicode'
gitlab_rails['auto_migrate'] = false
```
Remember to add the Sidekiq nodes to PostgreSQL's trusted addresses:
```ruby
postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.10.1.30/32 10.10.1.31/32 10.10.1.32/32 10.10.1.33/32 10.10.1.38/32)
```
1. Disable other services:
```ruby
nginx['enable'] = false
grafana['enable'] = false
prometheus['enable'] = false
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_monitor['enable'] = false
gitlab_workhorse['enable'] = false
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
puma['enable'] = false
gitlab_exporter['enable'] = false
```
1. Run `gitlab-ctl reconfigure`.
NOTE: **Note:**
You will need to restart the Sidekiq nodes after an update has occurred and database
migrations have been performed.
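To verify the node after reconfiguring, a quick check using the standard Omnibus commands:

```shell
# Confirm the Sidekiq service is up, then watch its logs for job processing
gitlab-ctl status sidekiq
gitlab-ctl tail sidekiq
```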
## Example configuration
Here's what the resulting `/etc/gitlab/gitlab.rb` would look like:
```ruby
########################################
##### Services Disabled ###
########################################
nginx['enable'] = false
grafana['enable'] = false
prometheus['enable'] = false
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_monitor['enable'] = false
gitlab_workhorse['enable'] = false
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
puma['enable'] = false
gitlab_exporter['enable'] = false
########################################
#### Redis ###
########################################
## Must be the same in every sentinel node
redis['master_name'] = 'gitlab-redis'
## The same password for Redis authentication you set up for the master node.
redis['master_password'] = 'YOUR_PASSWORD'
## A list of sentinels with `host` and `port`
gitlab_rails['redis_sentinels'] = [
{'host' => '10.10.1.34', 'port' => 26379},
{'host' => '10.10.1.35', 'port' => 26379},
{'host' => '10.10.1.36', 'port' => 26379},
]
#######################################
### Gitaly ###
#######################################
git_data_dirs({
'default' => { 'gitaly_address' => 'tcp://gitaly:8075' },
})
gitlab_rails['gitaly_token'] = 'YOUR_TOKEN'
#######################################
### Postgres ###
#######################################
gitlab_rails['db_host'] = '10.10.1.30'
gitlab_rails['db_password'] = 'YOUR_PASSWORD'
gitlab_rails['db_port'] = '5432'
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_encoding'] = 'unicode'
gitlab_rails['auto_migrate'] = false
#######################################
### Sidekiq configuration ###
#######################################
sidekiq['listen_address'] = "10.10.1.48"
#######################################
### Monitoring configuration ###
#######################################
consul['enable'] = true
consul['monitoring_service_discovery'] = true
consul['configuration'] = {
bind_addr: '10.10.1.48',
retry_join: %w(10.10.1.34 10.10.1.35 10.10.1.36)
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '10.10.1.48:9100'
# Rails Status for prometheus
gitlab_rails['monitoring_whitelist'] = ['10.10.1.42', '127.0.0.1']
```
## Further reading
Related Sidekiq configuration:
1. [Extra Sidekiq processes](../operations/extra_sidekiq_processes.md)
1. [Using the GitLab-Sidekiq chart](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/)
...@@ -57,7 +57,7 @@ through to the next one in the chain. If you installed GitLab using Omnibus, or ...@@ -57,7 +57,7 @@ through to the next one in the chain. If you installed GitLab using Omnibus, or
from source, starting with GitLab 8.15, this should be done by the default from source, starting with GitLab 8.15, this should be done by the default
configuration, so there's no need for you to do anything. configuration, so there's no need for you to do anything.
However, if you run a [load balancer](../high_availability/load_balancer.md) in However, if you run a [load balancer](../load_balancer.md) in
front of GitLab, you may need to make some changes to your configuration. These front of GitLab, you may need to make some changes to your configuration. These
guides document the necessary steps for a selection of popular reverse proxies: guides document the necessary steps for a selection of popular reverse proxies:
......
---
type: reference
---
# Load Balancer for multi-node GitLab
In a multi-node GitLab configuration, you will need a load balancer to route
traffic to the application servers. The specifics on which load balancer to use,
or its exact configuration, are beyond the scope of GitLab documentation. We hope
that if you're managing HA systems like GitLab, you already have a load balancer of
choice. Some examples include HAProxy (open source), F5 BIG-IP LTM,
and Citrix NetScaler. This documentation outlines what ports and protocols
you need to use with GitLab.
## SSL
How will you handle SSL in your multi-node environment? There are several different
options:
- Each application node terminates SSL
- The load balancer(s) terminate SSL and communication is not secure between
the load balancer(s) and the application nodes
- The load balancer(s) terminate SSL and communication is *secure* between the
load balancer(s) and the application nodes
### Application nodes terminate SSL
Configure your load balancer(s) to pass connections on port 443 as 'TCP' rather
than 'HTTP(S)' protocol. This will pass the connection to the application nodes'
NGINX service untouched. NGINX will have the SSL certificate and listen on port 443.
See [NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
### Load Balancer(s) terminate SSL without backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will then be responsible for managing SSL certificates and
terminating SSL.
Since communication between the load balancer(s) and GitLab will not be secure,
there is some additional configuration needed. See
[NGINX Proxied SSL documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl)
for details.
### Load Balancer(s) terminate SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will be responsible for managing SSL certificates that
end users will see.
Traffic will also be secure between the load balancer(s) and NGINX in this
scenario. There is no need to add configuration for proxied SSL, since the
connection will be secure all the way. However, configuration will need to be
added to GitLab to manage SSL certificates. See
[NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
## Ports
### Basic ports
| LB Port | Backend Port | Protocol |
| ------- | ------------ | ------------------------ |
| 80 | 80 | HTTP (*1*) |
| 443 | 443 | TCP or HTTPS (*1*) (*2*) |
| 22 | 22 | TCP |
- (*1*): [Web terminal](../ci/environments/index.md#web-terminals) support requires
your load balancer to correctly handle WebSocket connections. When using
HTTP or HTTPS proxying, this means your load balancer must be configured
to pass through the `Connection` and `Upgrade` hop-by-hop headers. See the
[web terminal](integration/terminal.md) integration guide for
more details.
- (*2*): When using HTTPS protocol for port 443, you will need to add an SSL
certificate to the load balancers. If you wish to terminate SSL at the
GitLab application server instead, use TCP protocol.
### GitLab Pages Ports
If you're using GitLab Pages with custom domain support you will need some
additional port configurations.
GitLab Pages requires a separate virtual IP address. Configure DNS to point the
`pages_external_url` from `/etc/gitlab/gitlab.rb` at the new virtual IP address. See the
[GitLab Pages documentation](pages/index.md) for more information.
| LB Port | Backend Port | Protocol |
| ------- | ------------- | --------- |
| 80 | Varies (*1*) | HTTP |
| 443 | Varies (*1*) | TCP (*2*) |
- (*1*): The backend port for GitLab Pages depends on the
`gitlab_pages['external_http']` and `gitlab_pages['external_https']`
setting. See [GitLab Pages documentation](pages/index.md) for more details.
- (*2*): Port 443 for GitLab Pages should always use the TCP protocol. Users can
configure custom domains with custom SSL, which would not be possible
if SSL was terminated at the load balancer.
### Alternate SSH Port
Some organizations have policies against opening SSH port 22. In this case,
it may be helpful to configure an alternate SSH hostname that allows users
to use SSH on port 443. An alternate SSH hostname will require a new virtual IP address
compared to the other GitLab HTTP configuration above.
Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
| LB Port | Backend Port | Protocol |
| ------- | ------------ | -------- |
| 443 | 22 | TCP |
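To test the setup from a client, you can point SSH at the alternate hostname and port (a sketch; `altssh.gitlab.example.com` is the example hostname from above):

```shell
# Should print a GitLab welcome message if SSH over port 443 is working
ssh -T -p 443 git@altssh.gitlab.example.com
```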
## Readiness check
It is strongly recommended that multi-node deployments configure load balancers to use the [readiness check](../user/admin_area/monitoring/health_check.md#readiness) to ensure a node is ready to accept traffic before routing traffic to it. This is especially important when using Puma, as there is a brief period during a restart where Puma does not accept requests.
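As an illustration, a health probe could poll the readiness endpoint directly (a sketch; the probing IP must be allowed by the monitoring IP whitelist):

```shell
# Expect HTTP 200 when the node is ready to accept traffic
curl --fail "http://gitlab.example.com/-/readiness"
```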
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
...@@ -23,7 +23,7 @@ From left to right, it displays: ...@@ -23,7 +23,7 @@ From left to right, it displays:
details: details:
![Gitaly profiling using the Performance Bar](img/performance_bar_gitaly_calls.png) ![Gitaly profiling using the Performance Bar](img/performance_bar_gitaly_calls.png)
- **Rugged calls**: the time taken (in milliseconds) and the total number of - **Rugged calls**: the time taken (in milliseconds) and the total number of
[Rugged](../../high_availability/nfs.md#improving-nfs-performance-with-gitlab) calls. [Rugged](../../nfs.md#improving-nfs-performance-with-gitlab) calls.
Click to display a modal window with more details: Click to display a modal window with more details:
![Rugged profiling using the Performance Bar](img/performance_bar_rugged_calls.png) ![Rugged profiling using the Performance Bar](img/performance_bar_rugged_calls.png)
- **Redis calls**: the time taken (in milliseconds) and the total number of - **Redis calls**: the time taken (in milliseconds) and the total number of
......
...@@ -109,6 +109,81 @@ prometheus['scrape_configs'] = [ ...@@ -109,6 +109,81 @@ prometheus['scrape_configs'] = [
] ]
``` ```
### Standalone Prometheus using Omnibus GitLab
The Omnibus GitLab package can be used to configure a standalone Monitoring node running Prometheus and [Grafana](../performance/grafana_configuration.md).
The steps below are the minimum necessary to configure a Monitoring node running Prometheus and Grafana with Omnibus GitLab:
1. SSH into the Monitoring node.
1. [Install](https://about.gitlab.com/install/) the Omnibus GitLab
package you want using **steps 1 and 2** from the GitLab downloads page, but
do not follow the remaining steps.
1. Make sure to collect the IP addresses or DNS records of the Consul server nodes, for the next step.
1. Edit `/etc/gitlab/gitlab.rb` and add the contents:
```ruby
external_url 'http://gitlab.example.com'
# Enable Prometheus
prometheus['enable'] = true
prometheus['listen_address'] = '0.0.0.0:9090'
prometheus['monitor_kubernetes'] = false
# Enable Login form
grafana['disable_login_form'] = false
# Enable Grafana
grafana['enable'] = true
grafana['admin_password'] = 'toomanysecrets'
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# The addresses can be IPs or FQDNs
consul['configuration'] = {
retry_join: %w(10.0.0.1 10.0.0.2 10.0.0.3),
}
# Disable all other services
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_exporter['enable'] = false
gitlab_workhorse['enable'] = false
# Keep NGINX enabled so it can serve the Grafana web interface
nginx['enable'] = true
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
sidekiq['enable'] = false
puma['enable'] = false
node_exporter['enable'] = false
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
The next step is to tell all the other nodes where the monitoring node is:
1. Edit `/etc/gitlab/gitlab.rb`, and add, or find and uncomment the following line:
```ruby
gitlab_rails['prometheus_address'] = '10.0.0.1:9090'
```
Where `10.0.0.1:9090` is the IP address and port of the Prometheus node.
1. Save the file and [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to
take effect.
NOTE: **Note:**
Once monitoring using Service Discovery is enabled with `consul['monitoring_service_discovery'] = true`,
ensure that `prometheus['scrape_configs']` is not set in `/etc/gitlab/gitlab.rb`. Setting both
`consul['monitoring_service_discovery'] = true` and `prometheus['scrape_configs']` in `/etc/gitlab/gitlab.rb`
will result in errors.
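To check that the monitoring node itself is up, you can probe Prometheus's built-in health endpoints (a quick sketch; `10.0.0.1` is the example address from above):

```shell
# Prometheus exposes simple health and readiness endpoints
curl "http://10.0.0.1:9090/-/healthy"
curl "http://10.0.0.1:9090/-/ready"
```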
### Using an external Prometheus server ### Using an external Prometheus server
NOTE: **Note:** NOTE: **Note:**
...@@ -128,14 +203,24 @@ To use an external Prometheus server: ...@@ -128,14 +203,24 @@ To use an external Prometheus server:
1. Set each bundled service's [exporter](#bundled-software-metrics) to listen on a network address, for example: 1. Set each bundled service's [exporter](#bundled-software-metrics) to listen on a network address, for example:
```ruby ```ruby
node_exporter['listen_address'] = '0.0.0.0:9100'
gitlab_workhorse['prometheus_listen_addr'] = "0.0.0.0:9229"
# Rails nodes
gitlab_exporter['listen_address'] = '0.0.0.0' gitlab_exporter['listen_address'] = '0.0.0.0'
sidekiq['listen_address'] = '0.0.0.0'
gitlab_exporter['listen_port'] = '9168' gitlab_exporter['listen_port'] = '9168'
node_exporter['listen_address'] = '0.0.0.0:9100'
# Sidekiq nodes
sidekiq['listen_address'] = '0.0.0.0'
# Redis nodes
redis_exporter['listen_address'] = '0.0.0.0:9121' redis_exporter['listen_address'] = '0.0.0.0:9121'
# PostgreSQL nodes
postgres_exporter['listen_address'] = '0.0.0.0:9187' postgres_exporter['listen_address'] = '0.0.0.0:9187'
# Gitaly nodes
gitaly['prometheus_listen_addr'] = "0.0.0.0:9236" gitaly['prometheus_listen_addr'] = "0.0.0.0:9236"
gitlab_workhorse['prometheus_listen_addr'] = "0.0.0.0:9229"
``` ```
1. Install and set up a dedicated Prometheus instance, if necessary, using the [official installation instructions](https://prometheus.io/docs/prometheus/latest/installation/). 1. Install and set up a dedicated Prometheus instance, if necessary, using the [official installation instructions](https://prometheus.io/docs/prometheus/latest/installation/).
......
---
type: reference
---
# Using NFS with GitLab
NFS can be used as an alternative to object storage, but this isn't typically
recommended for performance reasons. Note, however, that it is required for [GitLab
Pages](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/196).
For data objects such as LFS, Uploads, Artifacts, etc., an [Object Storage service](object_storage.md)
is recommended over NFS where possible, due to better performance.
CAUTION: **Caution:**
From GitLab 13.0, using NFS for Git repositories is deprecated. In GitLab 14.0,
support for NFS for Git repositories is scheduled to be removed. Upgrade to
[Gitaly Cluster](gitaly/praefect.md) as soon as possible.
NOTE: **Note:**
Filesystem performance has a big impact on overall GitLab
performance, especially for actions that read or write to Git repositories. See
[Filesystem Performance Benchmarking](operations/filesystem_benchmarking.md)
for steps to test filesystem performance.
## Known kernel version incompatibilities
Red Hat Enterprise Linux (RHEL) and CentOS v7.7 and v7.8 ship with kernel
version `3.10.0-1127`, which [contains a
bug](https://bugzilla.redhat.com/show_bug.cgi?id=1783554) that causes
[uploads to fail to copy over NFS](https://gitlab.com/gitlab-org/gitlab/-/issues/218999). The
following GitLab versions include a fix to work properly with that
kernel version:
1. [12.10.12](https://about.gitlab.com/releases/2020/06/25/gitlab-12-10-12-released/)
1. [13.0.7](https://about.gitlab.com/releases/2020/06/25/gitlab-13-0-7-released/)
1. [13.1.1](https://about.gitlab.com/releases/2020/06/24/gitlab-13-1-1-released/)
1. 13.2 and up
If you are using that kernel version, be sure to upgrade GitLab to avoid
errors.
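To check whether a node is running the affected kernel, a quick check:

```shell
# Affected RHEL/CentOS 7.7 and 7.8 systems report 3.10.0-1127
uname -r
```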
## Fast lookup of authorized SSH keys
The [fast SSH key lookup](operations/fast_ssh_key_lookup.md) feature can improve
performance of GitLab instances even if they're using block storage.
[Fast SSH key lookup](operations/fast_ssh_key_lookup.md) is a replacement for
`authorized_keys` (in `/var/opt/gitlab/.ssh`) using the GitLab database.
NFS increases latency, so fast lookup is recommended if `/var/opt/gitlab`
is moved to NFS.
We are investigating the use of
[fast lookup as the default](https://gitlab.com/groups/gitlab-org/-/epics/3104).
## NFS server
Installing the `nfs-kernel-server` package allows you to share directories with
the clients running the GitLab application:
```shell
sudo apt-get update
sudo apt-get install nfs-kernel-server
```
### Required features
**File locking**: GitLab **requires** advisory file locking, which is only
supported natively in NFS version 4. NFSv3 also supports locking as long as
Linux Kernel 2.6.5+ is used. We recommend using version 4 and do not
specifically test NFSv3.
### Recommended options
When you define your NFS exports, we recommend you also add the following
options:
- `no_root_squash` - NFS normally changes the `root` user to `nobody`. This is
a good security measure when NFS shares will be accessed by many different
users. However, in this case only GitLab will use the NFS share so it
is safe. GitLab recommends the `no_root_squash` setting because we need to
manage file permissions automatically. Without the setting you may receive
errors when the Omnibus package tries to alter permissions. Note that GitLab
and other bundled components do **not** run as `root` but as non-privileged
users. The recommendation for `no_root_squash` is to allow the Omnibus package
to set ownership and permissions on files, as needed. In some cases where the
`no_root_squash` option is not available, the `root` flag can achieve the same
result.
- `sync` - Force synchronous behavior. Default is asynchronous and under certain
circumstances it could lead to data loss if a failure occurs before data has
synced.
Due to the complexities of running Omnibus with LDAP and the complexities of
maintaining ID mapping without LDAP, in most cases you should enable numeric UIDs
and GIDs (which is off by default in some cases) for simplified permission
management between systems:
- [NetApp instructions](https://library.netapp.com/ecmdocs/ECMP1401220/html/GUID-24367A9F-E17B-4725-ADC1-02D86F56F78E.html)
- For non-NetApp devices, disable NFSv4 `idmapping` by performing the opposite of [enable NFSv4 idmapper](https://wiki.archlinux.org/index.php/NFS#Enabling_NFSv4_idmapping)
### Disable NFS server delegation
We recommend that all NFS users disable the NFS server delegation feature. This
is to avoid a [Linux kernel bug](https://bugzilla.redhat.com/show_bug.cgi?id=1552203)
which causes NFS clients to slow precipitously due to
[excessive network traffic from numerous `TEST_STATEID` NFS messages](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/52017).
To disable NFS server delegation, do the following:
1. On the NFS server, run:
```shell
echo 0 > /proc/sys/fs/leases-enable
sysctl -w fs.leases-enable=0
```
1. Restart the NFS server process. For example, on CentOS run `service nfs restart`.
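To verify the change, and to persist it across reboots, something like the following can be used (a sketch; the `sysctl.d` file name is arbitrary):

```shell
# Confirm leases are disabled (should print 0)
cat /proc/sys/fs/leases-enable
# Persist the setting across reboots
echo "fs.leases-enable = 0" > /etc/sysctl.d/99-nfs-leases.conf
sysctl --system
```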
NOTE: **Important note:**
The kernel bug may be fixed in
[more recent kernels with this commit](https://github.com/torvalds/linux/commit/95da1b3a5aded124dd1bda1e3cdb876184813140).
Red Hat Enterprise 7 [shipped a kernel update](https://access.redhat.com/errata/RHSA-2019:2029)
on August 6, 2019 that may also have resolved this problem.
You may not need to disable NFS server delegation if you know you are using a version of
the Linux kernel that has been fixed. That said, GitLab still encourages instance
administrators to keep NFS server delegation disabled.
### Improving NFS performance with GitLab
#### Improving NFS performance with Unicorn
NOTE: **Note:**
From GitLab 12.1, GitLab automatically detects, per storage, whether Rugged can and should be used.
If you previously enabled Rugged using the feature flag, you will need to unset the feature flag by using:
```shell
sudo gitlab-rake gitlab:features:unset_rugged
```
If the Rugged feature flag is explicitly set to either `true` or `false`, GitLab will use that value.
#### Improving NFS performance with Puma
NOTE: **Note:**
From GitLab 12.7, Rugged auto-detection is disabled if the Puma thread count is greater than 1.
If you want to use Rugged with Puma, it is recommended to [set the Puma thread count to 1](https://docs.gitlab.com/omnibus/settings/puma.html#puma-settings).
If you want to use Rugged with a Puma thread count greater than 1, Rugged can be enabled using the [feature flag](../development/gitaly.md#legacy-rugged-code).
If the Rugged feature flag is explicitly set to either `true` or `false`, GitLab will use that value.
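For example, to set the flag explicitly, Rake tasks parallel to the `unset_rugged` task above can be used:

```shell
# Explicitly enable Rugged, regardless of auto-detection
sudo gitlab-rake gitlab:features:enable_rugged
# Or explicitly disable it
sudo gitlab-rake gitlab:features:disable_rugged
```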
## NFS client
The `nfs-common` package provides NFS functionality without installing the server components, which
we don't need running on the application nodes.
```shell
sudo apt-get update
sudo apt-get install nfs-common
```
### Mount options
Here is an example snippet to add to `/etc/fstab`:
```plaintext
10.1.0.1:/var/opt/gitlab/.ssh /var/opt/gitlab/.ssh nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-rails/uploads /var/opt/gitlab/gitlab-rails/uploads nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-rails/shared /var/opt/gitlab/gitlab-rails/shared nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-ci/builds /var/opt/gitlab/gitlab-ci/builds nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/git-data /var/opt/gitlab/git-data nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
```
Note there are several options that you should consider using:
| Setting | Description |
| ------- | ----------- |
| `vers=4.1` | NFS v4.1 should be used instead of v4.0 because there is a Linux [NFS client bug in v4.0](https://gitlab.com/gitlab-org/gitaly/-/issues/1339) that can cause significant problems due to stale data.
| `nofail` | Don't halt boot process waiting for this mount to become available
| `lookupcache=positive` | Tells the NFS client to honor `positive` cache results but invalidates any `negative` cache results. Negative cache results cause problems with Git. Specifically, a `git push` can fail to register uniformly across all NFS clients. The negative cache causes the clients to 'remember' that the files did not exist previously.
| `hard` | Instead of `soft`. [Further details](#soft-mount-option).
#### `soft` mount option
It's recommended that you use `hard` in your mount options, unless you have a specific
reason to use `soft`.
On GitLab.com, we use `soft` because there were times when we had NFS servers
reboot and `soft` improved availability, but everyone's infrastructure is different.
If your NFS is provided by on-premise storage arrays with redundant controllers,
for example, you shouldn't need to worry about NFS server availability.
The NFS man page states:
> "soft" timeout can cause silent data corruption in certain cases
Read the [Linux man page](https://linux.die.net/man/5/nfs) to understand the difference,
and if you do use `soft`, ensure that you've taken steps to mitigate the risks.
If you experience behavior that might have been caused by
writes to disk on the NFS server not occurring, such as commits going missing,
use the `hard` option, because (from the man page):
> use the soft option only when client responsiveness is more important than data integrity
Other vendors make similar recommendations, including
[SAP](http://wiki.scn.sap.com/wiki/x/PARnFQ) and NetApp's
[knowledge base](https://kb.netapp.com/Advice_and_Troubleshooting/Data_Storage_Software/ONTAP_OS/What_are_the_differences_between_hard_mount_and_soft_mount);
they highlight that if the NFS client driver caches data, `soft` means there is no certainty whether
writes by GitLab are actually on disk.
Mount points set with the option `hard` may not perform as well, and if the
NFS server goes down, `hard` will cause processes to hang when interacting with
the mount point. Use `SIGKILL` (`kill -9`) to deal with hung processes.
The `intr` option
[stopped working in the 2.6 kernel](https://access.redhat.com/solutions/157873).
### A single NFS mount
It's recommended to nest all GitLab data directories within a single mount, which allows
automatic restore of backups without manually moving existing data.
```plaintext
mountpoint
└── gitlab-data
├── builds
├── git-data
├── shared
└── uploads
```
To do so, configure Omnibus with the paths to each directory nested
in the mount point. Mount `/gitlab-nfs`, then use the following Omnibus
configuration to move each data location to a subdirectory:
```ruby
git_data_dirs({"default" => { "path" => "/gitlab-nfs/gitlab-data/git-data"} })
gitlab_rails['uploads_directory'] = '/gitlab-nfs/gitlab-data/uploads'
gitlab_rails['shared_path'] = '/gitlab-nfs/gitlab-data/shared'
gitlab_ci['builds_directory'] = '/gitlab-nfs/gitlab-data/builds'
```
Run `sudo gitlab-ctl reconfigure` to start using the central location. Please
be aware that if you had existing data, you will need to manually copy or `rsync` it
to these new locations, and then restart GitLab.
### Bind mounts
As an alternative to changing the Omnibus configuration, bind mounts can be used
to store the data on an NFS mount.
Bind mounts provide a way to specify just one NFS mount and then
bind the default GitLab data locations to the NFS mount. Start by defining your
single NFS mount point as you normally would in `/etc/fstab`. Let's assume your
NFS mount point is `/gitlab-nfs`. Then, add the following bind mounts in
`/etc/fstab`:
```plaintext
/gitlab-nfs/gitlab-data/git-data /var/opt/gitlab/git-data none bind 0 0
/gitlab-nfs/gitlab-data/.ssh /var/opt/gitlab/.ssh none bind 0 0
/gitlab-nfs/gitlab-data/uploads /var/opt/gitlab/gitlab-rails/uploads none bind 0 0
/gitlab-nfs/gitlab-data/shared /var/opt/gitlab/gitlab-rails/shared none bind 0 0
/gitlab-nfs/gitlab-data/builds /var/opt/gitlab/gitlab-ci/builds none bind 0 0
```
Using bind mounts will require manually making sure the data directories
are empty before attempting a restore. Read more about the
[restore prerequisites](../raketasks/backup_restore.md).
You can view information and options set for each of the mounted NFS file
systems by running `nfsstat -m` and `cat /etc/fstab`.
### Multiple NFS mounts
When using the default Omnibus configuration, you will need to share 4 data locations
between all GitLab cluster nodes. No other locations should be shared. The
following are the 4 locations that need to be shared:
| Location | Description | Default configuration |
| -------- | ----------- | --------------------- |
| `/var/opt/gitlab/git-data` | Git repository data. This will account for a large portion of your data | `git_data_dirs({"default" => { "path" => "/var/opt/gitlab/git-data"} })`
| `/var/opt/gitlab/gitlab-rails/uploads` | User uploaded attachments | `gitlab_rails['uploads_directory'] = '/var/opt/gitlab/gitlab-rails/uploads'`
| `/var/opt/gitlab/gitlab-rails/shared` | Build artifacts, GitLab Pages, LFS objects, temp files, etc. If you're using LFS this may also account for a large portion of your data | `gitlab_rails['shared_path'] = '/var/opt/gitlab/gitlab-rails/shared'`
| `/var/opt/gitlab/gitlab-ci/builds` | GitLab CI/CD build traces | `gitlab_ci['builds_directory'] = '/var/opt/gitlab/gitlab-ci/builds'`
Other GitLab directories should not be shared between nodes. They contain
node-specific files and GitLab code that does not need to be shared. To ship
logs to a central location consider using remote syslog. Omnibus GitLab packages
provide configuration for [UDP log shipping](https://docs.gitlab.com/omnibus/settings/logs.html#udp-log-shipping-gitlab-enterprise-edition-only).
Having multiple NFS mounts will require manually making sure the data directories
are empty before attempting a restore. Read more about the
[restore prerequisites](../raketasks/backup_restore.md).
## NFS in a Firewalled Environment
If the traffic between your NFS server and NFS client(s) is subject to port filtering
by a firewall, then you will need to reconfigure that firewall to allow NFS communication.
[This guide from TLDP](http://tldp.org/HOWTO/NFS-HOWTO/security.html#FIREWALLS)
covers the basics of using NFS in a firewalled environment. Additionally, we encourage you to
search for and review the specific documentation for your operating system or distribution and your firewall software.
Example for Ubuntu:
Check that NFS traffic from the client is allowed by the firewall on the host by running
the command: `sudo ufw status`. If it's being blocked, then you can allow traffic from a specific
client with the command below.
```shell
sudo ufw allow from <client_ip_address> to any port nfs
```
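You can also confirm from a client that the server's NFS port is reachable through the firewall (a quick sketch; 2049 is the standard NFSv4 port):

```shell
# Succeeds if the server's NFS port is open
nc -zv <host_ip_address> 2049
```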
## Known issues
### Avoid using AWS's Elastic File System (EFS)
GitLab strongly recommends against using AWS Elastic File System (EFS).
Our support team will not be able to assist on performance issues related to
file system access.
Customers and users have reported that AWS EFS does not perform well for GitLab's
use case. Workloads where many small files are written in a serialized manner, like `git`,
are not well-suited for EFS. EBS with an NFS server on top will perform much better.
If you do choose to use EFS, avoid storing GitLab log files (e.g. those in `/var/log/gitlab`)
there because this will also affect performance. We recommend that the log files be
stored on a local volume.
For more details on another person's experience with EFS, see this [Commit Brooklyn 2019 video](https://youtu.be/K6OS8WodRBQ?t=313).
### Avoid using CephFS and GlusterFS
GitLab strongly recommends against using CephFS and GlusterFS.
These distributed file systems are not well-suited for GitLab's input/output access patterns: Git uses many small files, and the time it takes for access and file-locking operations to propagate makes Git activity very slow.
### Avoid using PostgreSQL with NFS
GitLab strongly recommends against running your PostgreSQL database
across NFS. The GitLab support team will not be able to assist on performance issues related to
this configuration.
Additionally, this configuration is specifically warned against in the
[PostgreSQL Documentation](https://www.postgresql.org/docs/current/creating-cluster.html#CREATING-CLUSTER-NFS):
>PostgreSQL does nothing special for NFS file systems, meaning it assumes NFS behaves exactly like
>locally-connected drives. If the client or server NFS implementation does not provide standard file
>system semantics, this can cause reliability problems. Specifically, delayed (asynchronous) writes
>to the NFS server can cause data corruption problems.
For a supported database architecture, please see our documentation on
[Configuring a Database for GitLab HA](postgresql/replication_and_failover.md).
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
...@@ -27,7 +27,7 @@ will _not_ carry over automatically, due to differences between the two applicat ...@@ -27,7 +27,7 @@ will _not_ carry over automatically, due to differences between the two applicat
deployments, see [Configuring Puma Settings](https://docs.gitlab.com/omnibus/settings/puma.html#configuring-puma-settings). deployments, see [Configuring Puma Settings](https://docs.gitlab.com/omnibus/settings/puma.html#configuring-puma-settings).
For Helm based deployments, see the [Webservice Chart documentation](https://docs.gitlab.com/charts/charts/gitlab/webservice/index.html). For Helm based deployments, see the [Webservice Chart documentation](https://docs.gitlab.com/charts/charts/gitlab/webservice/index.html).
Additionally we strongly recommend that multi-node deployments [configure their load balancers to utilize the readiness check](../high_availability/load_balancer.md#readiness-check) due to a difference between Unicorn and Puma in how they handle connections during a restart of the service. Additionally we strongly recommend that multi-node deployments [configure their load balancers to utilize the readiness check](../load_balancer.md#readiness-check) due to a difference between Unicorn and Puma in how they handle connections during a restart of the service.
## Performance caveat when using Puma with Rugged ## Performance caveat when using Puma with Rugged
......
...@@ -536,7 +536,7 @@ database encryption. Proceed with caution. ...@@ -536,7 +536,7 @@ database encryption. Proceed with caution.
1. Set up a new server. This will become the **Pages server**. 1. Set up a new server. This will become the **Pages server**.
1. Create an [NFS share](../high_availability/nfs_host_client_setup.md) 1. Create an [NFS share](../nfs.md)
on the **Pages server** and configure this share to on the **Pages server** and configure this share to
allow access from your main **GitLab server**. allow access from your main **GitLab server**.
Note that the example there is more general and Note that the example there is more general and
......
...@@ -32,7 +32,7 @@ If you use a cloud-managed service, or provide your own PostgreSQL instance: ...@@ -32,7 +32,7 @@ If you use a cloud-managed service, or provide your own PostgreSQL instance:
gitlab_rails['db_password'] = 'DB password' gitlab_rails['db_password'] = 'DB password'
``` ```
For more information on GitLab HA setups, refer to [configuring GitLab for HA](../high_availability/gitlab.md). For more information on GitLab multi-node setups, refer to the [reference architectures](../reference_architectures/index.md).
1. Reconfigure for the changes to take effect: 1. Reconfigure for the changes to take effect:
......
---
type: reference
---
# Working with the bundled PgBouncer service **(PREMIUM ONLY)**
[PgBouncer](http://www.pgbouncer.org/) is used to seamlessly migrate database
connections between servers in a failover scenario. Additionally, it can be used
in a non-fault-tolerant setup to pool connections, speeding up response time
while reducing resource usage.
GitLab Premium includes a bundled version of PgBouncer that can be managed
through `/etc/gitlab/gitlab.rb`.
## PgBouncer as part of a fault-tolerant GitLab installation
This content has been moved to a [new location](replication_and_failover.md#configuring-the-pgbouncer-node).
## PgBouncer as part of a non-fault-tolerant GitLab installation
1. Generate PGBOUNCER_USER_PASSWORD_HASH with the command `gitlab-ctl pg-password-md5 pgbouncer`
1. Generate SQL_USER_PASSWORD_HASH with the command `gitlab-ctl pg-password-md5 gitlab`. We'll also need to enter the plaintext SQL_USER_PASSWORD later
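For example (a sketch; each command prompts for a password and prints the MD5 hash to use in place of the placeholder):

```shell
sudo gitlab-ctl pg-password-md5 pgbouncer
sudo gitlab-ctl pg-password-md5 gitlab
```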
1. On your database node, ensure the following is set in your `/etc/gitlab/gitlab.rb`
```ruby
postgresql['pgbouncer_user_password'] = 'PGBOUNCER_USER_PASSWORD_HASH'
postgresql['sql_user_password'] = 'SQL_USER_PASSWORD_HASH'
postgresql['listen_address'] = 'XX.XX.XX.Y' # Where XX.XX.XX.Y is the ip address on the node postgresql should listen on
postgresql['md5_auth_cidr_addresses'] = %w(AA.AA.AA.B/32) # Where AA.AA.AA.B is the IP address of the pgbouncer node
```
1. Run `gitlab-ctl reconfigure`
NOTE: **Note:**
If the database was already running, it will need to be restarted after reconfigure by running `gitlab-ctl restart postgresql`.
1. On the node you are running PgBouncer on, make sure the following is set in `/etc/gitlab/gitlab.rb`
```ruby
pgbouncer['enable'] = true
pgbouncer['databases'] = {
gitlabhq_production: {
host: 'DATABASE_HOST',
user: 'pgbouncer',
password: 'PGBOUNCER_USER_PASSWORD_HASH'
}
}
```
1. Run `gitlab-ctl reconfigure`
1. On the node running Puma, make sure the following is set in `/etc/gitlab/gitlab.rb`:
```ruby
gitlab_rails['db_host'] = 'PGBOUNCER_HOST'
gitlab_rails['db_port'] = '6432'
gitlab_rails['db_password'] = 'SQL_USER_PASSWORD'
```
1. Run `gitlab-ctl reconfigure`
1. At this point, your instance should connect to the database through PgBouncer. If you are having issues, see the [Troubleshooting](#troubleshooting) section; a quick connectivity check is sketched below.
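A quick connectivity check, assuming `psql` is available on the Puma node; `PGBOUNCER_HOST` is the same placeholder used in the configuration above:

```shell
# Connect through PgBouncer (port 6432) rather than directly to PostgreSQL (5432).
# Enter the plaintext SQL_USER_PASSWORD when prompted.
psql -h PGBOUNCER_HOST -p 6432 -U gitlab -d gitlabhq_production -c 'SELECT 1;'
```

If the query succeeds, the application node can reach the database through the pooler.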
## Enable Monitoring
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
If you enable Monitoring, it must be enabled on **all** PgBouncer servers.
1. Create/edit `/etc/gitlab/gitlab.rb` and add the following configuration:
```ruby
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '0.0.0.0:9100'
pgbouncer_exporter['listen_address'] = '0.0.0.0:9188'
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
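To verify the exporters after reconfiguring, you can scrape their endpoints directly. A minimal sketch, assuming you run it on the PgBouncer node with the listen addresses configured above:

```shell
# Each exporter serves Prometheus metrics over plain HTTP.
curl -s http://localhost:9188/metrics | head -n 5   # pgbouncer_exporter
curl -s http://localhost:9100/metrics | head -n 5   # node_exporter
```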
## Administrative console
As part of Omnibus GitLab, a command is provided to automatically connect to the
PgBouncer administrative console. See the
[PgBouncer documentation](https://www.pgbouncer.org/usage.html#admin-console)
for detailed instructions on how to interact with the console.
To start a session, run the following and provide the password for the `pgbouncer`
user:
```shell
sudo gitlab-ctl pgb-console
```
To get some basic information about the instance:
```shell
pgbouncer=# show databases; show clients; show servers;
name | host | port | database | force_user | pool_size | reserve_pool | pool_mode | max_connections | current_connections
---------------------+-----------+------+---------------------+------------+-----------+--------------+-----------+-----------------+---------------------
gitlabhq_production | 127.0.0.1 | 5432 | gitlabhq_production | | 100 | 5 | | 0 | 1
pgbouncer | | 6432 | pgbouncer | pgbouncer | 2 | 0 | statement | 0 | 0
(2 rows)
type | user | database | state | addr | port | local_addr | local_port | connect_time | request_time | ptr | link | remote_pid | tls
------+-----------+---------------------+--------+-----------+-------+------------+------------+---------------------+---------------------+-----------+------+------------+-----
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44590 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x12444c0 | | 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44592 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x12447c0 | | 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44594 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x1244940 | | 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44706 | 127.0.0.1 | 6432 | 2018-04-24 22:14:22 | 2018-04-24 22:16:31 | 0x1244ac0 | | 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44708 | 127.0.0.1 | 6432 | 2018-04-24 22:14:22 | 2018-04-24 22:15:15 | 0x1244c40 | | 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44794 | 127.0.0.1 | 6432 | 2018-04-24 22:15:15 | 2018-04-24 22:15:15 | 0x1244dc0 | | 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44798 | 127.0.0.1 | 6432 | 2018-04-24 22:15:15 | 2018-04-24 22:16:31 | 0x1244f40 | | 0 |
C | pgbouncer | pgbouncer | active | 127.0.0.1 | 44660 | 127.0.0.1 | 6432 | 2018-04-24 22:13:51 | 2018-04-24 22:17:12 | 0x1244640 | | 0 |
(8 rows)

type | user | database | state | addr | port | local_addr | local_port | connect_time | request_time | ptr | link | remote_pid | tls
------+--------+---------------------+-------+-----------+------+------------+------------+---------------------+---------------------+-----------+------+------------+-----
S | gitlab | gitlabhq_production | idle | 127.0.0.1 | 5432 | 127.0.0.1 | 35646 | 2018-04-24 22:15:15 | 2018-04-24 22:17:10 | 0x124dca0 | | 19980 |
(1 row)
```
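Beyond the `show` commands above, a few other standard PgBouncer admin-console commands can be useful; for example (a sketch, entered at the `pgbouncer=#` prompt):

```shell
pgbouncer=# show pools;
pgbouncer=# show stats;
pgbouncer=# reload;
```

`show pools` and `show stats` report per-database pool state and traffic counters, and `reload` re-reads the PgBouncer configuration file.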
## Troubleshooting
If you are experiencing issues connecting through PgBouncer, the first place to
check is the logs:
```shell
sudo gitlab-ctl tail pgbouncer
```
You can also check the output of `show databases` in the
[administrative console](#administrative-console). You would expect
to see values in the `host` field for the `gitlabhq_production` database,
and `current_connections` should be greater than 1.
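For example, to run that check from the PgBouncer node (using the `gitlab-ctl pgb-console` command described in the [administrative console](#administrative-console) section):

```shell
sudo gitlab-ctl pgb-console
pgbouncer=# show databases;
```

If the `host` field is empty, PgBouncer has no backend to route to; re-check the `pgbouncer['databases']` entry in `/etc/gitlab/gitlab.rb`.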
### Message: `LOG: invalid CIDR mask in address`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-cidr-mask-in-address).
### Message: `LOG: invalid IP mask "md5": Name or service not known`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-ip-mask-md5-name-or-service-not-known).
@@ -203,7 +203,7 @@ When installing the GitLab package, do not supply `EXTERNAL_URL` value.

### Configuring the Database nodes

1. Make sure to [configure the Consul nodes](../consul.md).
1. Make sure you collect [`CONSUL_SERVER_NODES`](#consul-information), [`PGBOUNCER_PASSWORD_HASH`](#pgbouncer-information), [`POSTGRESQL_PASSWORD_HASH`](#postgresql-information), the [number of db nodes](#postgresql-information), and the [network address](#network-information) before executing the next step.
1. On the master database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
@@ -466,7 +466,7 @@ Check the [Troubleshooting section](#troubleshooting) before proceeding.

   gitlab-ctl write-pgpass --host 127.0.0.1 --database pgbouncer --user pgbouncer --hostuser gitlab-consul
   ```

1. [Enable monitoring](../postgresql/pgbouncer.md#enable-monitoring)

#### PgBouncer Checkpoint
@@ -795,7 +795,7 @@ After deploying the configuration follow these steps:

This example uses 3 PostgreSQL servers and 1 application node (with PgBouncer set up alongside).

It differs from the [recommended setup](#example-recommended-setup) by moving the Consul servers into the same servers we use for PostgreSQL.
The trade-off is a reduced server count, against the increased operational complexity of needing to deal with PostgreSQL [failover](#failover-procedure) and [restore](#restore-procedure) procedures in addition to [Consul outage recovery](../consul.md#outage-recovery) on the same set of machines.

In this example we start with all servers on the same 10.6.0.0/16 private network range; they can connect to each other freely on those addresses.
@@ -1087,7 +1087,7 @@ To restart either service, run `gitlab-ctl restart SERVICE`

For PostgreSQL, it is usually safe to restart the master node by default. Automatic failover defaults to a 1 minute timeout. Provided the database returns before then, nothing else needs to be done. To be safe, you can stop `repmgrd` on the standby nodes first with `gitlab-ctl stop repmgrd`, then start afterwards with `gitlab-ctl start repmgrd`.

On the Consul server nodes, it is important to [restart the Consul service](../consul.md#restart-consul) in a controlled manner.

### `gitlab-ctl repmgr-check-master` command produces errors

@@ -1134,11 +1134,10 @@ postgresql['trust_auth_cidr_addresses'] = %w(123.123.123.123/32 <other_cidrs>)

### Issues with other components

If you're running into an issue with a component not outlined here, be sure to check the troubleshooting section of their specific documentation page:

- [Consul](../consul.md#troubleshooting-consul)
- [PostgreSQL](https://docs.gitlab.com/omnibus/settings/database.html#troubleshooting)
## Patroni
@@ -466,7 +466,7 @@ While it doesn't require a list of all Sentinel nodes, in case of a failure,

it needs to access at least one of those listed.

NOTE: **Note:**
The following steps should be performed in the GitLab application server,
which ideally should not have Redis or Sentinels on it for an HA setup.

1. SSH into the server where the GitLab application is installed.
@@ -736,6 +736,5 @@ Read more:

1. [Reference architectures](../reference_architectures/index.md)
1. [Configure the database](../postgresql/replication_and_failover.md)
1. [Configure NFS](../nfs.md)
1. [Configure the load balancers](../load_balancer.md)
@@ -221,7 +221,7 @@ the correct credentials for the Sentinel nodes.

While it doesn't require a list of all Sentinel nodes, in case of a failure,
it needs to access at least one of the listed ones.

The following steps should be performed in the GitLab application server,
which ideally should not have Redis or Sentinels on the same machine:

1. Edit `/home/git/gitlab/config/resque.yml` following the example in
@@ -557,7 +557,7 @@ On each node perform the following:

1. Specify the necessary NFS mounts in `/etc/fstab`.
   The exact contents of `/etc/fstab` will depend on how you chose
   to configure your NFS server. See the [NFS documentation](../nfs.md)
   for examples and the various options.
1. Create the shared directories. These may be different depending on your NFS
@@ -855,7 +855,7 @@ along with [Gitaly](#configure-gitaly), are recommended over using NFS whenever

possible. However, if you intend to use GitLab Pages,
[you must use NFS](troubleshooting.md#gitlab-pages-requires-nfs).

For information about configuring NFS, see the [NFS documentation page](../nfs.md).

<div align="right">
  <a type="button" class="btn btn-default" href="#setup-components">
@@ -1445,7 +1445,7 @@ On each node perform the following:

1. Specify the necessary NFS mounts in `/etc/fstab`.
   The exact contents of `/etc/fstab` will depend on how you chose
   to configure your NFS server. See the [NFS documentation](../nfs.md)
   for examples and the various options.
1. Create the shared directories. These may be different depending on your NFS
@@ -1754,7 +1754,7 @@ work.

are recommended over NFS wherever possible for improved performance. If you intend
to use GitLab Pages, this currently [requires NFS](troubleshooting.md#gitlab-pages-requires-nfs).
See how to [configure NFS](../nfs.md).

<div align="right">
  <a type="button" class="btn btn-default" href="#setup-components">
@@ -1444,7 +1444,7 @@ On each node perform the following:

1. Specify the necessary NFS mounts in `/etc/fstab`.
   The exact contents of `/etc/fstab` will depend on how you chose
   to configure your NFS server. See the [NFS documentation](../nfs.md)
   for examples and the various options.
1. Create the shared directories. These may be different depending on your NFS
@@ -1753,7 +1753,7 @@ work.

are recommended over NFS wherever possible for improved performance. If you intend
to use GitLab Pages, this currently [requires NFS](troubleshooting.md#gitlab-pages-requires-nfs).
See how to [configure NFS](../nfs.md).

<div align="right">
  <a type="button" class="btn btn-default" href="#setup-components">
@@ -177,7 +177,7 @@ column.

| NFS ([5](#footnotes)) ([7](#footnotes)) | Shared disk storage service. Can be used as an alternative Object Storage. Required for GitLab Pages | [NFS configuration](../high_availability/nfs.md) | No |
| [Consul](../../development/architecture.md#consul) ([3](#footnotes)) | Service discovery and health checks/failover | [Consul configuration](../high_availability/consul.md) **(PREMIUM ONLY)** | Yes |
| [PostgreSQL](../../development/architecture.md#postgresql) | Database | [PostgreSQL configuration](https://docs.gitlab.com/omnibus/settings/database.html) | Yes |
| [PgBouncer](../../development/architecture.md#pgbouncer) | Database connection pooler | [PgBouncer configuration](../postgresql/pgbouncer.md) **(PREMIUM ONLY)** | Yes |
| Repmgr | PostgreSQL cluster management and failover | [PostgreSQL and Repmgr configuration](../postgresql/replication_and_failover.md) | Yes |
| Patroni | An alternative PostgreSQL cluster management and failover | [PostgreSQL and Patroni configuration](../postgresql/replication_and_failover.md#patroni) | Yes |
| [Redis](../../development/architecture.md#redis) ([3](#footnotes)) | Key/value store for fast data lookup and caching | [Redis configuration](../high_availability/redis.md) | Yes |