Commit c0c1433f authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

Parent 2e4dcef6
......@@ -45,8 +45,6 @@ rules:
no-jquery/no-serialize: error
promise/always-return: off
promise/no-callback-in-promise: off
# Make update to eslint@6 smoother:
prefer-object-spread: off
overrides:
- files:
- '**/spec/**/*'
......
......@@ -43,7 +43,6 @@ code_quality:
extends:
- .default-retry
- .reports:rules:sast
- .use-docker-in-docker
stage: test
# `needs: []` starts the job immediately in the pipeline
# https://docs.gitlab.com/ee/ci/yaml/README.html#needs
......@@ -55,10 +54,6 @@ code_quality:
sast: gl-sast-report.json
expire_in: 1 week # GitLab-specific
variables:
# emptying DOCKER_HOST so it can be detected properly on kubernetes executor
# with the script below
DOCKER_HOST: ""
DOCKER_DRIVER: overlay2
DOCKER_TLS_CERTDIR: ""
SAST_ANALYZER_IMAGE_PREFIX: "registry.gitlab.com/gitlab-org/security-products/analyzers"
SAST_ANALYZER_IMAGE_TAG: 2
......
<script>
import { GlNewDropdown, GlNewDropdownItem, GlTabs, GlTab } from '@gitlab/ui';
import { GlNewDropdown, GlNewDropdownItem, GlTabs, GlTab, GlButton } from '@gitlab/ui';
import { s__ } from '~/locale';
import query from '../graphql/queries/details.query.graphql';
import { fetchPolicies } from '~/lib/graphql';
import glFeatureFlagsMixin from '~/vue_shared/mixins/gl_feature_flags_mixin';
export default {
statuses: {
......@@ -19,7 +20,9 @@ export default {
GlNewDropdownItem,
GlTab,
GlTabs,
GlButton,
},
mixins: [glFeatureFlagsMixin()],
props: {
alertId: {
type: String,
......@@ -29,6 +32,10 @@ export default {
type: String,
required: true,
},
newIssuePath: {
type: String,
required: true,
},
},
apollo: {
alert: {
......@@ -52,9 +59,22 @@ export default {
</script>
<template>
<div>
<div v-if="alert" class="d-flex justify-content-between border-bottom pb-2 pt-1">
<div></div>
<gl-new-dropdown class="align-self-center" right>
<div
v-if="alert"
class="gl-display-flex justify-content-end gl-border-b-1 gl-border-b-gray-200 gl-border-b-solid gl-p-4"
>
<gl-button
v-if="glFeatures.createIssueFromAlertEnabled"
data-testid="createIssueBtn"
:href="newIssuePath"
category="primary"
variant="success"
>
{{ s__('AlertManagement|Create issue') }}
</gl-button>
</div>
<div class="gl-display-flex justify-content-end">
<gl-new-dropdown right>
<gl-new-dropdown-item
v-for="(label, field) in $options.statuses"
:key="field"
......
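The new button above is gated on `glFeatures.createIssueFromAlertEnabled`. A minimal sketch of how that value is assumed to arrive, given the `push_frontend_feature_flag` call in the controller hunk further down (the `gon` wiring and camel-casing are assumptions about the standard mixin behavior, not confirmed by this diff):

```javascript
// Illustrative only: the controller pushes :create_issue_from_alert_enabled,
// which is assumed to land on `gon.features` in camelCase before the app boots.
window.gon = { features: { createIssueFromAlertEnabled: true } };

// glFeatureFlagsMixin is assumed to expose those flags as `this.glFeatures`,
// so the template's v-if evaluates to true and the "Create issue" button renders.
```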
......@@ -7,7 +7,7 @@ Vue.use(VueApollo);
export default selector => {
const domEl = document.querySelector(selector);
const { alertId, projectPath } = domEl.dataset;
const { alertId, projectPath, newIssuePath } = domEl.dataset;
const apolloProvider = new VueApollo({
defaultClient: createDefaultClient(),
......@@ -25,6 +25,7 @@ export default selector => {
props: {
alertId,
projectPath,
newIssuePath,
},
});
},
......
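For context, a sketch of the mount point this file reads from; the element and values are hypothetical, but the camel-casing of `data-*` attributes into `dataset` keys is standard DOM behavior:

```javascript
// Hypothetical markup rendered by the HAML view further down:
// <div id="js-alert_details" data-alert-id="1" data-project-path="group/project"
//      data-new-issue-path="/group/project/-/issues/new"></div>
const el = document.querySelector('#js-alert_details');
const { alertId, projectPath, newIssuePath } = el.dataset;
// dataset values are strings: alertId === '1',
// newIssuePath === '/group/project/-/issues/new'
```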
......@@ -345,6 +345,7 @@ export default {
ref="copyChartLink"
v-track-event="generateLinkToChartOptions(clipboardText)"
:data-clipboard-text="clipboardText"
data-qa-selector="generate_chart_link_menu_item"
@click="showToast(clipboardText)"
>
{{ __('Copy link to chart') }}
......
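This hunk, like several below, adds a stable QA selector. GitLab's end-to-end suite is Ruby-based, but as a plain DOM sketch, the kind of lookup these attributes enable looks like this (the selector value comes from the hunk; the rest is illustrative):

```javascript
// Locate the menu item via its dedicated test selector, independent of
// presentation classes that may change.
const menuItem = document.querySelector(
  '[data-qa-selector="generate_chart_link_menu_item"]',
);
if (menuItem) menuItem.click(); // e.g. trigger the copy-link action in a test
```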
import { s__ } from '~/locale';
import { s__, __ } from '~/locale';
export const BRANCH_SUFFIX_COUNT = 8;
export const DEFAULT_TARGET_BRANCH = 'master';
......@@ -10,5 +10,8 @@ export const SUBMIT_CHANGES_COMMIT_ERROR = s__(
export const SUBMIT_CHANGES_MERGE_REQUEST_ERROR = s__(
'StaticSiteEditor|Could not create merge request.',
);
export const LOAD_CONTENT_ERROR = __(
  'An error occurred while loading your content. Please try again.',
);
export const DEFAULT_HEADING = s__('StaticSiteEditor|Static site editor');
......@@ -3,18 +3,29 @@ import VueApollo from 'vue-apollo';
import createDefaultClient from '~/lib/graphql';
import typeDefs from './typedefs.graphql';
import fileResolver from './resolvers/file';
Vue.use(VueApollo);
const createApolloProvider = data => {
const createApolloProvider = appData => {
const defaultClient = createDefaultClient(
{},
{
Project: {
file: fileResolver,
},
},
{
typeDefs,
},
);
defaultClient.cache.writeData({
data,
data: {
appData: {
__typename: 'AppData',
...appData,
},
},
});
return new VueApollo({
......
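A sketch of how the refactored factory is presumably called and what the `writeData` call above seeds into the cache (argument values are hypothetical; the `appData` wrapper shape comes from this hunk):

```javascript
// Hypothetical call site: all mount-time values now live under a single
// client-side `appData` object instead of loose top-level cache fields.
const apolloProvider = createApolloProvider({
  isSupportedContent: true,
  project: 'gitlab-org/gitlab',
  sourcePath: 'README.md',
  returnUrl: '/',
});
// The cache then holds:
// { appData: { __typename: 'AppData', isSupportedContent: true, ... } }
```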
query appData {
isSupportedContent @client
appData @client {
isSupportedContent
project
sourcePath
returnUrl
}
}
query sourceContent($project: ID!, $sourcePath: String!) {
project(fullPath: $project) {
fullPath,
file(path: $sourcePath) @client {
title
content
}
}
}
import loadSourceContent from '../../services/load_source_content';
const fileResolver = ({ fullPath: projectId }, { path: sourcePath }) => {
return loadSourceContent({ projectId, sourcePath }).then(sourceContent => ({
// eslint-disable-next-line @gitlab/require-i18n-strings
__typename: 'File',
...sourceContent,
}));
};
export default fileResolver;
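To illustrate the resolver contract, a hypothetical direct invocation: Apollo is expected to call the resolver with the parent `Project` (whose `fullPath` the query selected) and the field arguments:

```javascript
// Hypothetical call; loadSourceContent would normally fetch from the API.
fileResolver({ fullPath: 'gitlab-org/gitlab' }, { path: 'README.md' }).then(file => {
  // file => { __typename: 'File', title: '...', content: '...' }
});
```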
extend type Query {
type File {
title: String
content: String!
}
extend type Project {
file(path: ID!): File
}
type AppData {
isSupportedContent: Boolean!
project: String!
returnUrl: String
sourcePath: String!
username: String!
}
extend type Query {
appData: AppData!
}
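With these client-side typedefs in place, the seeded object can be read back in a single query. A minimal sketch, assuming `apolloProvider` from the factory sketched earlier (`gql` and `readQuery` are standard Apollo APIs; the wiring here is illustrative):

```javascript
import gql from 'graphql-tag';

const APP_DATA_QUERY = gql`
  query appData {
    appData @client {
      isSupportedContent
      project
      sourcePath
      returnUrl
    }
  }
`;

// Assumes the provider built by createApolloProvider above.
const { appData } = apolloProvider.defaultClient.readQuery({ query: APP_DATA_QUERY });
```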
......@@ -12,6 +12,11 @@ import InvalidContentMessage from '../components/invalid_content_message.vue';
import SubmitChangesError from '../components/submit_changes_error.vue';
import appDataQuery from '../graphql/queries/app_data.query.graphql';
import sourceContentQuery from '../graphql/queries/source_content.query.graphql';
import createFlash from '~/flash';
import { LOAD_CONTENT_ERROR } from '../constants';
export default {
components: {
......@@ -26,9 +31,31 @@ export default {
},
mixins: [glFeatureFlagsMixin()],
apollo: {
isSupportedContent: {
appData: {
query: appDataQuery,
},
sourceContent: {
query: sourceContentQuery,
update: ({
project: {
file: { title, content },
},
}) => {
return { title, content };
},
variables() {
return {
project: this.appData.project,
sourcePath: this.appData.sourcePath,
};
},
skip() {
return !this.appData.isSupportedContent;
},
error() {
createFlash(LOAD_CONTENT_ERROR);
},
},
},
computed: {
...mapState([
......@@ -44,7 +71,7 @@ export default {
...mapGetters(['contentChanged']),
},
mounted() {
if (this.isSupportedContent) {
if (this.appData.isSupportedContent) {
this.loadContent();
}
},
......@@ -66,7 +93,7 @@ export default {
/>
<!-- Main view -->
<template v-else-if="isSupportedContent">
<template v-else-if="appData.isSupportedContent">
<div v-if="isLoadingContent" class="w-50 h-50">
<gl-skeleton-loader :width="500" :height="102">
<rect width="500" height="16" rx="4" />
......
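The `update` hook above only reshapes the raw query result; a tiny standalone sketch of that mapping (sample data is hypothetical):

```javascript
const update = ({ project: { file: { title, content } } }) => ({ title, content });

update({ project: { file: { title: 'Home', content: '# Hello' } } });
// => { title: 'Home', content: '# Hello' }
```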
import createFlash from '~/flash';
import { __ } from '~/locale';
import * as mutationTypes from './mutation_types';
import loadSourceContent from '~/static_site_editor/services/load_source_content';
import submitContentChanges from '~/static_site_editor/services/submit_content_changes';
import { LOAD_CONTENT_ERROR } from '../constants';
export const loadContent = ({ commit, state: { sourcePath, projectId } }) => {
commit(mutationTypes.LOAD_CONTENT);
......@@ -12,7 +13,7 @@ export const loadContent = ({ commit, state: { sourcePath, projectId } }) => {
.then(data => commit(mutationTypes.RECEIVE_CONTENT_SUCCESS, data))
.catch(() => {
commit(mutationTypes.RECEIVE_CONTENT_ERROR);
createFlash(__('An error occurred while loading your content. Please try again.'));
createFlash(LOAD_CONTENT_ERROR);
});
};
......
......@@ -6,6 +6,7 @@ class Projects::AlertManagementController < Projects::ApplicationController
before_action :authorize_read_alert_management_alert!
before_action do
push_frontend_feature_flag(:alert_list_status_filtering_enabled)
push_frontend_feature_flag(:create_issue_from_alert_enabled)
end
def index
......
......@@ -11,10 +11,11 @@ module Projects::AlertManagementHelper
}
end
def alert_management_detail_data(project_path, alert_id)
def alert_management_detail_data(project, alert_id)
{
'alert-id' => alert_id,
'project-path' => project_path
'project-path' => project.full_path,
'new-issue-path' => new_project_issue_path(project)
}
end
end
......@@ -203,7 +203,7 @@
- if project_nav_tab? :operations
= nav_link(controller: sidebar_operations_paths) do
= link_to sidebar_operations_link_path, class: 'shortcuts-operations qa-link-operations' do
= link_to sidebar_operations_link_path, class: 'shortcuts-operations', data: { qa_selector: 'operations_link' } do
.nav-icon-container
= sprite_icon('cloud-gear')
%span.nav-item-name
......@@ -375,7 +375,7 @@
= _('CI / CD')
- if !@project.archived? && settings_operations_available?
= nav_link(controller: [:operations]) do
= link_to project_settings_operations_path(@project), title: _('Operations') do
= link_to project_settings_operations_path(@project), title: _('Operations'), data: { qa_selector: 'operations_settings_link' } do
= _('Operations')
- if @project.pages_available?
= nav_link(controller: :pages) do
......
- add_to_breadcrumbs s_('AlertManagement|Alerts'), project_alert_management_index_path(@project)
- page_title s_('AlertManagement|Alert detail')
#js-alert_details{ data: alert_management_detail_data(@project.full_path, @alert_id) }
#js-alert_details{ data: alert_management_detail_data(@project, @alert_id) }
......@@ -2,7 +2,7 @@
- setting = project_incident_management_setting
- templates = setting.available_issue_templates.map { |t| [t.name, t.key] }
%section.settings.no-animate.qa-incident-management-settings
%section.settings.no-animate.qa-incident-management-settings{ data: { qa_selector: 'incidents_settings_content' } }
.settings-header
%h3{ :class => "h4" }= _('Incidents')
%button.btn.js-settings-toggle{ type: 'button' }
......@@ -17,16 +17,16 @@
.form-group
= f.fields_for :incident_management_setting_attributes, setting do |form|
.form-group
= form.check_box :create_issue
= form.check_box :create_issue, data: { qa_selector: 'create_issue_checkbox' }
= form.label :create_issue, _('Create an issue. Issues are created for each alert triggered.'), class: 'form-check-label'
.form-group.col-sm-8
= form.label :issue_template_key, class: 'label-bold' do
= _('Issue template (optional)')
= link_to icon('question-circle'), help_page_path('user/project/description_templates', anchor: 'creating-issue-templates'), target: '_blank', rel: 'noopener noreferrer'
.select-wrapper
= form.select :issue_template_key, templates, {include_blank: 'No template selected'}, class: "form-control select-control"
= form.select :issue_template_key, templates, {include_blank: 'No template selected'}, class: "form-control select-control", data: { qa_selector: 'incident_templates_dropdown' }
= icon('chevron-down')
.form-group
= form.check_box :send_email
= form.label :send_email, _('Send a separate email notification to Developers.'), class: 'form-check-label'
= f.submit _('Save changes'), class: 'btn btn-success'
= f.submit _('Save changes'), class: 'btn btn-success', data: { qa_selector: 'save_changes_button' }
---
title: Add docs for alert management list
merge_request: 31225
author:
type: other
---
title: Fix mapping group members as Jira issue authors/assignees
merge_request: 30820
author:
type: fixed
# Reference architecture: up to 10,000 users
This page describes the GitLab reference architecture for up to 10,000 users.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
> - **Supported users (approximate):** 10,000
> - **High Availability:** True
> - **Test RPS rates:** API: 200 RPS, Web: 20 RPS, Git: 20 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|----------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 3 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | c5.9xlarge | F32s v2 |
| PostgreSQL | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 16 vCPU, 60GB Memory | n1-standard-16 | m5.4xlarge | D16s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
## Footnotes
1. In our architectures, we run each GitLab Rails node using the Puma webserver
   with its number of workers set to 90% of available CPUs, along with four threads. For
   nodes that are running Rails with other components, the worker value should be reduced
   accordingly; we've found that 50% achieves a good balance, but this depends
   on workload.
1. Gitaly node requirements are dependent on customer data, specifically the number of
   projects and their sizes. We recommend two nodes as an absolute minimum for HA environments,
   and at least four nodes when supporting 50,000 or more users.
   We also recommend that each Gitaly node store no more than 5TB of data
   and have the number of [`gitaly-ruby` workers](../gitaly/index.md#gitaly-ruby)
   set to 20% of available CPUs. Additional nodes should be considered in conjunction
   with a review of expected data size and spread based on the recommendations above.
1. The recommended Redis setup differs depending on the size of the architecture.
   For smaller architectures (fewer than 3,000 users), a single instance should suffice.
   For medium-sized installs (3,000 - 5,000), we suggest one Redis cluster for all
   classes, with Redis Sentinel hosted alongside Consul.
   For larger architectures (10,000 users or more), we suggest running a separate
   [Redis Cluster](../high_availability/redis.md#running-multiple-redis-clusters) for the Cache class
   and another for the Queues and Shared State classes. We also recommend
   running a separate Redis Sentinel cluster for each Redis Cluster.
1. For data objects such as LFS, Uploads, and Artifacts, we recommend an [Object Storage service](../object_storage.md)
   over NFS where possible, due to better performance and availability.
1. NFS can be used as an alternative for both repository data (replacing Gitaly) and
   object storage, but this isn't typically recommended for performance reasons. Note,
   however, that it is required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196).
1. Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
   as the load balancer. Although other load balancers with similar feature sets
   could also be used, those load balancers have not been validated.
1. We strongly recommend that any Gitaly or NFS nodes use SSD disks rather than
   HDD, with a throughput of at least 8,000 IOPS for read operations and 2,000 IOPS
   for writes, as these components have heavy I/O. These IOPS values are recommended
   only as a starting point; over time they may be adjusted higher or lower depending
   on the scale of your environment's workload. If you're running the environment on a
   cloud provider, you may need to refer to their documentation on how to configure IOPS correctly.
1. The architectures were built and tested with the [Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
   CPU platform on GCP. On different hardware, you may find that adjustments, either
   lower or higher, are required for your CPU or node counts. For more information, a
   [Sysbench](https://github.com/akopytov/sysbench) benchmark of the CPU can be found
   [here](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks).
1. AWS-equivalent and Azure-equivalent configurations are rough suggestions
   and may change in the future. They have not yet been tested and validated.
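As a worked example of footnotes 1 and 2 for this 10,000-user architecture (a rule of thumb applied to the table above, not an official formula; the rounding is a judgment call):

```javascript
// Rails nodes are n1-highcpu-32: 90% of 32 vCPUs → 28 Puma workers, 4 threads each.
const pumaWorkers = Math.floor(32 * 0.9); // 28

// Gitaly nodes are n1-standard-16: 20% of 16 vCPUs → ~3 gitaly-ruby workers.
const gitalyRubyWorkers = Math.floor(16 * 0.2); // 3
```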
# Reference architecture: up to 1,000 users
This page describes the GitLab reference architecture for up to 1,000 users.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
> - **Supported users (approximate):** 1,000
> - **High Availability:** False
| Users | Configuration ([8](#footnotes)) | GCP type | AWS type ([9](#footnotes)) |
|-------|--------------------------------|---------------|---------------------------|
| 100 | 2 vCPU, 7.2GB Memory | n1-standard-2 | c5.2xlarge |
| 500 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge |
| 1000 | 8 vCPU, 30GB Memory | n1-standard-8 | m5.2xlarge |
For situations where you need to serve up to 1,000 users, a single-node
solution with [frequent backups](index.md#automated-backups-core-only) is appropriate
for many organizations. With automatic backup of the GitLab repositories,
configuration, and the database, this is the ideal solution if you don't have
strict availability requirements.
## Setup instructions
- For this default reference architecture, use the standard [installation instructions](../../install/README.md) to install GitLab.
NOTE: **Note:**
You can also optionally configure GitLab to use an
[external PostgreSQL service](../external_database.md) or an
[external object storage service](../high_availability/object_storage.md) for
added performance and reliability at a reduced complexity cost.
## Footnotes
1. In our architectures, we run each GitLab Rails node using the Puma webserver
   with its number of workers set to 90% of available CPUs, along with four threads. For
   nodes that are running Rails with other components, the worker value should be reduced
   accordingly; we've found that 50% achieves a good balance, but this depends
   on workload.
1. Gitaly node requirements are dependent on customer data, specifically the number of
   projects and their sizes. We recommend two nodes as an absolute minimum for HA environments,
   and at least four nodes when supporting 50,000 or more users.
   We also recommend that each Gitaly node store no more than 5TB of data
   and have the number of [`gitaly-ruby` workers](../gitaly/index.md#gitaly-ruby)
   set to 20% of available CPUs. Additional nodes should be considered in conjunction
   with a review of expected data size and spread based on the recommendations above.
1. The recommended Redis setup differs depending on the size of the architecture.
   For smaller architectures (fewer than 3,000 users), a single instance should suffice.
   For medium-sized installs (3,000 - 5,000), we suggest one Redis cluster for all
   classes, with Redis Sentinel hosted alongside Consul.
   For larger architectures (10,000 users or more), we suggest running a separate
   [Redis Cluster](../high_availability/redis.md#running-multiple-redis-clusters) for the Cache class
   and another for the Queues and Shared State classes. We also recommend
   running a separate Redis Sentinel cluster for each Redis Cluster.
1. For data objects such as LFS, Uploads, and Artifacts, we recommend an [Object Storage service](../object_storage.md)
   over NFS where possible, due to better performance and availability.
1. NFS can be used as an alternative for both repository data (replacing Gitaly) and
   object storage, but this isn't typically recommended for performance reasons. Note,
   however, that it is required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196).
1. Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
   as the load balancer. Although other load balancers with similar feature sets
   could also be used, those load balancers have not been validated.
1. We strongly recommend that any Gitaly or NFS nodes use SSD disks rather than
   HDD, with a throughput of at least 8,000 IOPS for read operations and 2,000 IOPS
   for writes, as these components have heavy I/O. These IOPS values are recommended
   only as a starting point; over time they may be adjusted higher or lower depending
   on the scale of your environment's workload. If you're running the environment on a
   cloud provider, you may need to refer to their documentation on how to configure IOPS correctly.
1. The architectures were built and tested with the [Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
   CPU platform on GCP. On different hardware, you may find that adjustments, either
   lower or higher, are required for your CPU or node counts. For more information, a
   [Sysbench](https://github.com/akopytov/sysbench) benchmark of the CPU can be found
   [here](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks).
1. AWS-equivalent and Azure-equivalent configurations are rough suggestions
   and may change in the future. They have not yet been tested and validated.
# Reference architecture: up to 25,000 users
This page describes the GitLab reference architecture for up to 25,000 users.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
> - **Supported users (approximate):** 25,000
> - **High Availability:** True
> - **Test RPS rates:** API: 500 RPS, Web: 50 RPS, Git: 50 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|----------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 5 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | c5.9xlarge | F32s v2 |
| PostgreSQL | 3 | 8 vCPU, 30GB Memory | n1-standard-8 | m5.2xlarge | D8s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 32 vCPU, 120GB Memory | n1-standard-32 | m5.8xlarge | D32s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
## Footnotes
1. In our architectures, we run each GitLab Rails node using the Puma webserver
   with its number of workers set to 90% of available CPUs, along with four threads. For
   nodes that are running Rails with other components, the worker value should be reduced
   accordingly; we've found that 50% achieves a good balance, but this depends
   on workload.
1. Gitaly node requirements are dependent on customer data, specifically the number of
   projects and their sizes. We recommend two nodes as an absolute minimum for HA environments,
   and at least four nodes when supporting 50,000 or more users.
   We also recommend that each Gitaly node store no more than 5TB of data
   and have the number of [`gitaly-ruby` workers](../gitaly/index.md#gitaly-ruby)
   set to 20% of available CPUs. Additional nodes should be considered in conjunction
   with a review of expected data size and spread based on the recommendations above.
1. The recommended Redis setup differs depending on the size of the architecture.
   For smaller architectures (fewer than 3,000 users), a single instance should suffice.
   For medium-sized installs (3,000 - 5,000), we suggest one Redis cluster for all
   classes, with Redis Sentinel hosted alongside Consul.
   For larger architectures (10,000 users or more), we suggest running a separate
   [Redis Cluster](../high_availability/redis.md#running-multiple-redis-clusters) for the Cache class
   and another for the Queues and Shared State classes. We also recommend
   running a separate Redis Sentinel cluster for each Redis Cluster.
1. For data objects such as LFS, Uploads, and Artifacts, we recommend an [Object Storage service](../object_storage.md)
   over NFS where possible, due to better performance and availability.
1. NFS can be used as an alternative for both repository data (replacing Gitaly) and
   object storage, but this isn't typically recommended for performance reasons. Note,
   however, that it is required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196).
1. Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
   as the load balancer. Although other load balancers with similar feature sets
   could also be used, those load balancers have not been validated.
1. We strongly recommend that any Gitaly or NFS nodes use SSD disks rather than
   HDD, with a throughput of at least 8,000 IOPS for read operations and 2,000 IOPS
   for writes, as these components have heavy I/O. These IOPS values are recommended
   only as a starting point; over time they may be adjusted higher or lower depending
   on the scale of your environment's workload. If you're running the environment on a
   cloud provider, you may need to refer to their documentation on how to configure IOPS correctly.
1. The architectures were built and tested with the [Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
   CPU platform on GCP. On different hardware, you may find that adjustments, either
   lower or higher, are required for your CPU or node counts. For more information, a
   [Sysbench](https://github.com/akopytov/sysbench) benchmark of the CPU can be found
   [here](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks).
1. AWS-equivalent and Azure-equivalent configurations are rough suggestions
   and may change in the future. They have not yet been tested and validated.
# Reference architecture: up to 2,000 users
This page describes the GitLab reference architecture for up to 2,000 users.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
> - **Supported users (approximate):** 2,000
> - **High Availability:** False
> - **Test RPS rates:** API: 40 RPS, Web: 4 RPS, Git: 4 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|---------------|-----------------------|----------------|
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| PostgreSQL | 1 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Redis ([3](#footnotes)) | 1 | 1 vCPU, 3.75GB Memory | n1-standard-1 | m5.large | D2s v3 |
| Gitaly ([5](#footnotes)) ([7](#footnotes)) | X ([2](#footnotes)) | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| GitLab Rails ([1](#footnotes)) | 2 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
## Setup instructions
1. [Configure the external load balancing node](../high_availability/load_balancer.md)
that will handle the load balancing of the two GitLab application service nodes.
1. [Configure the Object Storage](../object_storage.md) ([4](#footnotes)) used for shared data objects.
1. (Optional) [Configure NFS](../high_availability/nfs.md) to have
shared disk storage service as an alternative to Gitaly and/or
[Object Storage](../object_storage.md) (although not recommended).
NFS is required for GitLab Pages; you can skip this step if you're not using that feature.
1. [Configure PostgreSQL](../high_availability/load_balancer.md), the database for GitLab.
1. [Configure Redis](../high_availability/redis.md).
1. [Configure Gitaly](../gitaly/index.md#running-gitaly-on-its-own-server),
which is used to provide access to the Git repositories.
1. [Configure the main GitLab Rails application](../high_availability/gitlab.md)
to run Puma/Unicorn, Workhorse, GitLab Shell, and to serve all
frontend requests (UI, API, Git over HTTP/SSH).
1. [Configure Prometheus](../high_availability/monitoring_node.md) to monitor your GitLab environment.
## Footnotes
1. In our architectures, we run each GitLab Rails node using the Puma webserver
   with its number of workers set to 90% of available CPUs, along with four threads. For
   nodes that are running Rails with other components, the worker value should be reduced
   accordingly; we've found that 50% achieves a good balance, but this depends
   on workload.
1. Gitaly node requirements are dependent on customer data, specifically the number of
   projects and their sizes. We recommend two nodes as an absolute minimum for HA environments,
   and at least four nodes when supporting 50,000 or more users.
   We also recommend that each Gitaly node store no more than 5TB of data
   and have the number of [`gitaly-ruby` workers](../gitaly/index.md#gitaly-ruby)
   set to 20% of available CPUs. Additional nodes should be considered in conjunction
   with a review of expected data size and spread based on the recommendations above.
1. The recommended Redis setup differs depending on the size of the architecture.
   For smaller architectures (fewer than 3,000 users), a single instance should suffice.
   For medium-sized installs (3,000 - 5,000), we suggest one Redis cluster for all
   classes, with Redis Sentinel hosted alongside Consul.
   For larger architectures (10,000 users or more), we suggest running a separate
   [Redis Cluster](../high_availability/redis.md#running-multiple-redis-clusters) for the Cache class
   and another for the Queues and Shared State classes. We also recommend
   running a separate Redis Sentinel cluster for each Redis Cluster.
1. For data objects such as LFS, Uploads, and Artifacts, we recommend an [Object Storage service](../object_storage.md)
   over NFS where possible, due to better performance and availability.
1. NFS can be used as an alternative for both repository data (replacing Gitaly) and
   object storage, but this isn't typically recommended for performance reasons. Note,
   however, that it is required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196).
1. Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
   as the load balancer. Although other load balancers with similar feature sets
   could also be used, those load balancers have not been validated.
1. We strongly recommend that any Gitaly or NFS nodes use SSD disks rather than
   HDD, with a throughput of at least 8,000 IOPS for read operations and 2,000 IOPS
   for writes, as these components have heavy I/O. These IOPS values are recommended
   only as a starting point; over time they may be adjusted higher or lower depending
   on the scale of your environment's workload. If you're running the environment on a
   cloud provider, you may need to refer to their documentation on how to configure IOPS correctly.
1. The architectures were built and tested with the [Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
   CPU platform on GCP. On different hardware, you may find that adjustments, either
   lower or higher, are required for your CPU or node counts. For more information, a
   [Sysbench](https://github.com/akopytov/sysbench) benchmark of the CPU can be found
   [here](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks).
1. AWS-equivalent and Azure-equivalent configurations are rough suggestions
   and may change in the future. They have not yet been tested and validated.
# Reference architecture: up to 3,000 users
This page describes the GitLab reference architecture for up to 3,000 users.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
NOTE: **Note:** The 3,000-user reference architecture documented below is
designed to help your organization achieve a highly available GitLab deployment.
If you do not have the expertise to maintain a highly available environment, or
do not need one, you can have a simpler, less costly-to-operate environment by
following the [2,000-user reference architecture](2k_users.md).
> - **Supported users (approximate):** 3,000
> - **High Availability:** True
> - **Test RPS rates:** API: 60 RPS, Web: 6 RPS, Git: 6 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|---------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 3 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Consul + Sentinel ([3](#footnotes)) | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
## Footnotes
1. In our architectures, we run each GitLab Rails node using the Puma webserver
   with its number of workers set to 90% of available CPUs, along with four threads. For
   nodes that are running Rails with other components, the worker value should be reduced
   accordingly; we've found that 50% achieves a good balance, but this depends
   on workload.
1. Gitaly node requirements are dependent on customer data, specifically the number of
   projects and their sizes. We recommend two nodes as an absolute minimum for HA environments,
   and at least four nodes when supporting 50,000 or more users.
   We also recommend that each Gitaly node store no more than 5TB of data
   and have the number of [`gitaly-ruby` workers](../gitaly/index.md#gitaly-ruby)
   set to 20% of available CPUs. Additional nodes should be considered in conjunction
   with a review of expected data size and spread based on the recommendations above.
1. The recommended Redis setup differs depending on the size of the architecture.
   For smaller architectures (fewer than 3,000 users), a single instance should suffice.
   For medium-sized installs (3,000 - 5,000), we suggest one Redis cluster for all
   classes, with Redis Sentinel hosted alongside Consul.
   For larger architectures (10,000 users or more), we suggest running a separate
   [Redis Cluster](../high_availability/redis.md#running-multiple-redis-clusters) for the Cache class
   and another for the Queues and Shared State classes. We also recommend
   running a separate Redis Sentinel cluster for each Redis Cluster.
1. For data objects such as LFS, Uploads, and Artifacts, we recommend an [Object Storage service](../object_storage.md)
   over NFS where possible, due to better performance and availability.
1. NFS can be used as an alternative for both repository data (replacing Gitaly) and
   object storage, but this isn't typically recommended for performance reasons. Note,
   however, that it is required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196).
1. Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
   as the load balancer. Although other load balancers with similar feature sets
   could also be used, those load balancers have not been validated.
1. We strongly recommend that any Gitaly or NFS nodes use SSD disks rather than
   HDD, with a throughput of at least 8,000 IOPS for read operations and 2,000 IOPS
   for writes, as these components have heavy I/O. These IOPS values are recommended
   only as a starting point; over time they may be adjusted higher or lower depending
   on the scale of your environment's workload. If you're running the environment on a
   cloud provider, you may need to refer to their documentation on how to configure IOPS correctly.
1. The architectures were built and tested with the [Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
   CPU platform on GCP. On different hardware, you may find that adjustments, either
   lower or higher, are required for your CPU or node counts. For more information, a
   [Sysbench](https://github.com/akopytov/sysbench) benchmark of the CPU can be found
   [here](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks).
1. AWS-equivalent and Azure-equivalent configurations are rough suggestions
   and may change in the future. They have not yet been tested and validated.
# Reference architecture: up to 50,000 users
This page describes the GitLab reference architecture for up to 50,000 users.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
> - **Supported users (approximate):** 50,000
> - **High Availability:** True
> - **Test RPS rates:** API: 1000 RPS, Web: 100 RPS, Git: 100 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|----------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 12 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | c5.9xlarge | F32s v2 |
| PostgreSQL | 3 | 16 vCPU, 60GB Memory | n1-standard-16 | m5.4xlarge | D16s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 64 vCPU, 240GB Memory | n1-standard-64 | m5.16xlarge | D64s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
## Footnotes
1. In our architectures, we run each GitLab Rails node using the Puma webserver
   with its number of workers set to 90% of available CPUs, along with four threads. For
   nodes that are running Rails with other components, the worker value should be reduced
   accordingly; we've found that 50% achieves a good balance, but this depends
   on workload.
1. Gitaly node requirements are dependent on customer data, specifically the number of
   projects and their sizes. We recommend two nodes as an absolute minimum for HA environments,
   and at least four nodes when supporting 50,000 or more users.
   We also recommend that each Gitaly node store no more than 5TB of data
   and have the number of [`gitaly-ruby` workers](../gitaly/index.md#gitaly-ruby)
   set to 20% of available CPUs. Additional nodes should be considered in conjunction
   with a review of expected data size and spread based on the recommendations above.
1. The recommended Redis setup differs depending on the size of the architecture.
   For smaller architectures (fewer than 3,000 users), a single instance should suffice.
   For medium-sized installs (3,000 - 5,000), we suggest one Redis cluster for all
   classes, with Redis Sentinel hosted alongside Consul.
   For larger architectures (10,000 users or more), we suggest running a separate
   [Redis Cluster](../high_availability/redis.md#running-multiple-redis-clusters) for the Cache class
   and another for the Queues and Shared State classes. We also recommend
   running a separate Redis Sentinel cluster for each Redis Cluster.
1. For data objects such as LFS, Uploads, and Artifacts, we recommend an [Object Storage service](../object_storage.md)
   over NFS where possible, due to better performance and availability.
1. NFS can be used as an alternative for both repository data (replacing Gitaly) and
   object storage, but this isn't typically recommended for performance reasons. Note,
   however, that it is required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196).
1. Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
   as the load balancer. Although other load balancers with similar feature sets
   could also be used, those load balancers have not been validated.
1. We strongly recommend that any Gitaly or NFS nodes use SSD disks rather than
   HDD, with a throughput of at least 8,000 IOPS for read operations and 2,000 IOPS
   for writes, as these components have heavy I/O. These IOPS values are recommended
   only as a starting point; over time they may be adjusted higher or lower depending
   on the scale of your environment's workload. If you're running the environment on a
   cloud provider, you may need to refer to their documentation on how to configure IOPS correctly.
1. The architectures were built and tested with the [Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
   CPU platform on GCP. On different hardware, you may find that adjustments, either
   lower or higher, are required for your CPU or node counts. For more information, a
   [Sysbench](https://github.com/akopytov/sysbench) benchmark of the CPU can be found
   [here](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks).
1. AWS-equivalent and Azure-equivalent configurations are rough suggestions
   and may change in the future. They have not yet been tested and validated.
# Reference architecture: up to 5,000 users
This page describes the GitLab reference architecture for up to 5,000 users.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
> - **Supported users (approximate):** 5,000
> - **High Availability:** True
> - **Test RPS rates:** API: 100 RPS, Web: 10 RPS, Git: 10 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|---------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 3 | 16 vCPU, 14.4GB Memory | n1-highcpu-16 | c5.4xlarge | F16s v2 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 8 vCPU, 30GB Memory | n1-standard-8 | m5.2xlarge | D8s v3 |
| Redis ([3](#footnotes)) | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Consul + Sentinel ([3](#footnotes)) | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
## Footnotes
1. In our architectures, we run each GitLab Rails node using the Puma webserver
   with its number of workers set to 90% of available CPUs, along with four threads. For
   nodes that are running Rails with other components, the worker value should be reduced
   accordingly; we've found that 50% achieves a good balance, but this depends
   on workload.
1. Gitaly node requirements are dependent on customer data, specifically the number of
   projects and their sizes. We recommend two nodes as an absolute minimum for HA environments,
   and at least four nodes when supporting 50,000 or more users.
   We also recommend that each Gitaly node store no more than 5TB of data
   and have the number of [`gitaly-ruby` workers](../gitaly/index.md#gitaly-ruby)
   set to 20% of available CPUs. Additional nodes should be considered in conjunction
   with a review of expected data size and spread based on the recommendations above.
1. The recommended Redis setup differs depending on the size of the architecture.
   For smaller architectures (fewer than 3,000 users), a single instance should suffice.
   For medium-sized installs (3,000 - 5,000), we suggest one Redis cluster for all
   classes, with Redis Sentinel hosted alongside Consul.
   For larger architectures (10,000 users or more), we suggest running a separate
   [Redis Cluster](../high_availability/redis.md#running-multiple-redis-clusters) for the Cache class
   and another for the Queues and Shared State classes. We also recommend
   running a separate Redis Sentinel cluster for each Redis Cluster.
1. For data objects such as LFS, Uploads, and Artifacts, we recommend an [Object Storage service](../object_storage.md)
   over NFS where possible, due to better performance and availability.
1. NFS can be used as an alternative for both repository data (replacing Gitaly) and
   object storage, but this isn't typically recommended for performance reasons. Note,
   however, that it is required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196).
1. Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
   as the load balancer. Although other load balancers with similar feature sets
   could also be used, those load balancers have not been validated.
1. We strongly recommend that any Gitaly or NFS nodes use SSD disks rather than
   HDD, with a throughput of at least 8,000 IOPS for read operations and 2,000 IOPS
   for writes, as these components have heavy I/O. These IOPS values are recommended
   only as a starting point; over time they may be adjusted higher or lower depending
   on the scale of your environment's workload. If you're running the environment on a
   cloud provider, you may need to refer to their documentation on how to configure IOPS correctly.
1. The architectures were built and tested with the [Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
   CPU platform on GCP. On different hardware, you may find that adjustments, either
   lower or higher, are required for your CPU or node counts. For more information, a
   [Sysbench](https://github.com/akopytov/sysbench) benchmark of the CPU can be found
   [here](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks).
1. AWS-equivalent and Azure-equivalent configurations are rough suggestions
   and may change in the future. They have not yet been tested and validated.
......@@ -48,187 +48,17 @@ how much automation you use, mirroring, and repository/change size. Additionally, the
displayed memory values are provided by [GCP machine types](https://cloud.google.com/compute/docs/machine-types).
For different cloud vendors, attempt to select options that best match the provided architecture.
## Up to 1,000 users
> - **Supported users (approximate):** 1,000
> - **High Availability:** False
| Users | Configuration ([8](#footnotes)) | GCP type | AWS type ([9](#footnotes)) |
|-------|--------------------------------|---------------|---------------------------|
| 100 | 2 vCPU, 7.2GB Memory | n1-standard-2 | c5.2xlarge |
| 500 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge |
| 1000 | 8 vCPU, 30GB Memory | n1-standard-8 | m5.2xlarge |
For situations where you need to serve up to 1,000 users, a single-node
solution with [frequent backups](#automated-backups-core-only) is appropriate
for many organizations. With automatic backup of the GitLab repositories,
configuration, and the database, this is the ideal solution if you don't have
strict availability requirements.
### Setup instructions
- For this default reference architecture, use the standard [installation instructions](../../install/README.md) to install GitLab.
NOTE: **Note:**
You can also optionally configure GitLab to use an
[external PostgreSQL service](../external_database.md) or an
[external object storage service](../high_availability/object_storage.md) for
added performance and reliability at a reduced complexity cost.
## Up to 2,000 users
> - **Supported users (approximate):** 2,000
> - **High Availability:** False
> - **Test RPS rates:** API: 40 RPS, Web: 4 RPS, Git: 4 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|---------------|-----------------------|----------------|
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| PostgreSQL | 1 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Redis ([3](#footnotes)) | 1 | 1 vCPU, 3.75GB Memory | n1-standard-1 | m5.large | D2s v3 |
| Gitaly ([5](#footnotes)) ([7](#footnotes)) | X ([2](#footnotes)) | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| GitLab Rails ([1](#footnotes)) | 2 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
### Setup instructions
1. [Configure the external load balancing node](../high_availability/load_balancer.md)
that will handle the load balancing of the two GitLab application service nodes.
1. [Configure the Object Storage](../object_storage.md) ([4](#footnotes)) used for shared data objects.
1. (Optional) [Configure NFS](../high_availability/nfs.md) to have
shared disk storage service as an alternative to Gitaly and/or
[Object Storage](../object_storage.md) (although not recommended).
NFS is required for GitLab Pages; you can skip this step if you're not using that feature.
1. [Configure PostgreSQL](../high_availability/load_balancer.md), the database for GitLab.
1. [Configure Redis](../high_availability/redis.md).
1. [Configure Gitaly](../gitaly/index.md#running-gitaly-on-its-own-server),
which is used to provide access to the Git repositories.
1. [Configure the main GitLab Rails application](../high_availability/gitlab.md)
to run Puma/Unicorn, Workhorse, GitLab Shell, and to serve all
frontend requests (UI, API, Git over HTTP/SSH).
1. [Configure Prometheus](../high_availability/monitoring_node.md) to monitor your GitLab environment.
## Up to 3,000 users
NOTE: **Note:** The 3,000-user reference architecture documented below is
designed to help your organization achieve a highly available GitLab deployment.
If you do not have the expertise to maintain a highly available environment, or
do not need one, you can have a simpler, less costly-to-operate environment by
following the [2,000-user reference architecture](#up-to-2000-users).
> - **Supported users (approximate):** 3,000
> - **High Availability:** True
> - **Test RPS rates:** API: 60 RPS, Web: 6 RPS, Git: 6 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|---------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 3 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Consul + Sentinel ([3](#footnotes)) | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
## Up to 5,000 users
> - **Supported users (approximate):** 5,000
> - **High Availability:** True
> - **Test RPS rates:** API: 100 RPS, Web: 10 RPS, Git: 10 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|---------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 3 | 16 vCPU, 14.4GB Memory | n1-highcpu-16 | c5.4xlarge | F16s v2 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 8 vCPU, 30GB Memory | n1-standard-8 | m5.2xlarge | D8s v3 |
| Redis ([3](#footnotes)) | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Consul + Sentinel ([3](#footnotes)) | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 | m5.large | D2s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
## Up to 10,000 users
> - **Supported users (approximate):** 10,000
> - **High Availability:** True
> - **Test RPS rates:** API: 200 RPS, Web: 20 RPS, Git: 20 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure ([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|----------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 3 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | c5.9xlarge | F32s v2 |
| PostgreSQL | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 16 vCPU, 60GB Memory | n1-standard-16 | m5.4xlarge | D16s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
## Up to 25,000 users
> - **Supported users (approximate):** 25,000
> - **High Availability:** True
> - **Test RPS rates:** API: 500 RPS, Web: 50 RPS, Git: 50 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|----------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 5 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | c5.9xlarge | F32s v2 |
| PostgreSQL | 3 | 8 vCPU, 30GB Memory | n1-standard-8 | m5.2xlarge | D8s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 32 vCPU, 120GB Memory | n1-standard-32 | m5.8xlarge | D32s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
## Up to 50,000 users
> - **Supported users (approximate):** 50,000
> - **High Availability:** True
> - **Test RPS rates:** API: 1000 RPS, Web: 100 RPS, Git: 100 RPS
| Service | Nodes | Configuration ([8](#footnotes)) | GCP | AWS ([9](#footnotes)) | Azure([9](#footnotes)) |
|--------------------------------------------------------------|-------|---------------------------------|----------------|-----------------------|------------------------|
| GitLab Rails ([1](#footnotes)) | 12 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | c5.9xlarge | F32s v2 |
| PostgreSQL | 3 | 16 vCPU, 60GB Memory | n1-standard-16 | m5.4xlarge | D16s v3 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Gitaly ([2](#footnotes)) ([5](#footnotes)) ([7](#footnotes)) | X | 64 vCPU, 240GB Memory | n1-standard-64 | m5.16xlarge | D64s v3 |
| Redis ([3](#footnotes)) - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis ([3](#footnotes)) - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| Redis Sentinel ([3](#footnotes)) - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Redis Sentinel ([3](#footnotes)) - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | t2.small | B1MS |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | m5.xlarge | D4s v3 |
| NFS Server ([5](#footnotes)) ([7](#footnotes)) | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| Object Storage ([4](#footnotes)) | - | - | - | - | - |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | c5.xlarge | F4s v2 |
| External load balancing node ([6](#footnotes)) | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | c5.large | F2s v2 |
| Internal load balancing node ([6](#footnotes)) | 1 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | c5.2xlarge | F8s v2 |
## Available reference architectures
The following reference architectures are available:
- [Up to 1,000 users](1k_users.md)
- [Up to 2,000 users](2k_users.md)
- [Up to 3,000 users](3k_users.md)
- [Up to 5,000 users](5k_users.md)
- [Up to 10,000 users](10k_users.md)
- [Up to 25,000 users](25k_users.md)
- [Up to 50,000 users](50k_users.md)
## Availability complexity
......
......@@ -392,7 +392,11 @@ Parameters:
| ------------------------ | -------------- | -------- | ----------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the group](README.md#namespaced-path-encoding) owned by the authenticated user. |
| `with_custom_attributes` | boolean | no | Include [custom attributes](custom_attributes.md) in response (admins only). |
| `with_projects` | boolean | no | Include details from projects that belong to the specified group (defaults to `true`). (Deprecated, [will be removed in 13.0](https://gitlab.com/gitlab-org/gitlab/-/issues/213797). To get the details of all projects within a group, use the [list a group's projects endpoint](#list-a-groups-projects).) |
| `with_projects` | boolean | no | Include details from projects that belong to the specified group (defaults to `true`). (Deprecated, [will be removed in API v5](https://gitlab.com/gitlab-org/gitlab/-/issues/213797). To get the details of all projects within a group, use the [list a group's projects endpoint](#list-a-groups-projects).) |
NOTE: **Note:**
The `projects` and `shared_projects` attributes in the response are deprecated and will be [removed in API v5](https://gitlab.com/gitlab-org/gitlab/-/issues/213797).
To get the details of all projects within a group, use either the [list a group's projects](#list-a-groups-projects) or the [list a group's shared projects](#list-a-groups-shared-projects) endpoint.
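For example, the replacement endpoints can be called as follows (using the same example group ID as the request below):
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/4/projects"
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/4/projects/shared"
```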
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" https://gitlab.example.com/api/v4/groups/4
......@@ -423,7 +427,7 @@ Example response:
"file_template_project_id": 1,
"parent_id": null,
"created_at": "2020-01-15T12:36:29.590Z",
"projects": [
"projects": [ // Deprecated and will be removed in API v5
{
"id": 7,
"description": "Voluptas veniam qui et beatae voluptas doloremque explicabo facilis.",
......@@ -501,7 +505,7 @@ Example response:
"request_access_enabled": false
}
],
"shared_projects": [
"shared_projects": [ // Deprecated and will be removed in API v5
{
"id": 8,
"description": "Velit eveniet provident fugiat saepe eligendi autem.",
......@@ -704,6 +708,10 @@ PUT /groups/:id
| `shared_runners_minutes_limit` | integer | no | **(STARTER ONLY)** Pipeline minutes quota for this group. |
| `extra_shared_runners_minutes_limit` | integer | no | **(STARTER ONLY)** Extra pipeline minutes quota for this group. |
NOTE: **Note:**
The `projects` and `shared_projects` attributes in the response are deprecated and will be [removed in API v5](https://gitlab.com/gitlab-org/gitlab/-/issues/213797).
To get the details of all projects within a group, use either the [list a group's projects](#list-a-groups-projects) or the [list a group's shared projects](#list-a-groups-shared-projects) endpoint.
```shell
curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/5?name=Experimental"
```
......@@ -715,9 +723,6 @@ This endpoint returns:
and later. To get the details of all projects within a group, use the
[list a group's projects endpoint](#list-a-groups-projects) instead.
NOTE: **Note:**
The `projects` and `shared_projects` attributes [will be deprecated in GitLab 13.0](https://gitlab.com/gitlab-org/gitlab/-/issues/213797). To get the details of all projects within a group, use the [list a group's projects endpoint](#list-a-groups-projects) instead.
Example response:
```json
......@@ -735,7 +740,7 @@ Example response:
"file_template_project_id": 1,
"parent_id": null,
"created_at": "2020-01-15T12:36:29.590Z",
"projects": [
"projects": [ // Deprecated and will be removed in API v5
{
"id": 9,
"description": "foo",
......
......@@ -162,7 +162,7 @@ When the user is authenticated and `simple` is not set this returns something li
"merge_method": "merge",
"autoclose_referenced_issues": true,
"suggestion_commit_message": null,
"marked_for_deletion_at": "2020-04-03", // to be deprecated in GitLab 13.0 in favor of marked_for_deletion_on
"marked_for_deletion_at": "2020-04-03", // Deprecated and will be removed in API v5 in favor of marked_for_deletion_on
"marked_for_deletion_on": "2020-04-03",
"statistics": {
"commit_count": 37,
......@@ -288,7 +288,7 @@ When the user is authenticated and `simple` is not set this returns something li
```
NOTE: **Note:**
For users on GitLab [Silver, Premium, or higher](https://about.gitlab.com/pricing/) the `marked_for_deletion_at` attribute will be deprecated in GitLab 13.0 in favor of the `marked_for_deletion_on` attribute.
For users on GitLab [Silver, Premium, or higher](https://about.gitlab.com/pricing/) the `marked_for_deletion_at` attribute has been deprecated and will be removed in API v5 in favor of the `marked_for_deletion_on` attribute.
Users on GitLab [Starter, Bronze, or higher](https://about.gitlab.com/pricing/) will also see
the `approvals_before_merge` parameter:
......@@ -411,7 +411,7 @@ This endpoint supports [keyset pagination](README.md#keyset-based-pagination) fo
"merge_method": "merge",
"autoclose_referenced_issues": true,
"suggestion_commit_message": null,
"marked_for_deletion_at": "2020-04-03", // to be deprecated in GitLab 13.0 in favor of marked_for_deletion_on
"marked_for_deletion_at": "2020-04-03", // Deprecated and will be removed in API v5 in favor of marked_for_deletion_on
"marked_for_deletion_on": "2020-04-03",
"statistics": {
"commit_count": 37,
......@@ -879,7 +879,7 @@ GET /projects/:id
"service_desk_address": null,
"autoclose_referenced_issues": true,
"suggestion_commit_message": null,
"marked_for_deletion_at": "2020-04-03", // to be deprecated in GitLab 13.0 in favor of marked_for_deletion_on
"marked_for_deletion_at": "2020-04-03", // Deprecated and will be removed in API v5 in favor of marked_for_deletion_on
"marked_for_deletion_on": "2020-04-03",
"statistics": {
"commit_count": 37,
......
......@@ -72,7 +72,7 @@ cat > /tmp/test-config.template.toml << EOF
[[runners]]
[runners.docker]
[[runners.docker.services]]
name = "mysql:latest"
name = "postgres:latest"
EOF
```
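The saved template can then be supplied to `gitlab-runner register` through its `--template-config` flag. A minimal sketch, with the URL and registration token as placeholders:
```shell
gitlab-runner register \
  --non-interactive \
  --url "https://gitlab.com" \
  --registration-token "<registration_token>" \
  --template-config /tmp/test-config.template.toml \
  --executor docker \
  --docker-image alpine:latest
```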
......
......@@ -1101,9 +1101,9 @@ useful is not available, please
### `only`/`except` (basic)
NOTE: **Note:**
The [`rules`](#rules) syntax is now the preferred method of setting job policies.
`only` and `except` are [candidates for deprecation](https://gitlab.com/gitlab-org/gitlab/issues/27449),
and may be removed in the future.
The [`rules`](#rules) syntax is an improved, more powerful solution for defining
when jobs should run or not. Consider using `rules` instead of `only/except` to get
the most out of your pipelines.
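For example, a job that previously ran only on `master` via `only` can express the same policy with `rules`. A minimal sketch:
```yaml
job1:
  script: echo "This job runs only on master"
  rules:
    - if: '$CI_COMMIT_BRANCH == "master"'
```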
`only` and `except` are two parameters that set a job policy to limit when
jobs are created:
......
......@@ -837,7 +837,8 @@ management project. Refer to the
for the available configuration options.
CAUTION: **Caution:**
Installation and removal of the Cilium [requires restart](https://cilium.readthedocs.io/en/stable/gettingstarted/k8s-install-gke/#restart-remaining-pods)
Installing and removing Cilium requires a **manual**
[restart](https://cilium.readthedocs.io/en/stable/gettingstarted/k8s-install-gke/#restart-remaining-pods)
of all affected pods in all namespaces to ensure that they are
[managed](https://cilium.readthedocs.io/en/stable/troubleshooting/#ensure-pod-is-managed-by-cilium)
by the correct networking plugin.
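For example, a pod managed by a Deployment can be restarted by deleting it, so that its controller recreates it under the correct plugin. A sketch, with placeholder names:
```shell
kubectl delete pod <pod-name> --namespace <namespace>
```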
......
......@@ -6,8 +6,6 @@ type: reference
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/34758) in GitLab 11.6.
## Overview
Similar to [project-level](../../project/clusters/index.md) and
[instance-level](../../instance/clusters/index.md) Kubernetes clusters,
group-level Kubernetes clusters allow you to connect a Kubernetes cluster to
......@@ -22,47 +20,43 @@ and troubleshooting applications for your group cluster, see
## RBAC compatibility
For each project under a group with a Kubernetes cluster, GitLab will
create a restricted service account with [`edit`
privileges](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles)
in the project namespace.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/29398) in GitLab 11.4.
> - [Project namespace restriction](https://gitlab.com/gitlab-org/gitlab-foss/issues/51716) was introduced in GitLab 11.5.
NOTE: **Note:**
RBAC support was introduced in
[GitLab 11.4](https://gitlab.com/gitlab-org/gitlab-foss/issues/29398), and
Project namespace restriction was introduced in
[GitLab 11.5](https://gitlab.com/gitlab-org/gitlab-foss/issues/51716).
For each project under a group with a Kubernetes cluster, GitLab creates a restricted
service account with [`edit` privileges](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles)
in the project namespace.
## Cluster precedence
GitLab will use the project's cluster before using any cluster belonging
to the group containing the project if the project's cluster is available and not disabled.
In the case of sub-groups, GitLab will use the cluster of the closest ancestor group
If the project's cluster is available and not disabled, GitLab uses the
project's cluster before using any cluster belonging to the group containing
the project.
In the case of sub-groups, GitLab uses the cluster of the closest ancestor group
to the project, provided the cluster is not disabled.
## Multiple Kubernetes clusters **(PREMIUM)**
With GitLab Premium, you can associate more than one Kubernetes clusters to your
group. That way you can have different clusters for different environments,
like dev, staging, production, etc.
With [GitLab Premium](https://about.gitlab.com/pricing/premium/), you can associate
more than one Kubernetes cluster to your group, and maintain different clusters
for different environments, such as development, staging, and production.
Add another cluster similar to the first one and make sure to
[set an environment scope](#environment-scopes-premium) that will
differentiate the new cluster from the rest.
When adding another cluster,
[set an environment scope](#environment-scopes-premium) to help
differentiate the new cluster from your other clusters.
## GitLab-managed clusters
> - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/22011) in GitLab 11.5.
> - Became [optional](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/26565) in GitLab 11.11.
You can choose to allow GitLab to manage your cluster for you. If your cluster is
managed by GitLab, resources for your projects will be automatically created. See the
[Access controls](../../project/clusters/add_remove_clusters.md#access-controls) section for details on which resources will
be created.
You can choose to allow GitLab to manage your cluster for you. If GitLab manages
your cluster, resources for your projects will be automatically created. See the
[Access controls](../../project/clusters/add_remove_clusters.md#access-controls)
section for details on which resources GitLab creates for you.
For clusters not managed by GitLab, project-specific resources will not be created
automatically. If you are using [Auto DevOps](../../../topics/autodevops/index.md)
For clusters not managed by GitLab, project-specific resources won't be created
automatically. If you're using [Auto DevOps](../../../topics/autodevops/index.md)
for deployments with a cluster not managed by GitLab, you must ensure:
- The project's deployment service account has permissions to deploy to
......@@ -72,8 +66,8 @@ for deployments with a cluster not managed by GitLab, you must ensure:
`KUBE_NAMESPACE` directly is discouraged.
NOTE: **Note:**
If you [install applications](#installing-applications) on your cluster, GitLab will create
the resources required to run these even if you have chosen to manage your own cluster.
If you [install applications](#installing-applications) on your cluster, GitLab creates
the resources required to run them even if you choose to manage your own cluster.
### Clearing the cluster cache
......@@ -86,7 +80,8 @@ your cluster, which can cause deployment jobs to fail.
To clear the cache:
1. Navigate to your group’s **Kubernetes** page, and select your cluster.
1. Navigate to your group’s **{cloud-gear}** **Kubernetes** page,
and select your cluster.
1. Expand the **Advanced settings** section.
1. Click **Clear cluster cache**.
......@@ -110,12 +105,12 @@ them with an environment scope. The environment scope associates clusters with
work.
While evaluating which environment matches the environment scope of a
cluster, [cluster precedence](#cluster-precedence) will take
effect. The cluster at the project level will take precedence, followed
cluster, [cluster precedence](#cluster-precedence) takes
effect. The cluster at the project level takes precedence, followed
by the closest ancestor group, followed by that groups' parent and so
on.
For example, let's say we have the following Kubernetes clusters:
For example, if your project has the following Kubernetes clusters:
| Cluster | Environment scope | Where |
| ---------- | ------------------- | ----------|
......@@ -151,11 +146,11 @@ deploy to production:
url: https://example.com/
```
The result will then be:
The result is:
- The Project cluster will be used for the `test` job.
- The Staging cluster will be used for the `deploy to staging` job.
- The Production cluster will be used for the `deploy to production` job.
- The Project cluster is used for the `test` job.
- The Staging cluster is used for the `deploy to staging` job.
- The Production cluster is used for the `deploy to production` job.
## Cluster environments **(PREMIUM)**
......@@ -166,8 +161,7 @@ are deployed to the Kubernetes cluster, see the documentation for
## Security of Runners
For important information about securely configuring GitLab Runners, see
[Security of
Runners](../../project/clusters/add_remove_clusters.md#security-of-gitlab-runners)
[Security of Runners](../../project/clusters/add_remove_clusters.md#security-of-gitlab-runners)
documentation for project-level clusters.
## More information
......
......@@ -257,6 +257,9 @@ Set other user attributes and claims according to the [assertions table](#assert
### Okta setup notes
<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
For a demo of the Okta SAML setup including SCIM, see [Demo: Okta Group SAML & SCIM setup](https://youtu.be/0ES9HsZq0AQ).
| GitLab Setting | Okta Field |
|--------------|----------------|
| Identifier | Audience URI |
......
# Infrastructure as Code
# Infrastructure as code with GitLab managed Terraform State
GitLab can be used to manage infrastructure as code. The following are some examples:
[Terraform remote backends](https://www.terraform.io/docs/backends/index.html)
enable you to store the state file in a remote, shared store. GitLab uses the
[Terraform HTTP backend](https://www.terraform.io/docs/backends/types/http.html)
to securely store the state files in local storage (the default) or
[the remote store of your choice](../../administration/terraform_state.md).
- [A generic tutorial for Terraform with GitLab](https://medium.com/@timhberry/terraform-pipelines-in-gitlab-415b9d842596).
- [Terraform at GitLab](https://about.gitlab.com/blog/2019/11/12/gitops-part-2/).
The GitLab managed Terraform state backend can store your Terraform state easily and
securely, and spares you from setting up additional remote resources like
Amazon S3 or Google Cloud Storage. Its features include:
- Encryption of the state file both in transit and at rest.
- Locking and unlocking state.
- Remote Terraform plan and apply execution.
To get started with GitLab managed Terraform state, there are two options:
- Use a local machine
- Use GitLab CI
## Get started using local development
If you plan to run `terraform plan` and `terraform apply` commands only from your local machine, this is a simple way to get started.
First, create your project on your GitLab instance.
Next, define the Terraform backend in your Terraform project to be:
```hcl
terraform {
backend "http" {
}
}
```
Finally, run `terraform init` on your local machine and pass in the following options. The example below uses GitLab.com:
```shell
terraform init \
-backend-config="address=https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-PROJECT-NAME>" \
-backend-config="lock_address=https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-PROJECT-NAME>/lock" \
-backend-config="unlock_address=https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-PROJECT-NAME>/lock" \
-backend-config="username=<YOUR-USERNAME>" \
-backend-config="password=<YOUR-ACCESS-TOKEN>" \
-backend-config="lock_method=POST" \
-backend-config="unlock_method=DELETE" \
-backend-config="retry_wait_min=5"
```
This initializes your Terraform state and stores it within your GitLab project.
NOTE: **Note:**
The `<YOUR-PROJECT-ID>` and `<YOUR-PROJECT-NAME>` values are displayed on the project's main page.
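With the backend initialized, subsequent commands run from your local machine read and write state through your GitLab project, for example:
```shell
terraform plan
terraform apply
```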
## Get started using GitLab CI
Another option is to use GitLab CI to run your `terraform plan` and `terraform apply` commands.
### Configure the CI variables
To use the Terraform backend, [first create a Personal Access Token](../profile/personal_access_tokens.md) with the `api` scope. Keep in mind that the Terraform backend is restricted to tokens with [Maintainer access](../permissions.md) to the repository.
To keep the Personal Access Token secure, add it as a [CI/CD environment variable](../../ci/variables/README.md). In this example, it's stored in the `GITLAB_TF_PASSWORD` variable.
If you plan to use the variable on a branch which is not protected, make sure to configure the variable's protection settings accordingly.
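If you prefer to script this step, the variable can also be created through the project-level CI/CD variables API. A sketch, with placeholder values:
```shell
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" \
  "https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/variables" \
  --form "key=GITLAB_TF_PASSWORD" \
  --form "value=<api_scoped_personal_access_token>" \
  --form "protected=true"
```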
### Configure the Terraform backend
Next, define the [http backend](https://www.terraform.io/docs/backends/types/http.html). In your Terraform project, add the following code block in a `.tf` file such as `backend.tf`, or wherever you prefer to define the remote backend:
```hcl
terraform {
backend "http" {
}
}
```
### Configure the CI YAML file
Finally, configure a `.gitlab-ci.yml` file, which lives in the root of your project repository.
This example uses a pre-built image:
```yaml
image:
name: hashicorp/terraform:light
entrypoint:
- '/usr/bin/env'
- 'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
```
Next, define some environment variables to simplify the configuration. `GITLAB_TF_ADDRESS` is the URL of the GitLab instance where this pipeline runs, and `TF_ROOT` is the directory where the Terraform commands must be executed:
```yaml
variables:
GITLAB_TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}
TF_ROOT: ${CI_PROJECT_DIR}/environments/cloudflare/production
cache:
paths:
- .terraform
```
In a `before_script`, pass a `terraform init` call containing configuration parameters.
These parameters correspond to variables required by the
[http backend](https://www.terraform.io/docs/backends/types/http.html):
```yaml
before_script:
- cd ${TF_ROOT}
- terraform --version
- terraform init -backend-config="address=${GITLAB_TF_ADDRESS}" -backend-config="lock_address=${GITLAB_TF_ADDRESS}/lock" -backend-config="unlock_address=${GITLAB_TF_ADDRESS}/lock" -backend-config="username=${GITLAB_USER_LOGIN}" -backend-config="password=${GITLAB_TF_PASSWORD}" -backend-config="lock_method=POST" -backend-config="unlock_method=DELETE" -backend-config="retry_wait_min=5"
stages:
- validate
- build
- test
- deploy
validate:
stage: validate
script:
- terraform validate
plan:
stage: build
script:
- terraform plan
- terraform show
apply:
stage: deploy
environment:
name: production
script:
- terraform apply
dependencies:
- plan
when: manual
only:
- master
```
### Push to GitLab
Pushing your project to GitLab triggers a CI job pipeline, which runs the `terraform init`, `terraform validate`, and `terraform plan` commands automatically.
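For example, assuming the `origin` remote points at your GitLab project:
```shell
git add backend.tf .gitlab-ci.yml
git commit -m "Configure GitLab managed Terraform state"
git push origin master
```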
The output from the above `terraform` commands should be viewable in the job logs.
## Example project
See [this reference project](https://gitlab.com/nicholasklick/gitlab-terraform-aws) using GitLab and Terraform to deploy a basic AWS EC2 instance within a custom VPC.
......@@ -9,6 +9,15 @@ Jira issues import is an MVC, project-level feature, meaning that issues from mu
Jira projects can be imported into a GitLab project. MVC version imports issue title and description
as well as some other issue metadata as a section in the issue description.
## Future iterations
As of GitLab 12.10, the Jira issue importer only brings across the title and description of
an issue.
There is an [epic](https://gitlab.com/groups/gitlab-org/-/epics/2738) tracking the
addition of items such as issue assignees, labels, comments, user mapping, and much more.
These will be included in future iterations of the GitLab Jira importer.
## Prerequisites
### Permissions
......
# Alert Management
> [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/2877) in GitLab 13.0.
Alert Management enables developers to easily discover and view the alerts
generated by their application. By surfacing alert information where the code is
being developed, efficiency and awareness can be increased.
## Enable Alert Management
NOTE: **Note:**
You will need at least Maintainer [permissions](../../permissions.md) to enable the Alert Management feature.
1. Follow the [instructions for toggling generic alerts](../integrations/generic_alerts.md#setting-up-generic-alerts)
1. You can now visit **{cloud-gear}** **Operations > Alert Management** in your project's sidebar to [view a list](#alert-management-list) of alerts.
![Alert Management Toggle](img/alert_management_1_v13_1.png)
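After setup, you can verify the integration by sending a test alert to the generic alerts endpoint, where `<url>` and `<authorization_key>` come from the integration's configuration:
```shell
curl --request POST \
  --data '{"title": "Test alert"}' \
  --header "Authorization: Bearer <authorization_key>" \
  --header "Content-Type: application/json" \
  "<url>"
```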
## Alert Management severity
Each level of alert contains a uniquely shaped and color-coded icon to help
you identify the severity of a particular alert. These severity icons help you
immediately identify which alerts you should prioritize investigating:
![Alert Management Severity System](img/alert_management_severity_v13_0.png)
Alerts contain one of the following icons:
- **Critical**: **{severity-critical}** and hexadecimal color `#8b2615`
- **High**: **{severity-high}** and hexadecimal color `#c0341d`
- **Medium**: **{severity-medium}** and hexadecimal color `#fca429`
- **Low**: **{severity-low}** and hexadecimal color `#fdbc60`
- **Info**: **{severity-info}** and hexadecimal color `#418cd8`
- **Unknown**: **{severity-unknown}** and hexadecimal color `#bababa`
## Alert Management list
NOTE: **Note:**
You will need at least Developer [permissions](../../permissions.md) to view the Alert Management list.
You can find the Alert Management list at **{cloud-gear}** **Operations > Alerts** in your project's sidebar.
Each alert contains the following metrics:
![Alert Management List](img/alert_management_1_v13_0.png)
- **Severity** - The current importance of an alert and how much attention it should receive.
- **Start time** - How long ago the alert fired. This field uses the standard GitLab pattern of `X time ago`, but is supported by a granular date/time tooltip depending on the user's locale.
- **End time** - How long ago the alert was resolved. This field uses the standard GitLab pattern of `X time ago`, but is supported by a granular date/time tooltip depending on the user's locale.
- **Alert description** - The description of the alert, which attempts to capture the most meaningful data.
- **Event count** - The number of times that an alert has fired.
- **Status** - The [current status](#alert-management-statuses) of the alert.
### Alert Management statuses
Each alert contains a status dropdown to indicate which alerts need investigation.
Standard alert statuses include `triggered`, `acknowledged`, and `resolved`:
- **Triggered**: No one has begun investigation.
- **Acknowledged**: Someone is actively investigating the problem.
- **Resolved**: No further work is required.
......@@ -44,11 +44,24 @@ Note the following:
## Version history
The following table lists updates to Import/Export:
Starting with GitLab 13.0, GitLab can import bundles that were exported from a different GitLab deployment.
This ability is limited to two previous GitLab [minor](../../../policy/maintenance.md#versioning)
releases, which is similar to our process for [Security Releases](../../../policy/maintenance.md#security-releases).
For example:
| Current version | Can import bundles exported from |
|-----------------|----------------------------------|
| 13.0 | 13.0, 12.10, 12.9 |
| 13.1 | 13.1, 13.0, 12.10 |
### 12.x
Prior to GitLab 13.0, compatibility was defined by the following table:
| Exporting GitLab version | Importing GitLab version |
| -------------------------- | -------------------------- |
| 11.7 to current | 11.7 to current |
| 11.7 to 13.0 | 11.7 to 13.0 |
| 11.1 to 11.6 | 11.1 to 11.6 |
| 10.8 to 11.0 | 10.8 to 11.0 |
| 10.4 to 10.7 | 10.4 to 10.7 |
......
......@@ -51,32 +51,16 @@ module Gitlab
end
end
def map_user_id(email)
return unless email
# We also include emails that are not yet confirmed
users = User.by_any_email(email).to_a
# this event should never happen but we should log it in case we have invalid data
log_user_mapping_message('Multiple users found for an email address', email) if users.count > 1
user = users.first
unless project.project_member(user)
log_user_mapping_message('Jira user not found', email)
return
end
user.id
def map_user_id(jira_user)
Gitlab::JiraImport::UserMapper.new(project, jira_user).execute&.id
end
def reporter
map_user_id(jira_issue&.reporter&.emailAddress) || import_owner_id
map_user_id(jira_issue.reporter&.attrs) || import_owner_id
end
def assignees
found_user_id = map_user_id(jira_issue&.assignee&.emailAddress)
found_user_id = map_user_id(jira_issue.assignee&.attrs)
return unless found_user_id
......@@ -95,15 +79,6 @@ module Gitlab
def logger
@logger ||= Gitlab::Import::Logger.build
end
def log_user_mapping_message(message, email)
logger.info(
project_id: project.id,
project_path: project.full_path,
user_email: email,
message: message
)
end
end
end
end
# frozen_string_literal: true
module Gitlab
module JiraImport
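# Maps a Jira user hash (for example `{ 'emailAddress' => '...' }`) to an
# existing GitLab user who is a member of the project or its group,
# matching against any of the user's email addresses, confirmed or not.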
class UserMapper
include ::Gitlab::Utils::StrongMemoize
def initialize(project, jira_user)
@project = project
@jira_user = jira_user
end
def execute
return unless jira_user
email = jira_user['emailAddress']
# We also include emails that are not yet confirmed
users = User.by_any_email(email).to_a
user = users.first
# this event should never happen but we should log it in case we have invalid data
log_user_mapping_message('Multiple users found for an email address', email) if users.count > 1
unless project.project_member(user) || project.group&.group_member(user)
log_user_mapping_message('Jira user not found', email)
return
end
user
end
private
attr_reader :project, :jira_user, :params
def log_user_mapping_message(message, email)
logger.info(
project_id: project.id,
project_path: project.full_path,
user_email: email,
message: message
)
end
def logger
@logger ||= Gitlab::Import::Logger.build
end
end
end
end
......@@ -1724,6 +1724,9 @@ msgstr ""
msgid "AlertManagement|Authorize external service"
msgstr ""
msgid "AlertManagement|Create issue"
msgstr ""
msgid "AlertManagement|Display alerts from all your monitoring tools directly within GitLab. Streamline the investigation of your alerts and the escalation of alerts to incidents."
msgstr ""
......
......@@ -263,6 +263,8 @@ module QA
autoload :Members, 'qa/page/project/settings/members'
autoload :MirroringRepositories, 'qa/page/project/settings/mirroring_repositories'
autoload :VisibilityFeaturesPermissions, 'qa/page/project/settings/visibility_features_permissions'
autoload :Operations, 'qa/page/project/settings/operations'
autoload :Incidents, 'qa/page/project/settings/incidents'
end
module SubMenus
......
......@@ -22,10 +22,12 @@ variables:
stages:
- production
# This job continuously deploys to staging/production on every push to `master`.
# This job continuously deploys to production on every push to `master`.
production:
stage: production
tags:
- qa
script:
- check_kube_domain
- install_dependencies
......@@ -34,7 +36,6 @@ production:
- initialize_tiller
- create_secret
- deploy
- persist_environment_url
environment:
name: production
url: http://$CI_PROJECT_PATH_SLUG.$AUTO_DEVOPS_DOMAIN
......
......@@ -17,6 +17,10 @@ module QA
def has_cluster?(cluster)
has_element?(:cluster, cluster_name: cluster.to_s)
end
def click_on_cluster(cluster)
click_on cluster.cluster_name
end
end
end
end
......
......@@ -25,6 +25,7 @@ module QA
element :prometheus_graph_widgets
element :prometheus_widgets_dropdown
element :alert_widget_menu_item
element :generate_chart_link_menu_item
end
def wait_for_metrics
......@@ -72,6 +73,11 @@ module QA
end
end
def copy_link_to_first_chart
all_elements(:prometheus_widgets_dropdown, minimum: 1).first.click
find_element(:generate_chart_link_menu_item)['data-clipboard-text']
end
private
def wait_for_data
......
# frozen_string_literal: true
module QA
module Page
module Project
module Settings
class Incidents < Page::Base
view 'app/views/projects/settings/operations/_incidents.html.haml' do
element :create_issue_checkbox
element :incident_templates_dropdown
element :save_changes_button
end
def enable_issues_for_incidents
check_element :create_issue_checkbox
end
def select_issue_template(template)
within_element :incident_templates_dropdown do
find(:option, template).select_option
end
end
def save_incident_settings
click_element :save_changes_button
end
def has_template?(template)
within_element :incident_templates_dropdown do
has_text?(template)
end
end
end
end
end
end
end
# frozen_string_literal: true
module QA
module Page
module Project
module Settings
class Operations < Page::Base
include Common
view 'app/views/projects/settings/operations/_incidents.html.haml' do
element :incidents_settings_content
end
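# Usage sketch from a QA spec (assumes the standard page-object
# `perform` flow; the template name is a placeholder):
#
#   Page::Project::Settings::Operations.perform do |operations|
#     operations.expand_incidents do |incidents|
#       incidents.enable_issues_for_incidents
#       incidents.select_issue_template('bug')
#       incidents.save_incident_settings
#     end
#   end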
def expand_incidents(&block)
expand_section(:incidents_settings_content) do
Settings::Incidents.perform(&block)
end
end
end
end
end
end
end
......@@ -10,7 +10,7 @@ module QA
def self.included(base)
base.class_eval do
view 'app/views/layouts/nav/sidebar/_project.html.haml' do
element :link_operations
element :operations_link
element :operations_environments_link
element :operations_metrics_link
end
......@@ -45,8 +45,8 @@ module QA
def hover_operations
within_sidebar do
scroll_to_element(:link_operations)
find_element(:link_operations).hover
scroll_to_element(:operations_link)
find_element(:operations_link).hover
yield
end
......
......@@ -14,6 +14,7 @@ module QA
element :link_members_settings
element :general_settings_link
element :integrations_settings_link
element :operations_settings_link
end
end
end
......@@ -64,6 +65,14 @@ module QA
end
end
def go_to_operations_settings
hover_settings do
within_submenu do
click_element :operations_settings_link
end
end
end
private
def hover_settings
......
......@@ -3,16 +3,21 @@ import AlertDetails from '~/alert_management/components/alert_details.vue';
describe('AlertDetails', () => {
let wrapper;
const newIssuePath = 'root/alerts/-/issues/new';
function mountComponent(alert = {}) {
function mountComponent(alert = {}, createIssueFromAlertEnabled = false) {
wrapper = shallowMount(AlertDetails, {
propsData: {
alertId: 'alertId',
projectPath: 'projectPath',
newIssuePath,
},
data() {
return { alert };
},
provide: {
glFeatures: { createIssueFromAlertEnabled },
},
});
}
......@@ -22,6 +27,8 @@ describe('AlertDetails', () => {
}
});
const findCreatedIssueBtn = () => wrapper.find('[data-testid="createIssueBtn"]');
describe('Alert details', () => {
describe('when alert is null', () => {
beforeEach(() => {
......@@ -60,5 +67,22 @@ describe('AlertDetails', () => {
it('renders a status dropdown containing three items', () => {
expect(wrapper.findAll('[data-testid="statusDropdownItem"]').length).toBe(3);
});
describe('Create issue from alert', () => {
describe('createIssueFromAlertEnabled feature flag enabled', () => {
it('should display a button that links to the new issue page', () => {
mountComponent({}, true);
expect(findCreatedIssueBtn().exists()).toBe(true);
expect(findCreatedIssueBtn().attributes('href')).toBe(newIssuePath);
});
});
describe('createIssueFromAlertEnabled feature flag disabled', () => {
it('should not display a button that links to the new issue page', () => {
mountComponent({}, false);
expect(findCreatedIssueBtn().exists()).toBe(false);
});
});
});
});
});
import fileResolver from '~/static_site_editor/graphql/resolvers/file';
import loadSourceContent from '~/static_site_editor/services/load_source_content';
import {
projectId,
sourcePath,
sourceContentTitle as title,
sourceContent as content,
} from '../../mock_data';
jest.mock('~/static_site_editor/services/load_source_content', () => jest.fn());
describe('static_site_editor/graphql/resolvers/file', () => {
it('returns file content and title when fetching file successfully', () => {
loadSourceContent.mockResolvedValueOnce({ title, content });
return fileResolver({ fullPath: projectId }, { path: sourcePath }).then(file => {
expect(file).toEqual({
__typename: 'File',
title,
content,
});
});
});
});
......@@ -66,7 +66,7 @@ describe('static_site_editor/pages/home', () => {
});
};
const buildWrapper = (data = { isSupportedContent: true }) => {
const buildWrapper = (data = { appData: { isSupportedContent: true } }) => {
wrapper = shallowMount(Home, {
localVue,
store,
......@@ -196,7 +196,7 @@ describe('static_site_editor/pages/home', () => {
});
it('displays invalid content message when content is not supported', () => {
buildWrapper({ isSupportedContent: false });
buildWrapper({ appData: { isSupportedContent: false } });
expect(findInvalidContentMessage().exists()).toBe(true);
});
......
......@@ -69,11 +69,13 @@ describe Projects::AlertManagementHelper do
describe '#alert_management_detail_data' do
let(:alert_id) { 1 }
let(:new_issue_path) { new_project_issue_path(project) }
it 'returns detail page configuration' do
expect(helper.alert_management_detail_data(project_path, alert_id)).to eq(
expect(helper.alert_management_detail_data(project, alert_id)).to eq(
'alert-id' => alert_id,
'project-path' => project_path
'project-path' => project_path,
'new-issue-path' => new_issue_path
)
end
end
......
......@@ -17,8 +17,8 @@ describe Gitlab::JiraImport::IssueSerializer do
let(:description) { 'basic description' }
let(:created_at) { '2020-01-01 20:00:00' }
let(:updated_at) { '2020-01-10 20:00:00' }
let(:assignee) { double(displayName: 'Solver', emailAddress: 'assignee@example.com') }
let(:reporter) { double(displayName: 'Reporter', emailAddress: 'reporter@example.com') }
let(:assignee) { double(attrs: { 'displayName' => 'Solver', 'emailAddress' => 'assignee@example.com' }) }
let(:reporter) { double(attrs: { 'displayName' => 'Reporter', 'emailAddress' => 'reporter@example.com' }) }
let(:jira_status) { 'new' }
let(:parent_field) do
......@@ -109,7 +109,7 @@ describe Gitlab::JiraImport::IssueSerializer do
end
context 'author' do
context 'when reporter maps to a GitLab user who is a project member' do
context 'when reporter maps to a valid GitLab user' do
let!(:user) { create(:user, email: 'reporter@example.com') }
it 'sets the issue author to the mapped user' do
......@@ -119,15 +119,7 @@ describe Gitlab::JiraImport::IssueSerializer do
end
end
context 'when reporter maps to a GitLab user who is not a project member' do
let!(:user) { create(:user, email: 'reporter@example.com') }
it 'defaults the issue author to project creator' do
expect(subject[:author_id]).to eq(current_user.id)
end
end
context 'when reporter does not map to a GitLab user' do
context 'when reporter does not map to a valid GitLab user' do
it 'defaults the issue author to project creator' do
expect(subject[:author_id]).to eq(current_user.id)
end
......@@ -142,7 +134,7 @@ describe Gitlab::JiraImport::IssueSerializer do
end
context 'when reporter field is missing email address' do
let(:reporter) { double(name: 'Reporter', emailAddress: nil) }
let(:reporter) { double(attrs: { 'displayName' => 'Reporter' }) }
it 'defaults the issue author to project creator' do
expect(subject[:author_id]).to eq(current_user.id)
......@@ -151,7 +143,7 @@ describe Gitlab::JiraImport::IssueSerializer do
end
context 'assignee' do
context 'when assignee maps to a GitLab user who is a project member' do
context 'when assignee maps to a valid GitLab user' do
let!(:user) { create(:user, email: 'assignee@example.com') }
it 'sets the issue assignees to the mapped user' do
......@@ -161,15 +153,7 @@ describe Gitlab::JiraImport::IssueSerializer do
end
end
context 'when assignee maps to a GitLab user who is not a project member' do
let!(:user) { create(:user, email: 'assignee@example.com') }
it 'leaves the assignee empty' do
expect(subject[:assignee_ids]).to be_nil
end
end
context 'when assignee does not map to a GitLab user' do
context 'when assignee does not map to a valid GitLab user' do
it 'leaves the assignee empty' do
expect(subject[:assignee_ids]).to be_nil
end
......@@ -184,7 +168,7 @@ describe Gitlab::JiraImport::IssueSerializer do
end
context 'when assignee field is missing email address' do
let(:assignee) { double(name: 'Assignee', emailAddress: nil) }
let(:assignee) { double(attrs: { 'displayName' => 'Assignee' }) }
it 'leaves the assignee empty' do
expect(subject[:assignee_ids]).to be_nil
......
# frozen_string_literal: true
require 'spec_helper'
describe Gitlab::JiraImport::UserMapper do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:user) { create(:user, email: 'user@example.com') }
let_it_be(:email) { create(:email, user: user, email: 'second_email@example.com', confirmed_at: nil) }
let(:jira_user) { { 'accountId' => '1a2b', 'emailAddress' => 'user@example.com' } }
describe '#execute' do
subject { described_class.new(project, jira_user).execute }
context 'when jira_user is nil' do
let(:jira_user) { nil }
it 'returns nil' do
expect(subject).to be_nil
end
end
context 'when Gitlab user is not found by email' do
let(:jira_user) { { 'accountId' => '1a2b', 'emailAddress' => 'other@example.com' } }
it 'returns nil' do
expect(subject).to be_nil
end
end
context 'when jira_user emailAddress is nil' do
let(:jira_user) { { 'accountId' => '1a2b', 'emailAddress' => nil } }
it 'returns nil' do
expect(subject).to be_nil
end
end
context 'when jira_user emailAddress key is missing' do
let(:jira_user) { { 'accountId' => '1a2b' } }
it 'returns nil' do
expect(subject).to be_nil
end
end
context 'when found user is not a project member' do
it 'returns nil' do
expect(subject).to be_nil
end
end
context 'when found user is a project member' do
it 'returns the found user' do
project.add_developer(user)
expect(subject).to eq(user)
end
end
context 'when user found by unconfirmed secondary email address is a project member' do
let(:jira_user) { { 'accountId' => '1a2b', 'emailAddress' => 'second_email@example.com' } }
it 'returns the found user' do
project.add_developer(user)
expect(subject).to eq(user)
end
end
context 'when user is a group member' do
it 'returns the found user' do
group.add_developer(user)
expect(subject).to eq(user)
end
end
end
end