Commit 9bbcab83 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

Parent 39a548dd
......@@ -15,7 +15,7 @@ stages:
# in cases where jobs require Docker-in-Docker, the job
# definition must be extended with `.use-docker-in-docker`
default:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-81.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
tags:
- gitlab-org
# All jobs are interruptible by default
......
......@@ -15,7 +15,7 @@
- .default-retry
- .default-before_script
- .assets-compile-cache
image: registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-graphicsmagick-1.3.34-docker-19.03.1
image: registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-git-2.26-lfs-2.9-chrome-81.0-node-12.x-yarn-1.21-graphicsmagick-1.3.34-docker-19.03.1
stage: prepare
variables:
NODE_ENV: "production"
......
......@@ -30,7 +30,7 @@
policy: pull
.use-pg9:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-81.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
services:
- name: postgres:9.6.17
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
......@@ -41,7 +41,7 @@
key: "debian-stretch-ruby-2.6.6-pg9-node-12.x"
.use-pg10:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-10-graphicsmagick-1.3.34"
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-81.0-node-12.x-yarn-1.21-postgresql-10-graphicsmagick-1.3.34"
services:
- name: postgres:10.12
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
......@@ -52,7 +52,7 @@
key: "debian-stretch-ruby-2.6.6-pg10-node-12.x"
.use-pg11:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-11-graphicsmagick-1.3.34"
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-81.0-node-12.x-yarn-1.21-postgresql-11-graphicsmagick-1.3.34"
services:
- name: postgres:11.6
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
......@@ -63,7 +63,7 @@
key: "debian-stretch-ruby-2.6.6-pg11-node-12.x"
.use-pg9-ee:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-81.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
services:
- name: postgres:9.6.17
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
......@@ -75,7 +75,7 @@
key: "debian-stretch-ruby-2.6.6-pg9-node-12.x"
.use-pg10-ee:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-10-graphicsmagick-1.3.34"
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-81.0-node-12.x-yarn-1.21-postgresql-10-graphicsmagick-1.3.34"
services:
- name: postgres:10.12
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
......@@ -87,7 +87,7 @@
key: "debian-stretch-ruby-2.6.6-pg10-node-12.x"
.use-pg11-ee:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-11-graphicsmagick-1.3.34"
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.6-golang-1.14-git-2.26-lfs-2.9-chrome-81.0-node-12.x-yarn-1.21-postgresql-11-graphicsmagick-1.3.34"
services:
- name: postgres:11.6
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
......
......@@ -141,7 +141,9 @@ db:migrate:reset:
- bundle exec rake db:migrate:reset
db:check-schema:
extends: .db-job-base
extends:
- .db-job-base
- .rails:rules:ee-mr-and-master-only
script:
- scripts/regenerate-schema
- source scripts/schema_changed.sh
......
......@@ -445,7 +445,7 @@ gem 'sys-filesystem', '~> 1.1.6'
gem 'net-ntp'
# SSH host key support
gem 'net-ssh', '~> 5.2'
gem 'net-ssh', '~> 6.0'
gem 'sshkey', '~> 2.0'
# Required for ED25519 SSH host key support
......
......@@ -653,7 +653,7 @@ GEM
nenv (0.3.0)
net-ldap (0.16.2)
net-ntp (2.1.3)
net-ssh (5.2.0)
net-ssh (6.0.0)
netrc (0.11.0)
nio4r (2.5.2)
no_proxy_fix (0.1.2)
......@@ -1301,7 +1301,7 @@ DEPENDENCIES
nakayoshi_fork (~> 0.0.4)
net-ldap
net-ntp
net-ssh (~> 5.2)
net-ssh (~> 6.0)
nokogiri (~> 1.10.9)
oauth2 (~> 1.4)
octokit (~> 4.15)
......
export const AJAX_USERS_SELECT_OPTIONS_MAP = {
projectId: 'projectId',
groupId: 'groupId',
showCurrentUser: 'currentUser',
authorId: 'authorId',
skipUsers: 'skipUsers',
};
export const AJAX_USERS_SELECT_PARAMS_MAP = {
project_id: 'projectId',
group_id: 'groupId',
skip_ldap: 'skipLdap',
todo_filter: 'todoFilter',
todo_state_filter: 'todoStateFilter',
current_user: 'showCurrentUser',
author_id: 'authorId',
skip_users: 'skipUsers',
};
......@@ -4,10 +4,15 @@
import $ from 'jquery';
import { escape, template, uniqBy } from 'lodash';
import axios from './lib/utils/axios_utils';
import { s__, __, sprintf } from './locale';
import ModalStore from './boards/stores/modal_store';
import { parseBoolean } from './lib/utils/common_utils';
import axios from '../lib/utils/axios_utils';
import { s__, __, sprintf } from '../locale';
import ModalStore from '../boards/stores/modal_store';
import { parseBoolean } from '../lib/utils/common_utils';
import {
AJAX_USERS_SELECT_OPTIONS_MAP,
AJAX_USERS_SELECT_PARAMS_MAP,
} from 'ee_else_ce/users_select/constants';
import { getAjaxUsersSelectOptions, getAjaxUsersSelectParams } from './utils';
// TODO: remove eventHub hack after code splitting refactor
window.emitSidebarEvent = window.emitSidebarEvent || $.noop;
......@@ -555,13 +560,8 @@ function UsersSelect(currentUser, els, options = {}) {
import(/* webpackChunkName: 'select2' */ 'select2/select2')
.then(() => {
$('.ajax-users-select').each((i, select) => {
const options = {};
const options = getAjaxUsersSelectOptions($(select), AJAX_USERS_SELECT_OPTIONS_MAP);
options.skipLdap = $(select).hasClass('skip_ldap');
options.projectId = $(select).data('projectId');
options.groupId = $(select).data('groupId');
options.showCurrentUser = $(select).data('currentUser');
options.authorId = $(select).data('authorId');
options.skipUsers = $(select).data('skipUsers');
const showNullUser = $(select).data('nullUser');
const showAnyUser = $(select).data('anyUser');
const showEmailUser = $(select).data('emailUser');
......@@ -702,14 +702,7 @@ UsersSelect.prototype.users = function(query, options, callback) {
const params = {
search: query,
active: true,
project_id: options.projectId || null,
group_id: options.groupId || null,
skip_ldap: options.skipLdap || null,
todo_filter: options.todoFilter || null,
todo_state_filter: options.todoStateFilter || null,
current_user: options.showCurrentUser || null,
author_id: options.authorId || null,
skip_users: options.skipUsers || null,
...getAjaxUsersSelectParams(options, AJAX_USERS_SELECT_PARAMS_MAP),
};
if (options.issuableType === 'merge_request') {
......
/**
* Get options from data attributes on passed `$select`.
* @param {jQuery} $select
* @param {Object} optionsMap e.g. { optionKeyName: 'dataAttributeName' }
*/
export const getAjaxUsersSelectOptions = ($select, optionsMap) => {
return Object.keys(optionsMap).reduce((accumulator, optionKey) => {
const dataKey = optionsMap[optionKey];
accumulator[optionKey] = $select.data(dataKey);
return accumulator;
}, {});
};
/**
* Get query parameters used for users request from passed `options` parameter
* @param {Object} options e.g. { currentUserId: 1, fooBar: 'baz' }
* @param {Object} paramsMap e.g. { user_id: 'currentUserId', foo_bar: 'fooBar' }
*/
export const getAjaxUsersSelectParams = (options, paramsMap) => {
return Object.keys(paramsMap).reduce((accumulator, paramKey) => {
const optionKey = paramsMap[paramKey];
accumulator[paramKey] = options[optionKey] || null;
return accumulator;
}, {});
};
......@@ -14,6 +14,7 @@ module Types
field :plain_data, GraphQL::STRING_TYPE,
description: 'Blob plain highlighted data',
calls_gitaly: true,
null: true
field :raw_path, GraphQL::STRING_TYPE,
......
......@@ -17,12 +17,14 @@ module Types
field :collapsed, GraphQL::BOOLEAN_TYPE,
description: 'Shows whether the blob should be displayed collapsed',
method: :collapsed?,
null: false
null: false,
resolve: -> (viewer, _args, _ctx) { !!viewer&.collapsed? }
field :too_large, GraphQL::BOOLEAN_TYPE,
description: 'Shows whether the blob too large to be displayed',
method: :too_large?,
null: false
null: false,
resolve: -> (viewer, _args, _ctx) { !!viewer&.too_large? }
field :render_error, GraphQL::STRING_TYPE,
description: 'Error rendering the blob content',
......
---
title: Add percentage of actors feature flag rollout
merge_request: 29698
author:
type: added
---
title: Add a Project's group to list of groups when parsing for codeowner entries
merge_request: 30934
author:
type: fixed
---
title: Fix bug in Snippet BlobViewer GraphQL definition
merge_request: 30927
author:
type: fixed
---
title: Move daily create users statistics cronjob to CE
merge_request: 30843
author:
type: fixed
......@@ -490,6 +490,9 @@ Settings.cron_jobs['container_expiration_policy_worker']['job_class'] = 'Contain
Settings.cron_jobs['x509_issuer_crl_check_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['x509_issuer_crl_check_worker']['cron'] ||= '30 1 * * *'
Settings.cron_jobs['x509_issuer_crl_check_worker']['job_class'] = 'X509IssuerCrlCheckWorker'
Settings.cron_jobs['users_create_statistics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['users_create_statistics_worker']['cron'] ||= '2 15 * * *'
Settings.cron_jobs['users_create_statistics_worker']['job_class'] = 'Users::CreateStatisticsWorker'
Gitlab.ee do
Settings.cron_jobs['adjourned_group_deletion_worker'] ||= Settingslogic.new({})
......@@ -552,9 +555,6 @@ Gitlab.ee do
Settings.cron_jobs['sync_seat_link_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['sync_seat_link_worker']['cron'] ||= "#{rand(60)} 0 * * *"
Settings.cron_jobs['sync_seat_link_worker']['job_class'] = 'SyncSeatLinkWorker'
Settings.cron_jobs['users_create_statistics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['users_create_statistics_worker']['cron'] ||= '2 15 * * *'
Settings.cron_jobs['users_create_statistics_worker']['job_class'] = 'Users::CreateStatisticsWorker'
end
#
......
......@@ -90,6 +90,7 @@ dotenv
downvoted
downvotes
Dpl
Dreamweaver
Elasticsearch
enablement
enqueued
......@@ -121,6 +122,8 @@ Gzip
hardcode
hardcoded
hardcodes
heatmap
heatmaps
Helm
Heroku
Herokuish
......@@ -133,6 +136,7 @@ hotfixes
hotfixing
http
https
idempotence
Ingress
initializer
initializers
......@@ -177,6 +181,7 @@ mergeable
Microsoft
middleware
middlewares
Minikube
MinIO
mitmproxy
misconfigure
......@@ -242,6 +247,7 @@ rebase
rebased
rebases
rebasing
Redcarpet
Redis
Redmine
reCAPTCHA
......@@ -283,6 +289,7 @@ Sentry
serverless
Sidekiq
sharding
shfmt
Shibboleth
sanitization
serializer
......@@ -296,9 +303,13 @@ Splunk
SSH
storable
strace
strikethrough
strikethroughs
subpath
subfolder
subfolders
subgraph
subgraphs
sublicense
sublicensed
sublicenses
......@@ -340,6 +351,7 @@ unchecking
unchecks
uncomment
uncommented
uncommenting
unencode
unencoded
unencoder
......@@ -358,12 +370,14 @@ unoptimize
unoptimized
unoptimizes
unoptimizing
unprioritized
unprotect
unprotects
unprotected
unpublish
unpublished
unpublishes
unpublishing
unreferenced
unresolve
unresolved
......@@ -373,6 +387,9 @@ unstage
unstaged
unstages
unstaging
unstash
unstashed
unstashing
untarred
untracked
untrusted
......
......@@ -23,10 +23,11 @@ No matter how you use GitLab, we have documentation for you.
| Essential Documentation | Essential Documentation |
|:-------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------|
| [**User Documentation**](user/index.md)<br/>Discover features and concepts for GitLab users. | [**Administrator documentation**](administration/index.md)<br/>Everything GitLab self-managed administrators need to know. |
| [**Contributing to GitLab**](#contributing-to-gitlab)<br/>At GitLab, everyone can contribute! | [**New to Git and GitLab?**](#new-to-git-and-gitlab)<br/>We have the resources to get you started. |
| [**Contributing to GitLab**](#contributing-to-gitlab)<br/>At GitLab, everyone can contribute! | [**New to Git and GitLab?**](#new-to-git-and-gitlab)<br/>We have the resources to get you started. |
| [**Building an integration with GitLab?**](#building-an-integration-with-gitlab)<br/>Consult our automation and integration documentation. | [**Coming to GitLab from another platform?**](#coming-to-gitlab-from-another-platform)<br/>Consult our handy guides. |
| [**Install GitLab**](https://about.gitlab.com/install/)<br/>Installation options for different platforms. | [**Customers**](subscriptions/index.md)<br/>Information for new and existing customers. |
| [**Update GitLab**](update/README.md)<br/>Update your GitLab self-managed instance to the latest version. | [**GitLab Releases**](https://about.gitlab.com/releases/)<br/>What's new in GitLab. |
| [**Update GitLab**](update/README.md)<br/>Update your GitLab self-managed instance to the latest version. | [**Reference Architectures**](administration/reference_architectures/index.md)<br/>GitLab's reference architectures |
| [**GitLab Releases**](https://about.gitlab.com/releases/)<br/>What's new in GitLab. | |
## Popular Documentation
......
---
type: reference, concepts
redirect_to: ../reference_architectures/index.md
---
# Availability
GitLab offers a number of options to manage availability and resiliency. Below are the options to consider with trade-offs.
| Event | GitLab Feature | Recovery Point Objective (RPO) | Recovery Time Objective (RTO) | Cost |
| ----- | -------------- | --- | --- | ---- |
| Availability Zone failure | "GitLab HA" | No loss | No loss | 2x Git storage, multiple nodes balanced across AZ's |
| Region failure | [GitLab Geo Disaster Recovery](../geo/disaster_recovery/index.md) | 5-10 minutes | 30 minutes | 2x primary cost |
| All failures | Backup/Restore | Last backup | Hours to Days | Cost of storing the backups |
This document was moved to [another location](../reference_architectures/index.md).
......@@ -35,7 +35,7 @@ Follow the steps below to set up a custom hook:
`/home/git/gitlab/file_hooks/`. For Omnibus installs the path is
usually `/opt/gitlab/embedded/service/gitlab-rails/file_hooks`.
For [highly available](availability/index.md) configurations, your hook file should exist on each
For [highly available](reference_architectures/index.md) configurations, your hook file should exist on each
application server.
1. Inside the `file_hooks` directory, create a file with a name of your choice,
......
......@@ -143,7 +143,7 @@ To configure the connection to the external read-replica database and enable Log
database to keep track of replication status and automatically recover from
potential replication issues. Omnibus automatically configures a tracking database
when `roles ['geo_secondary_role']` is set. For high availability,
refer to [Geo High Availability](../../availability/index.md).
refer to [Geo High Availability](../../reference_architectures/index.md).
If you want to run this database external to Omnibus, please follow the instructions below.
The tracking database requires an [FDW](https://www.postgresql.org/docs/9.6/postgres-fdw.html)
......
......@@ -47,12 +47,12 @@ It is possible to use cloud hosted services for PostgreSQL and Redis, but this i
## Prerequisites: Two working GitLab HA clusters
One cluster will serve as the **primary** node. Use the
[GitLab HA documentation](../../availability/index.md) to set this up. If
[GitLab HA documentation](../../reference_architectures/index.md) to set this up. If
you already have a working GitLab instance that is in-use, it can be used as a
**primary**.
The second cluster will serve as the **secondary** node. Again, use the
[GitLab HA documentation](../../availability/index.md) to set this up.
[GitLab HA documentation](../../reference_architectures/index.md) to set this up.
It's a good idea to log in and test it, however, note that its data will be
wiped out as part of the process of replicating from the **primary**.
......@@ -371,7 +371,7 @@ more information.
The minimal reference architecture diagram above shows all application services
running together on the same machines. However, for high availability we
[strongly recommend running all services separately](../../availability/index.md).
[strongly recommend running all services separately](../../reference_architectures/index.md).
For example, a Sidekiq server could be configured similarly to the frontend
application servers above, with some changes to run only the `sidekiq` service:
......
......@@ -2,7 +2,7 @@
> - Introduced in GitLab Enterprise Edition 8.9.
> - Using Geo in combination with
> [High Availability](../../availability/index.md)
> [High Availability](../../reference_architectures/index.md)
> is considered **Generally Available** (GA) in
> [GitLab Premium](https://about.gitlab.com/pricing/) 10.4.
......
......@@ -460,12 +460,12 @@ documentation](index.md#3-gitaly-server-configuration).
1. Configure the storage location for Git data by setting `git_data_dirs` in
`/etc/gitlab/gitlab.rb`. Each Gitaly node should have a unique storage name
(eg `gitaly-1`).
(such as `gitaly-1`).
Instead of configuring `git_data_dirs` uniquely for each Gitaly node, it is
often easier to include the configuration for all Gitaly nodes on every
Gitaly node. This is supported because the Praefect `virtual_storages`
configuration maps each storage name (eg `gitaly-1`) to a specific node, and
configuration maps each storage name (such as `gitaly-1`) to a specific node, and
requests are routed accordingly. This means every Gitaly node in your fleet
can share the same configuration.
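As a minimal sketch (not part of this change; storage names and paths are illustrative), a shared `git_data_dirs` block in `/etc/gitlab/gitlab.rb` that every Gitaly node can carry might look like:

```ruby
# Sketch only: storage names and paths are placeholders.
# Praefect's virtual_storages maps each storage name to a specific node,
# so the same block can safely be present on every Gitaly node.
git_data_dirs({
  'gitaly-1' => { 'path' => '/var/opt/gitlab/git-data' },
  'gitaly-2' => { 'path' => '/var/opt/gitlab/git-data' },
  'gitaly-3' => { 'path' => '/var/opt/gitlab/git-data' },
})
```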
......@@ -573,7 +573,7 @@ Particular attention should be shown to:
})
```
1. Allow Gitaly to listen on a tcp port by editing
1. Allow Gitaly to listen on a TCP port by editing
`/etc/gitlab/gitlab.rb`
```ruby
......@@ -742,13 +742,13 @@ strategy in the future.
## Identifying Impact of a Primary Node Failure
When a primary Gitaly node fails, there is a chance of dataloss. Dataloss can occur if there were outstanding replication jobs the secondaries did not manage to process before the failure. The Praefect `dataloss` subcommand helps identify these cases by counting the number of dead replication jobs for each repository within a given timeframe.
When a primary Gitaly node fails, there is a chance of data loss. Data loss can occur if there were outstanding replication jobs the secondaries did not manage to process before the failure. The Praefect `dataloss` sub-command helps identify these cases by counting the number of dead replication jobs for each repository within a given time frame.
```shell
sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml dataloss -from <rfc3339-time> -to <rfc3339-time>
```
If the timeframe is not specified, dead replication jobs from the last six hours are counted:
If the time frame is not specified, dead replication jobs from the last six hours are counted:
```shell
sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml dataloss
......@@ -759,7 +759,7 @@ example/repository-2: 4 jobs
example/repository-3: 2 jobs
```
To specify a timeframe in UTC, run:
To specify a time frame in UTC, run:
```shell
sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml dataloss -from 2020-01-02T00:00:00+00:00 -to 2020-01-02T00:02:00+00:00
......@@ -779,7 +779,7 @@ When a Praefect backend node fails and is no longer able to
replicate changes, the backend node will start to drift from the primary. If
that node eventually recovers, it will need to be reconciled with the current
primary. The primary node is considered the single source of truth for the
state of a shard. The Praefect `reconcile` subcommand allows for the manual
state of a shard. The Praefect `reconcile` sub-command allows for the manual
reconciliation between a backend node and the current primary.
Run the following command on the Praefect server after all placeholders
......
---
type: reference, concepts
redirect_to: ../reference_architectures/index.md
---
The page have been deprecated, please see:
# Reference Architectures
1. [Availability page](../availability/index.md)
1. [Scaling page](../scaling/index.md)
1. [Docs page for high availability](./gitlab.md)
1. [High availability solutions page](https://about.gitlab.com/solutions/high-availability/)
This document was moved to [another location](../reference_architectures/index.md).
......@@ -24,7 +24,7 @@ If you use a cloud-managed service, or provide your own PostgreSQL:
## PostgreSQL in a Scaled and Highly Available Environment
This section is relevant for [Scalable and Highly Available Setups](../scaling/index.md).
This section is relevant for [Scalable and Highly Available Setups](../reference_architectures/index.md).
### Provide your own PostgreSQL instance **(CORE ONLY)**
......
......@@ -11,7 +11,7 @@ should consider using Gitaly on a separate node.
See the [Gitaly HA Epic](https://gitlab.com/groups/gitlab-org/-/epics/289) to
track plans and progress toward high availability support.
This document is relevant for [Scalable and Highly Available Setups](../scaling/index.md).
This document is relevant for [scalable and highly available setups](../reference_architectures/index.md).
## Running Gitaly on its own server
......@@ -19,7 +19,7 @@ See [Running Gitaly on its own server](../gitaly/index.md#running-gitaly-on-its-
in Gitaly documentation.
Continue configuration of other components by going back to the
[Scaling](../scaling/index.md#components-provided-by-omnibus-gitlab) page.
[reference architecture](../reference_architectures/index.md#configure-gitlab-to-scale) page.
## Enable Monitoring
......
......@@ -2,7 +2,9 @@
type: reference
---
# Configuring GitLab for Scaling and High Availability
# Configuring GitLab application (Rails)
This section describes how to configure the GitLab application (Rails) component.
NOTE: **Note:** There is some additional configuration near the bottom for
additional GitLab application servers. It's important to read and understand
......
......@@ -11,7 +11,7 @@ You can configure a Prometheus node to monitor GitLab.
## Standalone Monitoring node using Omnibus GitLab
The Omnibus GitLab package can be used to configure a standalone Monitoring node running [Prometheus](../monitoring/prometheus/index.md) and [Grafana](../monitoring/performance/grafana_configuration.md).
The monitoring node is not highly available. See [Scaling and High Availability](../scaling/index.md)
The monitoring node is not highly available. See [Scaling and High Availability](../reference_architectures/index.md)
for an overview of GitLab scaling and high availability options.
The steps below are the minimum necessary to configure a Monitoring node running Prometheus and Grafana with
......
......@@ -27,7 +27,7 @@ These will be necessary when configuring the GitLab application servers later.
## Redis in a Scaled and Highly Available Environment
This section is relevant for [Scalable and Highly Available Setups](../scaling/index.md).
This section is relevant for [scalable and highly available setups](../reference_architectures/index.md).
### Provide your own Redis instance **(CORE ONLY)**
......@@ -43,8 +43,8 @@ In this configuration Redis is not highly available, and represents a single
point of failure. However, in a scaled environment the objective is to allow
the environment to handle more users or to increase throughput. Redis itself
is generally stable and can handle many requests so it is an acceptable
trade off to have only a single instance. See [High Availability](../availability/index.md)
for an overview of GitLab scaling and high availability options.
trade off to have only a single instance. See the [reference architectures](../reference_architectures/index.md)
page for an overview of GitLab scaling and high availability options.
The steps below are the minimum necessary to configure a Redis server with
Omnibus:
......@@ -89,7 +89,7 @@ Advanced configuration options are supported and can be added if
needed.
Continue configuration of other components by going back to the
[Scaling](../scaling/index.md#components-provided-by-omnibus-gitlab) page.
[reference architectures](../reference_architectures/index.md#configure-gitlab-to-scale) page.
### High Availability with Omnibus GitLab **(PREMIUM ONLY)**
......
......@@ -34,7 +34,7 @@ Learn how to install, configure, update, and maintain your GitLab instance.
- [Install](../install/README.md): Requirements, directory structures, and installation methods.
- [Database load balancing](database_load_balancing.md): Distribute database queries among multiple database servers. **(STARTER ONLY)**
- [Omnibus support for log forwarding](https://docs.gitlab.com/omnibus/settings/logs.html#udp-log-shipping-gitlab-enterprise-edition-only) **(STARTER ONLY)**
- [High Availability](availability/index.md): Configure multiple servers for scaling or high availability.
- [High Availability](reference_architectures/index.md): Configure multiple servers for scaling or high availability.
- [Installing GitLab HA on Amazon Web Services (AWS)](../install/aws/index.md): Set up GitLab High Availability on Amazon AWS.
- [Geo](geo/replication/index.md): Replicate your GitLab instance to other geographic locations as a read-only fully operational version. **(PREMIUM ONLY)**
- [Disaster Recovery](geo/disaster_recovery/index.md): Quickly fail-over to a different site with minimal effort in a disaster situation. **(PREMIUM ONLY)**
......
......@@ -428,7 +428,7 @@ installations from source.
This file contains logging information about jobs before they start
being processed by Sidekiq, for example before being enqueued.
This logfile follows the same structure as
This log file follows the same structure as
[`sidekiq.log`](#sidekiqlog), so it will be structured as JSON if
you've configured this for Sidekiq as mentioned above.
......@@ -523,7 +523,7 @@ User clone/fetch activity using SSH transport appears in this log as `executing
## Gitaly Logs
This file lives in `/var/log/gitlab/gitaly/current` and is produced by [runit](http://smarden.org/runit/). `runit` is packaged with Omnibus and a brief explanation of its purpose is available [in the omnibus documentation](https://docs.gitlab.com/omnibus/architecture/#runit). [Log files are rotated](http://smarden.org/runit/svlogd.8.html), renamed in unix timestamp format and `gzip`-compressed (e.g. `@1584057562.s`).
This file lives in `/var/log/gitlab/gitaly/current` and is produced by [runit](http://smarden.org/runit/). `runit` is packaged with Omnibus and a brief explanation of its purpose is available [in the omnibus documentation](https://docs.gitlab.com/omnibus/architecture/#runit). [Log files are rotated](http://smarden.org/runit/svlogd.8.html), renamed in Unix timestamp format and `gzip`-compressed (e.g. `@1584057562.s`).
### `grpc.log`
......@@ -761,7 +761,7 @@ For Omnibus installations, NGINX logs reside in:
- `/var/log/gitlab/nginx/gitlab_pages_access.log` contains a log of requests made to Pages static sites.
- `/var/log/gitlab/nginx/gitlab_pages_error.log` contains a log of NGINX errors for Pages static sites.
- `/var/log/gitlab/nginx/gitlab_registry_access.log` contains a log of requests made to the Container Registry.
- `/var/log/gitlab/nginx/gitlab_registry_error.log` contains a log of NGINX errors for the Container Regsitry.
- `/var/log/gitlab/nginx/gitlab_registry_error.log` contains a log of NGINX errors for the Container Registry.
- `/var/log/gitlab/nginx/gitlab_mattermost_access.log` contains a log of requests made to Mattermost.
- `/var/log/gitlab/nginx/gitlab_mattermost_error.log` contains a log of NGINX errors for Mattermost.
......
......@@ -37,7 +37,7 @@ For configuring GitLab to use Object Storage refer to the following guides:
### Other alternatives to filesystem storage
If you're working to [scale out](scaling/index.md) your GitLab implementation,
If you're working to [scale out](reference_architectures/index.md) your GitLab implementation,
or add [fault tolerance and redundancy](high_availability/README.md) you may be
looking at removing dependencies on block or network filesystems.
See the following guides and
......@@ -77,7 +77,7 @@ with the Fog library that GitLab uses. Symptoms include:
### GitLab Pages requires NFS
If you're working to add more GitLab servers for [scaling or fault tolerance](scaling/index.md)
If you're working to add more GitLab servers for [scaling or fault tolerance](reference_architectures/index.md)
and one of your requirements is [GitLab Pages](../user/project/pages/index.md) this currently requires
NFS. There is [work in progress](https://gitlab.com/gitlab-org/gitlab-pages/issues/196)
to remove this dependency. In the future, GitLab Pages may use
......
This diff is collapsed.
This diff is collapsed.
......@@ -72,7 +72,7 @@ and they will assist you with any issues you are having.
This is the principle of Kubernetes; read [Twelve-factor app](https://12factor.net/)
for details.
- How to get cronjobs configured on a cluster
- How to get cron jobs configured on a cluster
```shell
kubectl get cronjobs
......@@ -206,7 +206,7 @@ all Kubernetes resources and dependent charts:
helm get manifest <release name>
```
## Installation of minimal GitLab config via Minukube on macOS
## Installation of minimal GitLab config via Minikube on macOS
This section is based on [Developing for Kubernetes with Minikube](https://docs.gitlab.com/charts/development/minikube/index.html)
and [Helm](https://docs.gitlab.com/charts/installation/tools.html#helm). Refer
......
......@@ -267,7 +267,7 @@ to your review app.
After determining the ID for the merge request to link to a visual review app, you
can supply the ID by either:
- Hardcoding it in the script tag via the data attribute `data-merge-request-id` of the app.
- Hard-coding it in the script tag via the data attribute `data-merge-request-id` of the app.
- Dynamically adding the `data-merge-request-id` value during the build of the app.
- Supplying it manually through the visual review form in the app.
......
......@@ -382,7 +382,7 @@ use for storing things like passwords, SSH keys, and credentials.
Group-level variables can be added by:
1. Navigating to your group's **Settings > CI/CD** page.
1. Inputing variable types, keys, and values in the **Variables** section.
1. Inputting variable types, keys, and values in the **Variables** section.
Any variables of [subgroups](../../user/group/subgroups/index.md) will be inherited recursively.
Once you set them, they will be available for all subsequent pipelines. Any group-level user defined variables can be viewed in projects by:
......
......@@ -159,7 +159,7 @@ During this phase, the idea is to collect as much information as possible about
The analysis usually takes a full milestone to complete, though it's not impossible to start the implementation in the same milestone.
In particular, the upload request can have some [requirements in the GitLab Workhorse project](#file-uploads). This project has a different release cycle than the rails backend. It's **strongly** recommended that you open an issue there as soon as the upload request analysis is done. This way GitLab Worhorse is already ready when the upload request is implemented on the rails backend.
In particular, the upload request can have some [requirements in the GitLab Workhorse project](#file-uploads). This project has a different release cycle than the rails backend. It's **strongly** recommended that you open an issue there as soon as the upload request analysis is done. This way GitLab Workhorse is already ready when the upload request is implemented on the rails backend.
### Implementation
......
......@@ -80,7 +80,20 @@ We format shell scripts according to the [Google Shell Style Guide](https://goog
so the following `shfmt` invocation should be applied to the project's script files:
```shell
shfmt -i 2 -ci scripts/**/*.sh
shfmt -i 2 -ci -w scripts/**/*.sh
```
In addition to the [Linting](#linting) GitLab CI/CD job, all projects with shell scripts should also
use this job:
```yaml
shfmt:
image: mvdan/shfmt:v3.1.0-alpine
stage: test
before_script:
- shfmt -version
script:
- shfmt -i 2 -ci -d scripts # path to your shell scripts
```
TIP: **Tip:**
......@@ -88,11 +101,6 @@ By default, shfmt will use the [shell detection](https://github.com/mvdan/sh#shf
and ignore files starting with a period. To override this, use `-ln` flag to specify the shell dialect:
`-ln posix` or `-ln bash`.
NOTE: **Note:**
Currently, the `shfmt` tool [is not shipped](https://github.com/mvdan/sh/issues/68) as a Docker image containing
a Linux shell. This makes it impossible to use the [official Docker image](https://hub.docker.com/r/mvdan/shfmt)
in GitLab Runner. This [may change](https://github.com/mvdan/sh/issues/68#issuecomment-507721371) in future.
## Testing
NOTE: **Note:**
......
......@@ -14,15 +14,15 @@ and cost of hosting.
There are many ways you can install GitLab depending on your platform:
1. **Omnibus GitLab**: The official deb/rpm packages that contain a bundle of GitLab
and the various components it depends on like PostgreSQL, Redis, Sidekiq, etc.
and the various components it depends on, like PostgreSQL, Redis, Sidekiq, etc.
1. **GitLab Helm chart**: The cloud native Helm chart for installing GitLab and all
its components on Kubernetes.
1. **Docker**: The Omnibus GitLab packages dockerized.
1. **Source**: Install GitLab and all its components from scratch.
TIP: **If in doubt, choose Omnibus:**
The Omnibus GitLab packages are mature, scalable, support
[high availability](../administration/availability/index.md) and are used
The Omnibus GitLab packages are mature,
[scalable](../administration/reference_architectures/index.md) and are used
today on GitLab.com. The Helm charts are recommended for those who are familiar
with Kubernetes.
......@@ -36,7 +36,7 @@ The Omnibus GitLab package uses our official deb/rpm repositories. This is
recommended for most users.
If you need additional flexibility and resilience, we recommend deploying
GitLab as described in our [High Availability documentation](../administration/availability/index.md).
GitLab as described in our [reference architecture documentation](../administration/reference_architectures/index.md).
[**> Install GitLab using the Omnibus GitLab package.**](https://about.gitlab.com/install/)
......@@ -67,7 +67,7 @@ GitLab maintains a set of official Docker images based on the Omnibus GitLab pac
## Installing GitLab from source
If the Omnibus GitLab package is not available in your distribution, you can
install GitLab from source: Useful for unsupported systems like *BSD. For an
install GitLab from source: Useful for unsupported systems like \*BSD. For an
overview of the directory structure, read the [structure documentation](structure.md).
[**> Install GitLab from source.**](installation.md)
......
......@@ -739,7 +739,7 @@ Have a read through these other resources and feel free to
[open an issue](https://gitlab.com/gitlab-org/gitlab/issues/new)
to request additional material:
- [Scaling GitLab](../../administration/scaling/index.md):
- [Scaling GitLab](../../administration/reference_architectures/index.md):
GitLab supports several different types of clustering and high-availability.
- [Geo replication](../../administration/geo/replication/index.md):
Geo is the solution for widely distributed development teams.
......
......@@ -95,7 +95,7 @@ This is the recommended minimum hardware for a handful of example GitLab user ba
- 4 cores supports up to 500 users
- 8 cores supports up to 1,000 users
- 32 cores supports up to 5,000 users
- More users? Run it high-availability on [multiple application servers](https://about.gitlab.com/solutions/high-availability/)
- More users? Consult the [reference architectures page](../administration/reference_architectures/index.md)
### Memory
......@@ -112,7 +112,7 @@ errors during usage.
- 16GB RAM supports up to 500 users
- 32GB RAM supports up to 1,000 users
- 128GB RAM supports up to 5,000 users
- More users? Run it high-availability on [multiple application servers](https://about.gitlab.com/solutions/high-availability/)
- More users? Consult the [reference architectures page](../administration/reference_architectures/index.md)
We recommend having at least [2GB of swap on your server](https://askubuntu.com/a/505344/310789), even if you currently have
enough available RAM. Having swap will help reduce the chance of errors occurring
......
......@@ -624,7 +624,7 @@ Here are some common pitfalls and how to overcome them:
Gitlab::Elastic::Indexer::Error: time="2020-01-23T09:13:00Z" level=fatal msg="health check timeout: no Elasticsearch node available"
```
You probably have not used either `http://` or `https://` as part of your value in the **"URL"** field of the Elasticseach Integration Menu. Please make sure you are using either `http://` or `https://` in this field as the [Elasticsearch client for Go](https://github.com/olivere/elastic) that we are using [needs the prefix for the URL to be accepted as valid](https://github.com/olivere/elastic/commit/a80af35aa41856dc2c986204e2b64eab81ccac3a).
You probably have not used either `http://` or `https://` as part of your value in the **"URL"** field of the Elasticsearch Integration Menu. Please make sure you are using either `http://` or `https://` in this field as the [Elasticsearch client for Go](https://github.com/olivere/elastic) that we are using [needs the prefix for the URL to be accepted as valid](https://github.com/olivere/elastic/commit/a80af35aa41856dc2c986204e2b64eab81ccac3a).
Once you have corrected the formatting of the URL, delete the index (via the [dedicated Rake task](#gitlab-elasticsearch-rake-tasks)) and [reindex the content of your instance](#adding-gitlabs-data-to-the-elasticsearch-index).
### Low level troubleshooting
......
......@@ -107,7 +107,7 @@ When visiting one of these views, you can now hover over a code reference to see
## Sourcegraph for GitLab.com
Sourcegraph powered code intelligence is avaialable for all public projects on GitLab.com.
Sourcegraph powered code intelligence is available for all public projects on GitLab.com.
Support for private projects is currently not available for GitLab.com;
follow the epic [&2201](https://gitlab.com/groups/gitlab-org/-/epics/2201)
......
......@@ -39,7 +39,7 @@ it by running in your terminal:
xcode-select --install
```
Click **Install** to download and install it. Alternativelly, you can install
Click **Install** to download and install it. Alternatively, you can install
the entire [XCode](https://developer.apple.com/xcode/) package through the
macOS App Store.
......
......@@ -105,7 +105,7 @@ changing how standard Markdown is used:
| Standard Markdown | Extended Markdown in GitLab |
| ------------------------------------- | ------------------------- |
| [blockquotes](#blockquotes) | [multiline blockquotes](#multiline-blockquote) |
| [blockquotes](#blockquotes) | [multi-line blockquotes](#multiline-blockquote) |
| [code blocks](#code-spans-and-blocks) | [colored code and syntax highlighting](#colored-code-and-syntax-highlighting) |
| [emphasis](#emphasis) | [multiple underscores in words](#multiple-underscores-in-words-and-mid-word-emphasis)
| [headers](#headers) | [linkable Header IDs](#header-ids-and-links) |
......@@ -353,7 +353,7 @@ However, the wrapping tags can't be mixed:
- [- deletion -}
```
If your diff includes words in `` `code` `` font, make sure to escape each bactick `` ` `` with a
If your diff includes words in `` `code` `` font, make sure to escape each backtick `` ` `` with a
backslash `\`, otherwise the diff highlight won't render correctly:
```markdown
......@@ -396,8 +396,8 @@ a^2+b^2=c^2
_Be advised that KaTeX only supports a [subset](https://katex.org/docs/supported.html) of LaTeX._
NOTE: **Note:** This also works for the asciidoctor `:stem: latexmath`. For details see
the [asciidoctor user manual](https://asciidoctor.org/docs/user-manual/#activating-stem-support).
NOTE: **Note:** This also works for the Asciidoctor `:stem: latexmath`. For details see
the [Asciidoctor user manual](https://asciidoctor.org/docs/user-manual/#activating-stem-support).
### Special GitLab references
......@@ -608,7 +608,7 @@ Quote break.
> If this is not rendered correctly, [view it in GitLab itself](https://gitlab.com/gitlab-org/gitlab/blob/master/doc/user/markdown.md#multiline-blockquote).
GFM extends the standard Markdown standard by also supporting multiline blockquotes
GFM extends the standard Markdown standard by also supporting multi-line blockquotes
fenced by `>>>`:
```markdown
......@@ -1216,7 +1216,7 @@ Some text to show that the reference links can follow later.
- This is an [inline-style link](https://www.google.com)
- This is a [link to a repository file in the same directory](index.md)
- This is a [relative link to a readme one directory higher](../README.md)
- This is a [relative link to a README one directory higher](../README.md)
- This is a [link that also has title text](https://www.google.com "This link takes you to Google!")
Using header ID anchors:
......@@ -1244,7 +1244,7 @@ will point the link to `wikis/style` only when the link is inside of a wiki Mark
#### URL auto-linking
GFM will autolink almost any URL you put into your text:
GFM will auto-link almost any URL you put into your text:
```markdown
- https://www.google.com
......
......@@ -4,7 +4,9 @@ type: concepts, howto
# Personal access tokens
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/3749) in GitLab 8.8.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/3749) in GitLab 8.8.
> - [Notifications about expiring tokens](https://gitlab.com/gitlab-org/gitlab/-/issues/3649) added in GitLab 12.6.
> - [Token lifetime limits](https://gitlab.com/gitlab-org/gitlab/-/issues/3649) added in [GitLab Ultimate](https://about.gitlab.com/pricing/) 12.6.
If you're unable to use [OAuth2](../../api/oauth2.md), you can use a personal access token to authenticate with the [GitLab API](../../api/README.md#personal-access-tokens).
......@@ -12,8 +14,13 @@ You can also use personal access tokens with Git to authenticate over HTTP or SS
Personal access tokens expire on the date you define, at midnight UTC.
- GitLab runs a check at 01:00 AM UTC every day to identify personal access tokens that will expire in under seven days. The owners of these tokens are notified by email.
- In GitLab Ultimate, administrators may [limit the lifetime of personal access tokens](../admin_area/settings/account_and_limit_settings.md#limiting-lifetime-of-personal-access-tokens-ultimate-only).
For examples of how you can use a personal access token to authenticate with the API, see the following section from our [API Docs](../../api/README.md#personal-access-tokens).
GitLab also offers [impersonation tokens](../../api/README.md#impersonation-tokens) which are created by administrators via the API. They're a great fit for automated authentication as a specific user.
## Creating a personal access token
You can create as many personal access tokens as you like from your GitLab
......
......@@ -171,7 +171,7 @@ When the changes are merged, your changes are added to the upstream repository a
the branch as per specification. After your work is merged, if you don't intend to
make any other contributions to the upstream project, you can unlink your
fork from its upstream project in the **Settings > Advanced Settings** section by
[removing the forking relashionship](../settings/index.md#removing-a-fork-relationship).
[removing the forking relationship](../settings/index.md#removing-a-fork-relationship).
For further details, [see the forking workflow documentation](../repository/forking_workflow.md).
......
......@@ -16,6 +16,15 @@ module API
end
end
def gate_key(params)
case params[:key]
when 'percentage_of_actors'
:percentage_of_actors
else
:percentage_of_time
end
end
def gate_targets(params)
Feature::Target.new(params).targets
end
......@@ -40,15 +49,22 @@ module API
end
params do
requires :value, type: String, desc: '`true` or `false` to enable/disable, an integer for percentage of time'
optional :key, type: String, desc: '`percentage_of_actors` or the default `percentage_of_time`'
optional :feature_group, type: String, desc: 'A Feature group name'
optional :user, type: String, desc: 'A GitLab username'
optional :group, type: String, desc: "A GitLab group's path, such as 'gitlab-org'"
optional :project, type: String, desc: 'A projects path, like gitlab-org/gitlab-ce'
mutually_exclusive :key, :feature_group
mutually_exclusive :key, :user
mutually_exclusive :key, :group
mutually_exclusive :key, :project
end
post ':name' do
feature = Feature.get(params[:name])
targets = gate_targets(params)
value = gate_value(params)
key = gate_key(params)
case value
when true
......@@ -64,7 +80,11 @@ module API
feature.disable
end
else
feature.enable_percentage_of_time(value)
if key == :percentage_of_actors
feature.enable_percentage_of_actors(value)
else
feature.enable_percentage_of_time(value)
end
end
present feature, with: Entities::Feature, current_user: current_user
......
......@@ -3,6 +3,6 @@
FactoryBot.define do
factory :identity do
provider { 'ldapmain' }
extern_uid { 'my-ldap-id' }
sequence(:extern_uid) { |n| "my-ldap-id-#{n}" }
end
end
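With the sequence in place, repeated factory calls should yield distinct `extern_uid` values instead of colliding on a fixed one, for example (FactoryBot's default numbering assumed):

```ruby
# Sketch: each generated identity gets a unique extern_uid.
create(:identity) # extern_uid => "my-ldap-id-1"
create(:identity) # extern_uid => "my-ldap-id-2"
```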
......@@ -113,7 +113,7 @@ describe 'Visual tokens', :js do
describe 'add new token after editing existing token' do
before do
input_filtered_search('author:=@root assignee:=none', submit: false)
first('.tokens-container .filtered-search-token').double_click
first('.tokens-container .filtered-search-token').click
filtered_search.send_keys(' ')
end
......
......@@ -59,7 +59,7 @@ describe 'Project Graph', :js do
it 'HTML escapes branch name' do
expect(page.body).to include("Commit statistics for <strong>#{ERB::Util.html_escape(branch_name)}</strong>")
expect(page.body).not_to include(branch_name)
expect(page.find('.dropdown-toggle-text')['innerHTML']).to eq(ERB::Util.html_escape(branch_name))
end
end
......
import { mount } from '@vue/test-utils';
import PipelinesTable from '~/pipelines/components/pipelines_table.vue';
describe('Pipelines Table', () => {
let pipeline;
let wrapper;
const jsonFixtureName = 'pipelines/pipelines.json';
const defaultProps = {
pipelines: [],
autoDevopsHelpPath: 'foo',
viewType: 'root',
};
const createComponent = (props = defaultProps) => {
wrapper = mount(PipelinesTable, {
propsData: props,
});
};
const findRows = () => wrapper.findAll('.commit.gl-responsive-table-row');
preloadFixtures(jsonFixtureName);
beforeEach(() => {
const { pipelines } = getJSONFixture(jsonFixtureName);
pipeline = pipelines.find(p => p.user !== null && p.commit !== null);
createComponent();
});
afterEach(() => {
wrapper.destroy();
wrapper = null;
});
describe('table', () => {
it('should render a table', () => {
expect(wrapper.classes()).toContain('ci-table');
});
it('should render table head with correct columns', () => {
expect(wrapper.find('.table-section.js-pipeline-status').text()).toEqual('Status');
expect(wrapper.find('.table-section.js-pipeline-info').text()).toEqual('Pipeline');
expect(wrapper.find('.table-section.js-pipeline-commit').text()).toEqual('Commit');
expect(wrapper.find('.table-section.js-pipeline-stages').text()).toEqual('Stages');
});
});
describe('without data', () => {
it('should render an empty table', () => {
expect(findRows()).toHaveLength(0);
});
});
describe('with data', () => {
it('should render rows', () => {
createComponent({ pipelines: [pipeline], autoDevopsHelpPath: 'foo', viewType: 'root' });
expect(findRows()).toHaveLength(1);
});
});
});
import $ from 'jquery';
import { getAjaxUsersSelectOptions, getAjaxUsersSelectParams } from '~/users_select/utils';
const options = {
fooBar: 'baz',
activeUserId: 1,
};
describe('getAjaxUsersSelectOptions', () => {
it('returns options built from select data attributes', () => {
const $select = $('<select />', { 'data-foo-bar': 'baz', 'data-user-id': 1 });
expect(
getAjaxUsersSelectOptions($select, { fooBar: 'fooBar', activeUserId: 'user-id' }),
).toEqual(options);
});
});
describe('getAjaxUsersSelectParams', () => {
it('returns query parameters built from provided options', () => {
expect(
getAjaxUsersSelectParams(options, {
foo_bar: 'fooBar',
active_user_id: 'activeUserId',
non_existent_key: 'nonExistentKey',
}),
).toEqual({
foo_bar: 'baz',
active_user_id: 1,
non_existent_key: null,
});
});
});
......@@ -3,10 +3,91 @@
require 'spec_helper'
describe GitlabSchema.types['SnippetBlobViewer'] do
let_it_be(:snippet) { create(:personal_snippet, :repository) }
let_it_be(:blob) { snippet.repository.blob_at('HEAD', 'files/images/6049019_460s.jpg') }
it 'has the correct fields' do
expected_fields = [:type, :load_async, :too_large, :collapsed,
:render_error, :file_type, :loading_partial_name]
expect(described_class).to have_graphql_fields(*expected_fields)
end
it { expect(described_class.fields['type'].type).to be_non_null }
it { expect(described_class.fields['loadAsync'].type).to be_non_null }
it { expect(described_class.fields['collapsed'].type).to be_non_null }
it { expect(described_class.fields['tooLarge'].type).to be_non_null }
it { expect(described_class.fields['renderError'].type).not_to be_non_null }
it { expect(described_class.fields['fileType'].type).to be_non_null }
it { expect(described_class.fields['loadingPartialName'].type).to be_non_null }
shared_examples 'nil field converted to false' do
subject { GitlabSchema.execute(query, context: { current_user: snippet.author }).as_json }
before do
allow_next_instance_of(SnippetPresenter) do |instance|
allow(instance).to receive(:blob).and_return(blob)
end
end
it 'returns false' do
snippet_blob = subject.dig('data', 'snippets', 'edges')[0].dig('node', 'blob')
expect(snippet_blob['path']).to eq blob.path
expect(blob_attribute).to be_nil
expect(snippet_blob['simpleViewer'][attribute]).to eq false
end
end
describe 'collapsed' do
it_behaves_like 'nil field converted to false' do
let(:query) do
%(
query {
snippets(ids:"#{snippet.to_global_id}"){
edges {
node {
blob {
path
simpleViewer {
collapsed
}
}
}
}
}
}
)
end
let(:attribute) { 'collapsed' }
let(:blob_attribute) { blob.simple_viewer.collapsed? }
end
end
describe 'tooLarge' do
it_behaves_like 'nil field converted to false' do
let(:query) do
%(
query {
snippets(ids:"#{snippet.to_global_id}"){
edges {
node {
blob {
path
simpleViewer {
tooLarge
}
}
}
}
}
}
)
end
let(:attribute) { 'tooLarge' }
let(:blob_attribute) { blob.simple_viewer.too_large? }
end
end
end
import Vue from 'vue';
import pipelinesTableComp from '~/pipelines/components/pipelines_table.vue';
import '~/lib/utils/datetime_utility';
describe('Pipelines Table', () => {
const jsonFixtureName = 'pipelines/pipelines.json';
let pipeline;
let PipelinesTableComponent;
preloadFixtures(jsonFixtureName);
beforeEach(() => {
const { pipelines } = getJSONFixture(jsonFixtureName);
PipelinesTableComponent = Vue.extend(pipelinesTableComp);
pipeline = pipelines.find(p => p.user !== null && p.commit !== null);
});
describe('table', () => {
let component;
beforeEach(() => {
component = new PipelinesTableComponent({
propsData: {
pipelines: [],
autoDevopsHelpPath: 'foo',
viewType: 'root',
},
}).$mount();
});
afterEach(() => {
component.$destroy();
});
it('should render a table', () => {
expect(component.$el.getAttribute('class')).toContain('ci-table');
});
it('should render table head with correct columns', () => {
expect(
component.$el.querySelector('.table-section.js-pipeline-status').textContent.trim(),
).toEqual('Status');
expect(
component.$el.querySelector('.table-section.js-pipeline-info').textContent.trim(),
).toEqual('Pipeline');
expect(
component.$el.querySelector('.table-section.js-pipeline-commit').textContent.trim(),
).toEqual('Commit');
expect(
component.$el.querySelector('.table-section.js-pipeline-stages').textContent.trim(),
).toEqual('Stages');
});
});
describe('without data', () => {
it('should render an empty table', () => {
const component = new PipelinesTableComponent({
propsData: {
pipelines: [],
autoDevopsHelpPath: 'foo',
viewType: 'root',
},
}).$mount();
expect(component.$el.querySelectorAll('.commit.gl-responsive-table-row').length).toEqual(0);
});
});
describe('with data', () => {
it('should render rows', () => {
const component = new PipelinesTableComponent({
propsData: {
pipelines: [pipeline],
autoDevopsHelpPath: 'foo',
viewType: 'root',
},
}).$mount();
expect(component.$el.querySelectorAll('.commit.gl-responsive-table-row').length).toEqual(1);
});
});
});
......@@ -198,7 +198,7 @@ describe API::Features do
end
end
it 'creates a feature with the given percentage if passed an integer' do
it 'creates a feature with the given percentage of time if passed an integer' do
post api("/features/#{feature_name}", admin), params: { value: '50' }
expect(response).to have_gitlab_http_status(:created)
......@@ -210,6 +210,19 @@ describe API::Features do
{ 'key' => 'percentage_of_time', 'value' => 50 }
])
end
it 'creates a feature with the given percentage of actors if passed an integer' do
post api("/features/#{feature_name}", admin), params: { value: '50', key: 'percentage_of_actors' }
expect(response).to have_gitlab_http_status(:created)
expect(json_response).to eq(
'name' => 'my_feature',
'state' => 'conditional',
'gates' => [
{ 'key' => 'boolean', 'value' => false },
{ 'key' => 'percentage_of_actors', 'value' => 50 }
])
end
end
context 'when the feature exists' do
......@@ -298,7 +311,7 @@ describe API::Features do
end
end
context 'with a pre-existing percentage value' do
context 'with a pre-existing percentage of time value' do
before do
feature.enable_percentage_of_time(50)
end
......@@ -316,6 +329,25 @@ describe API::Features do
])
end
end
context 'with a pre-existing percentage of actors value' do
before do
feature.enable_percentage_of_actors(42)
end
it 'updates the percentage of actors if passed an integer' do
post api("/features/#{feature_name}", admin), params: { value: '74', key: 'percentage_of_actors' }
expect(response).to have_gitlab_http_status(:created)
expect(json_response).to eq(
'name' => 'my_feature',
'state' => 'conditional',
'gates' => [
{ 'key' => 'boolean', 'value' => false },
{ 'key' => 'percentage_of_actors', 'value' => 74 }
])
end
end
end
end
......
......@@ -358,7 +358,7 @@ describe 'getting an issue list for a project' do
cursored_query = query("sort: LABEL_PRIORITY_ASC, after: \"#{end_cursor}\"")
post_graphql(cursored_query, current_user: current_user)
response_data = JSON.parse(response.body)['data']['project']['issues']['edges']
response_data = Gitlab::Json.parse(response.body)['data']['project']['issues']['edges']
expect(grab_iids(response_data)).to eq [label_issue2.iid, label_issue4.iid]
end
......@@ -380,7 +380,7 @@ describe 'getting an issue list for a project' do
cursored_query = query("sort: LABEL_PRIORITY_DESC, after: \"#{end_cursor}\"")
post_graphql(cursored_query, current_user: current_user)
response_data = JSON.parse(response.body)['data']['project']['issues']['edges']
response_data = Gitlab::Json.parse(response.body)['data']['project']['issues']['edges']
expect(grab_iids(response_data)).to eq [label_issue1.iid, label_issue4.iid]
end
......