Commit 8584b7d7 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

Parent fe25c98f
# frozen_string_literal: true

class Projects::DeploymentsController < Projects::ApplicationController
  before_action :authorize_read_environment!
  before_action :authorize_read_deployment!

  # rubocop: disable CodeReuse/ActiveRecord
......
@@ -29,7 +29,7 @@ module Projects
       remote_mirror.ensure_remote!

       # https://gitlab.com/gitlab-org/gitaly/-/issues/2670
-      if Feature.disabled?(:gitaly_ruby_remote_branches_ls_remote)
+      if Feature.disabled?(:gitaly_ruby_remote_branches_ls_remote, default_enabled: true)
         repository.fetch_remote(remote_mirror.remote_name, ssh_auth: remote_mirror, no_tags: true)
       end
......
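For context on the feature-flag change above: passing `default_enabled: true` changes how an *unset* flag is read. A minimal, self-contained sketch of that semantics (a hypothetical stand-in, not GitLab's actual `Feature` class):

```ruby
# Hypothetical, simplified stand-in for Feature to illustrate default_enabled:
# a flag with no persisted state falls back to default_enabled instead of false.
module FeatureSketch
  FLAGS = {} # flag name => true/false, present only once explicitly toggled

  def self.enabled?(name, default_enabled: false)
    FLAGS.fetch(name, default_enabled)
  end

  def self.disabled?(name, default_enabled: false)
    !enabled?(name, default_enabled: default_enabled)
  end
end

FeatureSketch.disabled?(:gitaly_ruby_remote_branches_ls_remote)
# => true  (an unset flag reads as disabled, so the legacy fetch path runs)
FeatureSketch.disabled?(:gitaly_ruby_remote_branches_ls_remote, default_enabled: true)
# => false (an unset flag now reads as enabled; the flag must be explicitly
#           disabled for the legacy path to run)
```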
@@ -14,7 +14,11 @@ module Projects
     end

     def execute
-      repository_storage_move.start!
+      repository_storage_move.with_lock do
+        return ServiceResponse.success unless repository_storage_move.scheduled? # rubocop:disable Cop/AvoidReturnFromBlocks
+
+        repository_storage_move.start!
+      end

       raise SameFilesystemError if same_filesystem?(repository.storage, destination_storage_name)
......
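The `with_lock` change above is what makes a retried move safe: the first caller transitions the record out of `scheduled`, and any rerun returns success without repeating the work. A self-contained sketch of the pattern, with `Monitor#synchronize` standing in for the row lock that `with_lock` takes (hypothetical, simplified):

```ruby
require 'monitor'

# Hypothetical, simplified stand-in for the locked ActiveRecord row:
# synchronize plays the role of with_lock, and the scheduled? guard
# turns a retried job into a successful no-op.
class MoveSketch
  include MonitorMixin

  def initialize
    super
    @state = :scheduled
  end

  def claim
    synchronize do
      return :already_done unless @state == :scheduled

      @state = :started
      :claimed
    end
  end
end

move = MoveSketch.new
move.claim # => :claimed       (first run proceeds with the move)
move.claim # => :already_done  (a retry exits early, keeping the operation idempotent)
```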
@@ -1530,7 +1530,7 @@
   :urgency: :low
   :resource_boundary: :unknown
   :weight: 1
-  :idempotent:
+  :idempotent: true
   :tags: []
 - :name: prometheus_create_default_alerts
   :feature_category: :incident_management
......
 # frozen_string_literal: true

-class ProjectUpdateRepositoryStorageWorker # rubocop:disable Scalability/IdempotentWorker
+class ProjectUpdateRepositoryStorageWorker
   include ApplicationWorker

+  idempotent!
   feature_category :gitaly

   def perform(project_id, new_repository_storage_key, repository_storage_move_id = nil)
......
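`idempotent!` declares that running `perform` twice with the same arguments leaves the system in the same state, which is what the changelog entry below records. A hypothetical, simplified illustration of that contract (not the real worker):

```ruby
# Hypothetical, simplified illustration of the idempotency contract the
# worker now declares: a rerun with the same arguments becomes a no-op
# instead of repeating side effects.
class StorageMoveSketch
  attr_reader :storage

  def initialize(storage)
    @storage = storage
  end

  def perform(new_storage)
    return if @storage == new_storage # a second run finds nothing to do

    @storage = new_storage
  end
end

move = StorageMoveSketch.new('default')
move.perform('nfs-1')
move.perform('nfs-1') # safe to retry or deduplicate
puts move.storage # => nfs-1
```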
---
title: Make ProjectUpdateRepositoryStorageWorker idempotent
merge_request: 35483
author:
type: fixed
@@ -18,18 +18,18 @@ The following are GitLab upgrade validation tests we performed.

 ### February 2020

-[Upgrade Geo multi-server installation](https://gitlab.com/gitlab-org/gitlab/-/issues/201837):
+[Upgrade Geo multi-node installation](https://gitlab.com/gitlab-org/gitlab/-/issues/201837):

-- Description: Tested upgrading from GitLab 12.7.5 to the latest GitLab 12.8 package in a multi-server
+- Description: Tested upgrading from GitLab 12.7.5 to the latest GitLab 12.8 package in a multi-node
   configuration.
 - Outcome: Partial success because we did not run the looping pipeline during the demo to monitor
   downtime.

 ### January 2020

-[Upgrade Geo multi-server installation](https://gitlab.com/gitlab-org/gitlab/-/issues/200085):
+[Upgrade Geo multi-node installation](https://gitlab.com/gitlab-org/gitlab/-/issues/200085):

-- Description: Tested upgrading from GitLab 12.6.x to the latest GitLab 12.7 package in a multi-server
+- Description: Tested upgrading from GitLab 12.6.x to the latest GitLab 12.7 package in a multi-node
   configuration.
 - Outcome: Upgrade test was successful.
 - Follow up issues:
@@ -37,16 +37,16 @@ The following are GitLab upgrade validation tests we performed.
   - [Add more logging to Geo end-to-end tests](https://gitlab.com/gitlab-org/gitlab/-/issues/201830).
   - [Excess service restarts during zero-downtime upgrade](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5047).

-[Upgrade Geo multi-server installation](https://gitlab.com/gitlab-org/gitlab/-/issues/199836):
+[Upgrade Geo multi-node installation](https://gitlab.com/gitlab-org/gitlab/-/issues/199836):

-- Description: Tested upgrading from GitLab 12.5.7 to GitLab 12.6.6 in a multi-server configuration.
+- Description: Tested upgrading from GitLab 12.5.7 to GitLab 12.6.6 in a multi-node configuration.
 - Outcome: Upgrade test was successful.
 - Follow up issue:
   [Update documentation for zero-downtime upgrades to ensure deploy node is not in use](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5046).

-[Upgrade Geo multi-server installation](https://gitlab.com/gitlab-org/gitlab/-/issues/37044):
+[Upgrade Geo multi-node installation](https://gitlab.com/gitlab-org/gitlab/-/issues/37044):

-- Description: Tested upgrading from GitLab 12.4.x to the latest GitLab 12.5 package in a multi-server
+- Description: Tested upgrading from GitLab 12.4.x to the latest GitLab 12.5 package in a multi-node
   configuration.
 - Outcome: Upgrade test was successful.
 - Follow up issues:
@@ -55,17 +55,17 @@ The following are GitLab upgrade validation tests we performed.

 ### October 2019

-[Upgrade Geo multi-server installation](https://gitlab.com/gitlab-org/gitlab/-/issues/35262):
+[Upgrade Geo multi-node installation](https://gitlab.com/gitlab-org/gitlab/-/issues/35262):

-- Description: Tested upgrading from GitLab 12.3.5 to GitLab 12.4.1 in a multi-server configuration.
+- Description: Tested upgrading from GitLab 12.3.5 to GitLab 12.4.1 in a multi-node configuration.
 - Outcome: Upgrade test was successful.

-[Upgrade Geo multi-server installation](https://gitlab.com/gitlab-org/gitlab/-/issues/32437):
+[Upgrade Geo multi-node installation](https://gitlab.com/gitlab-org/gitlab/-/issues/32437):

 - Description: Tested upgrading from GitLab 12.2.8 to GitLab 12.3.5.
 - Outcome: Upgrade test was successful.

-[Upgrade Geo multi-server installation](https://gitlab.com/gitlab-org/gitlab/-/issues/32435):
+[Upgrade Geo multi-node installation](https://gitlab.com/gitlab-org/gitlab/-/issues/32435):

 - Description: Tested upgrading from GitLab 12.1.9 to GitLab 12.2.8.
 - Outcome: Partial success due to possible misconfiguration issues.
@@ -80,7 +80,7 @@ The following are PostgreSQL upgrade validation tests we performed.

 - Description: Prior to making PostgreSQL 11 the default version of PostgreSQL in GitLab 12.10, we
   tested upgrading to PostgreSQL 11 in Geo deployments on GitLab 12.9.
-- Outcome: Partially successful. Issues were discovered in multi-server configurations with a separate
+- Outcome: Partially successful. Issues were discovered in multi-node configurations with a separate
   tracking database, and concerns were raised about allowing automatic upgrades when Geo is enabled.
 - Follow up issues:
   - [`replicate-geo-database` incorrectly tries to back up repositories](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5241).
@@ -102,6 +102,6 @@ The following are PostgreSQL upgrade validation tests we performed.
   various upgrade scenarios from GitLab 11.11.5 through to GitLab 12.1.8.
 - Outcome: Multiple issues were found when upgrading and addressed in follow-up issues.
 - Follow up issues:
-  - [`gitlab-ctl` reconfigure fails on Redis node in multi-server Geo setup](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/4706).
-  - [Geo multi-server upgrade from 12.0.9 to 12.1.9 does not upgrade PostgreSQL](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/4705).
-  - [Refresh foreign tables fails on app server in multi-server setup after upgrade to 12.1.9](https://gitlab.com/gitlab-org/gitlab/-/issues/32119).
+  - [`gitlab-ctl` reconfigure fails on Redis node in multi-node Geo setup](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/4706).
+  - [Geo multi-node upgrade from 12.0.9 to 12.1.9 does not upgrade PostgreSQL](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/4705).
+  - [Refresh foreign tables fails on app server in multi-node setup after upgrade to 12.1.9](https://gitlab.com/gitlab-org/gitlab/-/issues/32119).
@@ -9,7 +9,7 @@ type: howto

 > - Introduced in GitLab Enterprise Edition 8.9.
 > - Using Geo in combination with
->   [multi-server architectures](../../reference_architectures/index.md)
+>   [multi-node architectures](../../reference_architectures/index.md)
 >   is considered **Generally Available** (GA) in
 >   [GitLab Premium](https://about.gitlab.com/pricing/) 10.4.
@@ -233,9 +233,9 @@ gitlab-ctl geo-replication-pause
 gitlab-ctl geo-replication-resume
 ```

-### Configuring Geo for multiple servers
+### Configuring Geo for multiple nodes

-For information on configuring Geo for multiple servers, see [Geo for multiple servers](multiple_servers.md).
+For information on configuring Geo for multiple nodes, see [Geo for multiple servers](multiple_servers.md).

 ### Configuring Geo with Object Storage
@@ -5,15 +5,15 @@ info: To determine the technical writer assigned to the Stage/Group associated w
 type: howto
 ---

-# Geo for multiple servers **(PREMIUM ONLY)**
+# Geo for multiple nodes **(PREMIUM ONLY)**

 This document describes a minimal reference architecture for running Geo
-in a multi-server configuration. If your multi-server setup differs from the one
+in a multi-node configuration. If your multi-node setup differs from the one
 described, it is possible to adapt these instructions to your needs.

 ## Architecture overview

-![Geo multi-server diagram](../../high_availability/img/geo-ha-diagram.png)
+![Geo multi-node diagram](../../high_availability/img/geo-ha-diagram.png)

 _[diagram source - GitLab employees only](https://docs.google.com/drawings/d/1z0VlizKiLNXVVVaERFwgsIOuEgjcUqDTWPdQYsE7Z4c/edit)_
@@ -30,36 +30,36 @@ The only external way to access the two Geo deployments is by HTTPS at

 NOTE: **Note:**
 The **primary** and **secondary** Geo deployments must be able to communicate to each other over HTTPS.

-## Redis and PostgreSQL for multiple servers
+## Redis and PostgreSQL for multiple nodes

 Geo supports:

-- Redis and PostgreSQL on the **primary** node configured for multiple servers.
-- Redis on **secondary** nodes configured for multiple servers.
+- Redis and PostgreSQL on the **primary** node configured for multiple nodes.
+- Redis on **secondary** nodes configured for multiple nodes.

 NOTE: **Note:**
-Support for PostgreSQL on **secondary** nodes in multi-server configuration
+Support for PostgreSQL on **secondary** nodes in multi-node configuration
 [is planned](https://gitlab.com/groups/gitlab-org/-/epics/2536).

 Because of the additional complexity involved in setting up this configuration
-for PostgreSQL and Redis, it is not covered by this Geo multi-server documentation.
+for PostgreSQL and Redis, it is not covered by this Geo multi-node documentation.

-For more information about setting up a multi-server PostgreSQL cluster and Redis cluster using the omnibus package see the multi-server documentation for
+For more information about setting up a multi-node PostgreSQL cluster and Redis cluster using the Omnibus package, see the multi-node documentation for
 [PostgreSQL](../../postgresql/replication_and_failover.md) and
 [Redis](../../redis/replication_and_failover.md), respectively.

 NOTE: **Note:**
 It is possible to use cloud hosted services for PostgreSQL and Redis, but this is beyond the scope of this document.

-## Prerequisites: Two working GitLab multi-server clusters
+## Prerequisites: Two working GitLab multi-node clusters

 One cluster will serve as the **primary** node. Use the
-[GitLab multi-server documentation](../../reference_architectures/index.md) to set this up. If
+[GitLab multi-node documentation](../../reference_architectures/index.md) to set this up. If
 you already have a working GitLab instance that is in-use, it can be used as a
 **primary**.

 The second cluster will serve as the **secondary** node. Again, use the
-[GitLab multi-server documentation](../../reference_architectures/index.md) to set this up.
+[GitLab multi-node documentation](../../reference_architectures/index.md) to set this up.
 It's a good idea to log in and test it; however, note that its data will be
 wiped out as part of the process of replicating from the **primary**.
@@ -92,8 +92,8 @@ After making these changes, [reconfigure GitLab](../../restart_gitlab.md#omnibus

 NOTE: **Note:** PostgreSQL and Redis should have already been disabled on the
 application servers, and connections from the application servers to those
-services on the backend servers configured, during normal GitLab multi-server set up. See
-multi-server configuration documentation for
+services on the backend servers configured, during normal GitLab multi-node setup. See
+the multi-node configuration documentation for
 [PostgreSQL](../../postgresql/replication_and_failover.md#configuring-the-application-nodes)
 and [Redis](../../redis/replication_and_failover.md#example-configuration-for-the-gitlab-application).
@@ -110,7 +110,7 @@ and [Redis](../../redis/replication_and_failover.md#example-configuration-for-th

 ## Configure a **secondary** node

-A **secondary** cluster is similar to any other GitLab multi-server cluster, with two
+A **secondary** cluster is similar to any other GitLab multi-node cluster, with two
 major differences:

 - The main PostgreSQL database is a read-only replica of the **primary** node's
@@ -119,8 +119,8 @@ major differences:
   called the "tracking database", which tracks the synchronization state of
   various resources.

-Therefore, we will set up the multi-server components one-by-one, and include deviations
-from the normal multi-server setup. However, we highly recommend first configuring a
+Therefore, we will set up the multi-node components one-by-one, and include deviations
+from the normal multi-node setup. However, we highly recommend first configuring a
 brand-new cluster as if it were not part of a Geo setup so that it can be
 tested and verified as a working cluster, and only then should it be modified
 for use as a Geo **secondary**. This helps to separate problems that are related
@@ -128,10 +128,10 @@ and are not related to Geo setup.

 ### Step 1: Configure the Redis and Gitaly services on the **secondary** node

-Configure the following services, again using the non-Geo multi-server
+Configure the following services, again using the non-Geo multi-node
 documentation:

-- [Configuring Redis for GitLab](../../redis/replication_and_failover.md#example-configuration-for-the-gitlab-application) for multiple servers.
+- [Configuring Redis for GitLab](../../redis/replication_and_failover.md#example-configuration-for-the-gitlab-application) for multiple nodes.
 - [Gitaly](../../high_availability/gitaly.md), which will store data that is
   synchronized from the **primary** node.
@@ -142,7 +142,7 @@ recommended.

 ### Step 2: Configure the main read-only replica PostgreSQL database on the **secondary** node

 NOTE: **Note:** The following documentation assumes the database will be run on
-a single node only. Multi-server PostgreSQL on **secondary** nodes is
+a single node only. Multi-node PostgreSQL on **secondary** nodes is
 [not currently supported](https://gitlab.com/groups/gitlab-org/-/epics/2536).

 Configure the [**secondary** database](database.md) as a read-only replica of
@@ -282,7 +282,7 @@ application services. These services are enabled selectively in the
 configuration.

 Configure the application servers following
-[Configuring GitLab for multiple servers](../../high_availability/gitlab.md), then make the
+[Configuring GitLab for multiple nodes](../../high_availability/gitlab.md), then make the
 following modifications:

 1. Edit `/etc/gitlab/gitlab.rb` on each application server in the **secondary**
@@ -370,13 +370,13 @@ application servers.

 In this topology, a load balancer is required at each geographic location to
 route traffic to the application servers.

-See [Load Balancer for GitLab with multiple servers](../../high_availability/load_balancer.md) for
+See [Load Balancer for GitLab with multiple nodes](../../high_availability/load_balancer.md) for
 more information.

 ### Step 6: Configure the backend application servers on the **secondary** node

 The minimal reference architecture diagram above shows all application services
-running together on the same machines. However, for multiple servers we
+running together on the same machines. However, for multiple nodes we
 [strongly recommend running all services separately](../../reference_architectures/index.md).

 For example, a Sidekiq server could be configured similarly to the frontend
@@ -100,13 +100,12 @@ the project repository contains Java source code and the `dependency_scanning` f

 ```yaml
 mysec_dependency_scanning:
-  except:
-    variables:
-      - $DEPENDENCY_SCANNING_DISABLED
-  only:
-    variables:
-      - $GITLAB_FEATURES =~ /\bdependency_scanning\b/ &&
-        $CI_PROJECT_REPOSITORY_LANGUAGES =~ /\bjava\b/
+  rules:
+    - if: $DEPENDENCY_SCANNING_DISABLED
+      when: never
+    - if: $GITLAB_FEATURES =~ /\bdependency_scanning\b/
+      exists:
+        - '**/*.java'
 ```

 Any additional job policy should be configured only by users, based on their needs.
......
@@ -71,7 +71,7 @@ can lead to confusion during deployments.

 > - Introduced in GitLab 10.2 for project-level clusters.
 > - Introduced in GitLab 11.6 for group-level clusters.
-> - A local Tiller option was introduced in GitLab 13.2 behind a feature flag, disabled by default.
+> - A local Tiller option was [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/209736) in GitLab 13.2 behind a feature flag, disabled by default.
 > - The feature flag for local Tiller is enabled on GitLab.com.

 [Helm](https://helm.sh/docs/) is a package manager for Kubernetes and is
......
@@ -18,55 +18,70 @@ module Gitlab
       def topology_usage_data
         topology_data, duration = measure_duration do
-          alt_usage_data(fallback: {}) do
-            {
-              nodes: topology_node_data
-            }.compact
-          end
+          alt_usage_data(fallback: {}) { topology_fetch_all_data }
         end

         { topology: topology_data.merge(duration_s: duration) }
       end

       private

-      def topology_node_data
-        with_prometheus_client do |client|
-          # node-level data
-          by_instance_mem = topology_node_memory(client)
-          by_instance_cpus = topology_node_cpus(client)
-          # service-level data
-          by_instance_by_job_by_metric_memory = topology_all_service_memory(client)
-          by_instance_by_job_process_count = topology_all_service_process_count(client)
-
-          instances = Set.new(by_instance_mem.keys + by_instance_cpus.keys)
-          instances.map do |instance|
-            {
-              node_memory_total_bytes: by_instance_mem[instance],
-              node_cpus: by_instance_cpus[instance],
-              node_services:
-                topology_node_services(instance, by_instance_by_job_process_count, by_instance_by_job_by_metric_memory)
-            }.compact
-          end
-        end
-      end
+      def topology_fetch_all_data
+        with_prometheus_client(fallback: {}) do |client|
+          {
+            nodes: topology_node_data(client)
+          }
+        end
+      end
+
+      def topology_node_data(client)
+        # node-level data
+        by_instance_mem = topology_node_memory(client)
+        by_instance_cpus = topology_node_cpus(client)
+
+        # service-level data
+        by_instance_by_job_by_type_memory = topology_all_service_memory(client)
+        by_instance_by_job_process_count = topology_all_service_process_count(client)
+
+        instances = Set.new(by_instance_mem.keys + by_instance_cpus.keys)
+        instances.map do |instance|
+          {
+            node_memory_total_bytes: by_instance_mem[instance],
+            node_cpus: by_instance_cpus[instance],
+            node_services:
+              topology_node_services(instance, by_instance_by_job_process_count, by_instance_by_job_by_type_memory)
+          }.compact
+        end
+      end
       def topology_node_memory(client)
-        aggregate_single(client, 'avg (node_memory_MemTotal_bytes) by (instance)')
+        aggregate_by_instance(client, 'gitlab_usage_ping:node_memory_total_bytes:avg')
       end

       def topology_node_cpus(client)
-        aggregate_single(client, 'count (node_cpu_seconds_total{mode="idle"}) by (instance)')
+        aggregate_by_instance(client, 'gitlab_usage_ping:node_cpus:count')
       end

       def topology_all_service_memory(client)
-        aggregate_many(
-          client,
-          'avg ({__name__ =~ "(ruby_){0,1}process_(resident|unique|proportional)_memory_bytes", job != "gitlab_exporter_process"}) by (instance, job, __name__)'
-        )
+        {
+          rss: topology_service_memory_rss(client),
+          uss: topology_service_memory_uss(client),
+          pss: topology_service_memory_pss(client)
+        }
       end

+      def topology_service_memory_rss(client)
+        aggregate_by_labels(client, 'gitlab_usage_ping:node_service_process_resident_memory_bytes:avg')
+      end
+
+      def topology_service_memory_uss(client)
+        aggregate_by_labels(client, 'gitlab_usage_ping:node_service_process_unique_memory_bytes:avg')
+      end
+
+      def topology_service_memory_pss(client)
+        aggregate_by_labels(client, 'gitlab_usage_ping:node_service_process_proportional_memory_bytes:avg')
+      end
+
       def topology_all_service_process_count(client)
-        aggregate_many(client, 'count ({__name__ =~ "(ruby_){0,1}process_start_time_seconds", job != "gitlab_exporter_process"}) by (instance, job)')
+        aggregate_by_labels(client, 'gitlab_usage_ping:node_service_process:count')
       end

       def topology_node_services(instance, all_process_counts, all_process_memory)
@@ -92,24 +107,21 @@ module Gitlab
         end
       end

-      def topology_instance_service_memory(instance, all_instance_data)
-        topology_data_for_instance(instance, all_instance_data).each_with_object({}) do |entry, hash|
-          metric, memory = entry
-          job = metric['job']
-          key =
-            case metric['__name__']
-            when match_process_memory_metric_for_type('resident') then :process_memory_rss
-            when match_process_memory_metric_for_type('unique') then :process_memory_uss
-            when match_process_memory_metric_for_type('proportional') then :process_memory_pss
-            end
-
-          hash[job] ||= {}
-          hash[job][key] ||= memory
-        end
-      end
-
-      def match_process_memory_metric_for_type(type)
-        /(ruby_){0,1}process_#{type}_memory_bytes/
+      # Given a hash mapping memory set types to Prometheus response data, returns a hash
+      # mapping instance/node names to services and their respective memory use in bytes
+      def topology_instance_service_memory(instance, instance_data_by_type)
+        result = {}
+        instance_data_by_type.each do |memory_type, instance_data|
+          topology_data_for_instance(instance, instance_data).each do |metric, memory_bytes|
+            job = metric['job']
+            key = "process_memory_#{memory_type}".to_sym
+
+            result[job] ||= {}
+            result[job][key] ||= memory_bytes
+          end
+        end
+
+        result
       end
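To make the new data flow concrete, here is a runnable restatement of the aggregation loop above with made-up sample values (hypothetical data; the real input comes from the rss/uss/pss Prometheus queries, and the per-instance filtering done by `topology_data_for_instance` is omitted):

```ruby
# Hypothetical sample input: memory set type => [[metric labels, bytes], ...]
instance_data_by_type = {
  rss: [[{ 'instance' => 'instance1', 'job' => 'gitlab-rails' }, 300]],
  pss: [[{ 'instance' => 'instance1', 'job' => 'gitlab-rails' }, 302]]
}

result = {}
instance_data_by_type.each do |memory_type, instance_data|
  instance_data.each do |(metric, memory_bytes)|
    job = metric['job']
    key = "process_memory_#{memory_type}".to_sym # e.g. :process_memory_rss

    result[job] ||= {}
    result[job][key] ||= memory_bytes
  end
end

puts result.inspect
# => {"gitlab-rails"=>{:process_memory_rss=>300, :process_memory_pss=>302}}
```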
       def topology_data_for_instance(instance, all_instance_data)
@@ -120,14 +132,17 @@ module Gitlab
         instance.gsub(/:.+$/, '')
       end

-      # Will retain a single `instance` key that values are mapped to
-      def aggregate_single(client, query)
-        client.aggregate(query) { |metric| drop_port(metric['instance']) }
+      def one_week_average(query)
+        "avg_over_time (#{query}[1w])"
+      end
+
+      # Will retain a single `instance` key that values are mapped to
+      def aggregate_by_instance(client, query)
+        client.aggregate(one_week_average(query)) { |metric| drop_port(metric['instance']) }
       end

       # Will retain a composite key that values are mapped to
-      def aggregate_many(client, query)
-        client.aggregate(query) do |metric|
+      def aggregate_by_labels(client, query)
+        client.aggregate(one_week_average(query)) do |metric|
           metric['instance'] = drop_port(metric['instance'])
           metric
         end
......
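The aggregation change above is easiest to see from the query strings it now produces: the ad-hoc PromQL expressions are replaced by recording-rule names wrapped in a one-week average. A trivially runnable sketch:

```ruby
# Same wrapper as in the diff above; the recording-rule name is an example
# taken from the new topology_node_memory call.
def one_week_average(query)
  "avg_over_time (#{query}[1w])"
end

puts one_week_average('gitlab_usage_ping:node_memory_total_bytes:avg')
# => avg_over_time (gitlab_usage_ping:node_memory_total_bytes:avg[1w])
```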
@@ -77,11 +77,11 @@ module Gitlab
         end
       end

-      def with_prometheus_client
-        if Gitlab::Prometheus::Internal.prometheus_enabled?
-          prometheus_address = Gitlab::Prometheus::Internal.uri
-          yield Gitlab::PrometheusClient.new(prometheus_address, allow_local_requests: true)
-        end
+      def with_prometheus_client(fallback: nil)
+        return fallback unless Gitlab::Prometheus::Internal.prometheus_enabled?
+
+        prometheus_address = Gitlab::Prometheus::Internal.uri
+        yield Gitlab::PrometheusClient.new(prometheus_address, allow_local_requests: true)
       end

       def measure_duration
......
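A self-contained sketch of the new fallback contract (hypothetical, simplified helper; the real method also constructs the Prometheus client):

```ruby
# When the precondition fails, the caller gets the fallback instead of nil,
# which is what lets topology_fetch_all_data pass fallback: {} above.
def with_client(enabled:, fallback: nil)
  return fallback unless enabled

  yield :client
end

p with_client(enabled: false, fallback: {}) { |c| { nodes: [c] } } # => {}
p with_client(enabled: true, fallback: {}) { |c| { nodes: [c] } }  # => {:nodes=>[:client]}
```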
@@ -36,6 +36,52 @@ RSpec.describe Projects::DeploymentsController do
       expect(response).to be_ok
       expect(response).to match_response_schema('deployments')
     end

+    context 'anonymous user' do
+      let(:anonymous_user) { create(:user) }
+
+      before do
+        sign_in(anonymous_user)
+      end
+
+      context 'project and metrics dashboard are public' do
+        before do
+          project.update!(
+            visibility_level: Gitlab::VisibilityLevel::PUBLIC,
+            project_feature_attributes: {
+              metrics_dashboard_access_level: Gitlab::VisibilityLevel::PUBLIC
+            }
+          )
+        end
+
+        it 'returns a list with deployments information' do
+          create(:deployment, :success, environment: environment)
+
+          get :index, params: deployment_params
+
+          expect(response).to be_ok
+        end
+      end
+
+      context 'project and metrics dashboard are private' do
+        before do
+          project.update!(
+            visibility_level: Gitlab::VisibilityLevel::PRIVATE,
+            project_feature_attributes: {
+              metrics_dashboard_access_level: Gitlab::VisibilityLevel::PRIVATE
+            }
+          )
+        end
+
+        it 'responds with not found' do
+          create(:deployment, :success, environment: environment)
+
+          get :index, params: deployment_params
+
+          expect(response).to be_not_found
+        end
+      end
+    end
   end

   describe 'GET #metrics' do
......
@@ -14,5 +14,13 @@ FactoryBot.define do
     trait :started do
       state { ProjectRepositoryStorageMove.state_machines[:state].states[:started].value }
     end
+
+    trait :finished do
+      state { ProjectRepositoryStorageMove.state_machines[:state].states[:finished].value }
+    end
+
+    trait :failed do
+      state { ProjectRepositoryStorageMove.state_machines[:state].states[:failed].value }
+    end
   end
 end
@@ -19,18 +19,14 @@ RSpec.describe Gitlab::UsageDataConcerns::Topology do
     expect(Gitlab::Prometheus::Internal).to receive(:uri).and_return('http://prom:9090')
   end

-  it 'contains a topology element' do
-    allow_prometheus_queries
-
-    expect(subject).to have_key(:topology)
-  end
-
   context 'tracking node metrics' do
     it 'contains node level metrics for each instance' do
       expect_prometheus_api_to(
         receive_node_memory_query,
         receive_node_cpu_count_query,
-        receive_node_service_memory_query,
+        receive_node_service_memory_rss_query,
+        receive_node_service_memory_uss_query,
+        receive_node_service_memory_pss_query,
         receive_node_service_process_count_query
       )
@@ -82,19 +78,51 @@ RSpec.describe Gitlab::UsageDataConcerns::Topology do
       expect_prometheus_api_to(
         receive_node_memory_query(result: []),
         receive_node_cpu_count_query,
-        receive_node_service_memory_query,
+        receive_node_service_memory_rss_query(result: []),
+        receive_node_service_memory_uss_query(result: []),
+        receive_node_service_memory_pss_query,
         receive_node_service_process_count_query
       )

-      keys = subject[:topology][:nodes].flat_map(&:keys)
-      expect(keys).not_to include(:node_memory_total_bytes)
-      expect(keys).to include(:node_cpus, :node_services)
+      expect(subject[:topology]).to eq({
+        duration_s: 0,
+        nodes: [
+          {
+            node_cpus: 16,
+            node_services: [
+              {
+                name: 'sidekiq',
+                process_count: 15,
+                process_memory_pss: 401
+              },
+              {
+                name: 'redis',
+                process_count: 1
+              }
+            ]
+          },
+          {
+            node_cpus: 8,
+            node_services: [
+              {
+                name: 'web',
+                process_count: 10,
+                process_memory_pss: 302
+              },
+              {
+                name: 'sidekiq',
+                process_count: 5
+              }
+            ]
+          }
+        ]
+      })
     end
   end
   context 'and no results are found' do
     it 'does not report anything' do
-      expect_prometheus_api_to receive(:aggregate).at_least(:once).and_return({})
+      expect_prometheus_api_to receive(:query).at_least(:once).and_return({})

       expect(subject[:topology]).to eq({
         duration_s: 0,
@@ -105,7 +133,7 @@ RSpec.describe Gitlab::UsageDataConcerns::Topology do
   context 'and a connection error is raised' do
     it 'does not report anything' do
-      expect_prometheus_api_to receive(:aggregate).and_raise('Connection failed')
+      expect_prometheus_api_to receive(:query).and_raise('Connection failed')

       expect(subject[:topology]).to eq({ duration_s: 0 })
     end
@@ -123,7 +151,7 @@ RSpec.describe Gitlab::UsageDataConcerns::Topology do
   def receive_node_memory_query(result: nil)
     receive(:query)
-      .with(/node_memory_MemTotal_bytes/, an_instance_of(Hash))
+      .with(/node_memory_total_bytes/, an_instance_of(Hash))
       .and_return(result || [
        {
          'metric' => { 'instance' => 'instance1:8080' },
@@ -138,7 +166,7 @@ RSpec.describe Gitlab::UsageDataConcerns::Topology do
   def receive_node_cpu_count_query(result: nil)
     receive(:query)
-      .with(/node_cpu_seconds_total/, an_instance_of(Hash))
+      .with(/node_cpus/, an_instance_of(Hash))
       .and_return(result || [
        {
          'metric' => { 'instance' => 'instance2:8090' },
@@ -151,46 +179,59 @@ RSpec.describe Gitlab::UsageDataConcerns::Topology do
       ])
   end

-  def receive_node_service_memory_query(result: nil)
-    receive(:query)
-      .with(/process_.+_memory_bytes/, an_instance_of(Hash))
-      .and_return(result || [
-        # instance 1: runs Puma + a small Sidekiq
-        {
-          'metric' => { 'instance' => 'instance1:8080', 'job' => 'gitlab-rails', '__name__' => 'ruby_process_resident_memory_bytes' },
-          'value' => [1000, '300']
-        },
-        {
-          'metric' => { 'instance' => 'instance1:8080', 'job' => 'gitlab-rails', '__name__' => 'ruby_process_unique_memory_bytes' },
-          'value' => [1000, '301']
-        },
-        {
-          'metric' => { 'instance' => 'instance1:8080', 'job' => 'gitlab-rails', '__name__' => 'ruby_process_proportional_memory_bytes' },
-          'value' => [1000, '302']
-        },
-        {
-          'metric' => { 'instance' => 'instance1:8090', 'job' => 'gitlab-sidekiq', '__name__' => 'ruby_process_resident_memory_bytes' },
-          'value' => [1000, '303']
-        },
-        # instance 2: runs a dedicated Sidekiq + Redis (which uses a different metric name)
-        {
-          'metric' => { 'instance' => 'instance2:8090', 'job' => 'gitlab-sidekiq', '__name__' => 'ruby_process_resident_memory_bytes' },
-          'value' => [1000, '400']
-        },
-        {
-          'metric' => { 'instance' => 'instance2:8090', 'job' => 'gitlab-sidekiq', '__name__' => 'ruby_process_proportional_memory_bytes' },
-          'value' => [1000, '401']
-        },
-        {
-          'metric' => { 'instance' => 'instance2:9121', 'job' => 'redis', '__name__' => 'process_resident_memory_bytes' },
-          'value' => [1000, '402']
-        }
-      ])
-  end
+  def receive_node_service_memory_rss_query(result: nil)
+    receive(:query)
+      .with(/process_resident_memory_bytes/, an_instance_of(Hash))
+      .and_return(result || [
+        # instance 1: runs Puma + a small Sidekiq
+        {
+          'metric' => { 'instance' => 'instance1:8080', 'job' => 'gitlab-rails' },
+          'value' => [1000, '300']
+        },
+        {
+          'metric' => { 'instance' => 'instance1:8090', 'job' => 'gitlab-sidekiq' },
+          'value' => [1000, '303']
+        },
+        # instance 2: runs a dedicated Sidekiq + Redis (which uses a different metric name)
+        {
+          'metric' => { 'instance' => 'instance2:8090', 'job' => 'gitlab-sidekiq' },
+          'value' => [1000, '400']
+        },
+        {
+          'metric' => { 'instance' => 'instance2:9121', 'job' => 'redis' },
+          'value' => [1000, '402']
+        }
+      ])
+  end
+
+  def receive_node_service_memory_uss_query(result: nil)
+    receive(:query)
+      .with(/process_unique_memory_bytes/, an_instance_of(Hash))
+      .and_return(result || [
+        {
+          'metric' => { 'instance' => 'instance1:8080', 'job' => 'gitlab-rails' },
+          'value' => [1000, '301']
+        }
+      ])
+  end
+
+  def receive_node_service_memory_pss_query(result: nil)
+    receive(:query)
+      .with(/process_proportional_memory_bytes/, an_instance_of(Hash))
+      .and_return(result || [
+        {
+          'metric' => { 'instance' => 'instance1:8080', 'job' => 'gitlab-rails' },
+          'value' => [1000, '302']
+        },
+        {
+          'metric' => { 'instance' => 'instance2:8090', 'job' => 'gitlab-sidekiq' },
+          'value' => [1000, '401']
+        }
+      ])
+  end

   def receive_node_service_process_count_query(result: nil)
     receive(:query)
-      .with(/process_start_time_seconds/, an_instance_of(Hash))
+      .with(/service_process:count/, an_instance_of(Hash))
       .and_return(result || [
        # instance 1
        {
......
@@ -88,13 +88,21 @@ RSpec.describe Gitlab::Utils::UsageData do
     end

     context 'when Prometheus is disabled' do
-      it 'returns nil' do
+      before do
         expect(Gitlab::Prometheus::Internal).to receive(:prometheus_enabled?).and_return(false)
+      end

+      it 'returns nil by default' do
         result = described_class.with_prometheus_client { |client| client }

         expect(result).to be nil
       end
+
+      it 'returns fallback if provided' do
+        result = described_class.with_prometheus_client(fallback: []) { |client| client }
+
+        expect(result).to eq([])
+      end
     end
   end
......
@@ -327,7 +327,7 @@ RSpec.describe Projects::ForkService do
           destination_storage_name: 'test_second_storage'
         )
         Projects::UpdateRepositoryStorageService.new(storage_move).execute
-        fork_after_move = fork_project(project)
+        fork_after_move = fork_project(project.reload)
         pool_repository_before_move = PoolRepository.joins(:shard)
           .find_by(source_project: project, shards: { name: 'default' })
         pool_repository_after_move = PoolRepository.joins(:shard)
......
@@ -45,6 +45,7 @@ RSpec.describe Projects::UpdateRepositoryStorageService do
         .and_call_original

       result = subject.execute
+      project.reload

       expect(result).to be_success
       expect(project).not_to be_repository_read_only
@@ -115,12 +116,37 @@ RSpec.describe Projects::UpdateRepositoryStorageService do
           .and_return(checksum)

         result = subject.execute
+        project.reload

         expect(result).to be_success
         expect(project.repository_storage).to eq('test_second_storage')
         expect(project.reload_pool_repository).to be_nil
       end
     end

+    context 'when the repository move is finished' do
+      let(:repository_storage_move) { create(:project_repository_storage_move, :finished, project: project, destination_storage_name: destination) }
+
+      it 'is idempotent' do
+        expect do
+          result = subject.execute
+
+          expect(result).to be_success
+        end.not_to change(repository_storage_move, :state)
+      end
+    end
+
+    context 'when the repository move is failed' do
+      let(:repository_storage_move) { create(:project_repository_storage_move, :failed, project: project, destination_storage_name: destination) }
+
+      it 'is idempotent' do
+        expect do
+          result = subject.execute
+
+          expect(result).to be_success
+        end.not_to change(repository_storage_move, :state)
+      end
+    end
   end

   context 'with wiki repository' do
......
@@ -47,6 +47,7 @@ RSpec.shared_examples 'moves repository to another storage' do |repository_type|
     old_repository_path = repository.full_path

     result = subject.execute
+    project.reload

     expect(result).to be_success
     expect(project).not_to be_repository_read_only
......
# frozen_string_literal: true

require 'spec_helper'
require 'securerandom'

RSpec.describe ProjectUpdateRepositoryStorageWorker do
  let(:project) { create(:project, :repository) }
......