Commit a8991287 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

Parent 645d157d
......@@ -18,7 +18,7 @@ const popoverStates = {
suggest_commit_first_project_gitlab_ci_yml: {
title: s__(`suggestPipeline|2/2: Commit your changes`),
content: s__(
`suggestPipeline|Commit the changes and your pipeline will automatically run for the first time.`,
`suggestPipeline|The template is ready! You can now commit it to create your first pipeline.`,
),
},
};
......
......@@ -50,15 +50,14 @@ function backOffRequest(makeRequestCallback) {
}, PROMETHEUS_TIMEOUT);
}
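The hunk above shows only the tail of `backOffRequest`. For context, a minimal sketch of the polling pattern it implements, assuming GitLab's `backOff` helper from `~/lib/utils/common_utils` (which repeatedly invokes a callback until `stop` is called or the timeout elapses); the timeout value here is an assumption:

```javascript
import { backOff } from '~/lib/utils/common_utils';
import statusCodes from '~/lib/utils/http_status';

const PROMETHEUS_TIMEOUT = 120000; // assumed polling budget in milliseconds

// Retry while Prometheus replies 204 No Content (the query result is not
// ready yet); resolve with the first real response, reject on any error.
function backOffRequest(makeRequestCallback) {
  return backOff((next, stop) => {
    makeRequestCallback()
      .then(resp => {
        if (resp.status === statusCodes.NO_CONTENT) {
          next(); // not ready yet; schedule another attempt
        } else {
          stop(resp); // final payload; resolves the returned promise
        }
      })
      .catch(stop); // HTTP or network failures reject immediately
  }, PROMETHEUS_TIMEOUT);
}
```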
function getPrometheusMetricResult(prometheusEndpoint, params) {
function getPrometheusQueryData(prometheusEndpoint, params) {
return backOffRequest(() => axios.get(prometheusEndpoint, { params }))
.then(res => res.data)
.then(response => {
if (response.status === 'error') {
throw new Error(response.error);
}
return response.data.result;
return response.data;
});
}
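The Prometheus HTTP API wraps every query response in a `{ status, data }` envelope, which the two `.then` steps above unwrap. A sketch of the shapes involved, with sample values taken from the Prometheus API documentation:

```javascript
// Raw response body from the Prometheus query endpoint:
const apiResponse = {
  status: 'success', // 'error' makes getPrometheusQueryData throw
  data: {
    resultType: 'matrix',
    result: [
      {
        metric: { __name__: 'up', job: 'prometheus' },
        values: [[1435781430.781, '1'], [1435781445.781, '1']],
      },
    ],
  },
};

// getPrometheusQueryData now resolves with `apiResponse.data`, i.e. the
// whole { resultType, result } pair, instead of only `result` as before.
```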
......@@ -229,9 +228,9 @@ export const fetchPrometheusMetric = (
commit(types.REQUEST_METRIC_RESULT, { metricId: metric.metricId });
return getPrometheusMetricResult(metric.prometheusEndpointPath, queryParams)
.then(result => {
commit(types.RECEIVE_METRIC_RESULT_SUCCESS, { metricId: metric.metricId, result });
return getPrometheusQueryData(metric.prometheusEndpointPath, queryParams)
.then(data => {
commit(types.RECEIVE_METRIC_RESULT_SUCCESS, { metricId: metric.metricId, data });
})
.catch(error => {
Sentry.captureException(error);
......
import Vue from 'vue';
import { pick } from 'lodash';
import * as types from './mutation_types';
import { mapToDashboardViewModel, normalizeQueryResult } from './utils';
import { mapToDashboardViewModel, normalizeQueryResponseData } from './utils';
import { BACKOFF_TIMEOUT } from '../../lib/utils/common_utils';
import { endpointKeys, initialStateKeys, metricStates } from '../constants';
import httpStatusCodes from '~/lib/utils/http_status';
......@@ -135,19 +135,19 @@ export default {
metric.state = metricStates.LOADING;
}
},
[types.RECEIVE_METRIC_RESULT_SUCCESS](state, { metricId, result }) {
[types.RECEIVE_METRIC_RESULT_SUCCESS](state, { metricId, data }) {
const metric = findMetricInDashboard(metricId, state.dashboard);
metric.loading = false;
state.showEmptyState = false;
if (!result || result.length === 0) {
state.showEmptyState = false;
if (!data.result || data.result.length === 0) {
metric.state = metricStates.NO_DATA;
metric.result = null;
} else {
const normalizedResults = result.map(normalizeQueryResult);
const result = normalizeQueryResponseData(data);
metric.state = metricStates.OK;
metric.result = Object.freeze(normalizedResults);
metric.result = Object.freeze(result);
}
},
[types.RECEIVE_METRIC_RESULT_FAILURE](state, { metricId, error }) {
......
......@@ -295,9 +295,87 @@ export const mapToDashboardViewModel = ({
};
};
// Prometheus Results Parsing
const dateTimeFromUnixTime = unixTime => new Date(unixTime * 1000).toISOString();
const mapScalarValue = ([unixTime, value]) => [dateTimeFromUnixTime(unixTime), Number(value)];
// Note: `string` value type is unused as of prometheus 2.19.
const mapStringValue = ([unixTime, value]) => [dateTimeFromUnixTime(unixTime), value];
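For example, Prometheus reports unix seconds and stringified sample values; these helpers convert them for chart consumption (timestamps match the fixtures in the specs below):

```javascript
mapScalarValue([1435781451.781, '1']);
// => ['2015-07-01T20:10:51.781Z', 1]

// mapStringValue converts only the timestamp and keeps the value as-is:
mapStringValue([1435781451.781, 'up']);
// => ['2015-07-01T20:10:51.781Z', 'up']
```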
/**
* Processes a scalar result.
*
* The corresponding result property has the following format:
*
* [ <unix_time>, "<scalar_value>" ]
*
* @param {array} result
* @returns {array}
*/
const normalizeScalarResult = result => [
{
metric: {},
value: mapScalarValue(result),
values: [mapScalarValue(result)],
},
];
/**
* Processes a string result.
*
* The corresponding result property has the following format:
*
* [ <unix_time>, "<string_value>" ]
*
* Note: This value type is unused as of prometheus 2.19.
*
* @param {array} result
* @returns {array}
*/
const normalizeStringResult = result => [
{
metric: {},
value: mapStringValue(result),
values: [mapStringValue(result)],
},
];
/**
* Processes a single Range vector, part of the result
* of type `matrix` in the form:
 * Processes an instant vector.
*
* Instant vectors are returned as result type `vector`.
*
* The corresponding result property has the following format:
*
* [
* {
* "metric": { "<label_name>": "<label_value>", ... },
* "value": [ <unix_time>, "<sample_value>" ]
* },
* ...
* ]
*
* This method also adds the matrix version of the vector
* by introducing a `values` array with a single element. This
* allows charts to default to `values` if needed.
*
* @param {array} result
* @returns {array}
*/
const normalizeVectorResult = result =>
result.map(({ metric, value }) => {
const scalar = mapScalarValue(value);
// Add a single element to `values`, to support matrix
// style charts.
return { metric, value: scalar, values: [scalar] };
});
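A vector sample therefore comes out with a one-element `values` array, so matrix-oriented chart code can consume it unchanged. For instance:

```javascript
normalizeVectorResult([
  { metric: { job: 'prometheus' }, value: [1435781451.781, '1'] },
]);
// => [{
//   metric: { job: 'prometheus' },
//   value: ['2015-07-01T20:10:51.781Z', 1],
//   values: [['2015-07-01T20:10:51.781Z', 1]],
// }]
```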
/**
* Range vectors are returned as result type matrix.
*
* The corresponding result property has the following format:
*
* {
* "metric": { "<label_name>": "<label_value>", ... },
......@@ -306,32 +384,45 @@ export const mapToDashboardViewModel = ({
*
* See https://prometheus.io/docs/prometheus/latest/querying/api/#range-vectors
*
* @param {*} timeSeries
* @param {array} result
* @returns {array}
*/
export const normalizeQueryResult = timeSeries => {
let normalizedResult = {};
const normalizeResultMatrix = result =>
result.map(({ metric, values }) => ({ metric, values: values.map(mapScalarValue) }));
if (timeSeries.values) {
normalizedResult = {
...timeSeries,
values: timeSeries.values.map(([timestamp, value]) => [
new Date(timestamp * 1000).toISOString(),
Number(value),
]),
};
// Check result for empty data
normalizedResult.values = normalizedResult.values.filter(series => {
const hasValue = d => !Number.isNaN(d[1]) && (d[1] !== null || d[1] !== undefined);
return series.find(hasValue);
});
} else if (timeSeries.value) {
normalizedResult = {
...timeSeries,
value: [new Date(timeSeries.value[0] * 1000).toISOString(), Number(timeSeries.value[1])],
};
/**
* Parse response data from a Prometheus Query that comes
* in the format:
*
* {
* "resultType": "matrix" | "vector" | "scalar" | "string",
* "result": <value>
* }
*
* @see https://prometheus.io/docs/prometheus/latest/querying/api/#expression-query-result-formats
*
* @param {object} data - Data containing results and result type.
* @returns {object} - A result array of metric results:
* [
* {
* metric: { ... },
* value: ['2015-07-01T20:10:51.781Z', '1'],
* values: [['2015-07-01T20:10:51.781Z', '1'] , ... ],
* },
* ...
* ]
*
*/
export const normalizeQueryResponseData = data => {
const { resultType, result } = data;
if (resultType === 'vector') {
return normalizeVectorResult(result);
} else if (resultType === 'scalar') {
return normalizeScalarResult(result);
} else if (resultType === 'string') {
return normalizeStringResult(result);
}
return normalizedResult;
return normalizeResultMatrix(result);
};
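With this dispatcher in place, the `RECEIVE_METRIC_RESULT_SUCCESS` mutation above can treat every result type uniformly. A quick sketch of the scalar case:

```javascript
normalizeQueryResponseData({
  resultType: 'scalar',
  result: [1435781451.781, '1'],
});
// => [{
//   metric: {},
//   value: ['2015-07-01T20:10:51.781Z', 1],
//   values: [['2015-07-01T20:10:51.781Z', 1]],
// }]
```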
/**
......
......@@ -14,11 +14,13 @@ export default {
} else {
this.source = legacyEntry.parentNode;
this.$el.appendChild(legacyEntry);
legacyEntry.classList.add('active');
}
},
beforeDestroy() {
if (this.source) {
this.$el.firstChild.classList.remove('active');
this.source.appendChild(this.$el.firstChild);
}
},
......
......@@ -30,12 +30,16 @@ module Mutations
description: 'The visibility level of the snippet',
required: false
argument :files, [Types::Snippets::FileInputType],
description: 'The snippet files to update',
required: false
def resolve(args)
snippet = authorized_find!(id: args.delete(:id))
result = ::Snippets::UpdateService.new(snippet.project,
context[:current_user],
args).execute(snippet)
context[:current_user],
update_params(args)).execute(snippet)
snippet = result.payload[:snippet]
{
......@@ -47,7 +51,15 @@ module Mutations
private
def ability_name
"update"
'update'
end
def update_params(args)
args.tap do |update_args|
# We need to rename `files` into `snippet_files` because
# it's the expected key param
update_args[:snippet_files] = update_args.delete(:files)&.map(&:to_h)
end
end
end
end
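For illustration, a client-side sketch of how the new `files` argument might be exercised. The mutation and input names follow the schema additions below; the `/api/graphql` endpoint and the `errors` selection are assumptions based on GitLab's usual GraphQL conventions:

```javascript
import axios from 'axios';

// Hypothetical call sending an update and a delete action in one request;
// field names (action, filePath, content) follow SnippetFileInputType.
const query = `
  mutation ($input: UpdateSnippetInput!) {
    updateSnippet(input: $input) {
      errors
    }
  }
`;

axios.post('/api/graphql', {
  query,
  variables: {
    input: {
      id: 'gid://gitlab/PersonalSnippet/1', // assumed global ID format
      files: [
        { action: 'update', filePath: 'CHANGELOG', content: 'updated content' },
        { action: 'delete', filePath: 'README' },
      ],
    },
  },
});
```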
......
......@@ -2,7 +2,7 @@
- issue_votes = @issuable_meta_data[issuable.id]
- upvotes, downvotes = issue_votes.upvotes, issue_votes.downvotes
- issuable_path = issuable_path(issuable, anchor: 'notes')
- issuable_mr = @issuable_meta_data[issuable.id].merge_requests_count(current_user)
- issuable_mr = @issuable_meta_data[issuable.id].merge_requests_count
- if issuable_mr > 0
%li.issuable-mr.d-none.d-sm-block.has-tooltip{ title: _('Related merge requests') }
......
---
title: Add Falco to the managed cluster apps template
merge_request: 32779
author:
type: added
---
title: Add files argument to snippet update mutation
merge_request: 34514
author:
type: changed
---
title: Send fixed pipeline notification by default
merge_request: 34589
author:
type: added
......@@ -284,6 +284,10 @@ NOTE: **Note:** Set the limit to `0` to disable it.
See the [documentation on Snippets settings](snippets/index.md).
## Design Management limits
See the [Design Management Limitations](../user/project/issues/design_management.md#limitations) section.
## Push Event Limits
### Webhooks and Project Services
......
......@@ -11715,6 +11715,41 @@ type SnippetEdge {
node: Snippet
}
"""
Type of a snippet file input action
"""
enum SnippetFileInputActionEnum {
create
delete
move
update
}
"""
Represents an action to perform over a snippet file
"""
input SnippetFileInputType {
"""
Type of input action
"""
action: SnippetFileInputActionEnum!
"""
Snippet file content
"""
content: String
"""
Path of the snippet file
"""
filePath: String!
"""
Previous path of the snippet file
"""
previousPath: String
}
type SnippetPermissions {
"""
Indicates the user can perform `admin_snippet` on this resource
......@@ -12908,6 +12943,11 @@ input UpdateSnippetInput {
"""
fileName: String
"""
The snippet files to update
"""
files: [SnippetFileInputType!]
"""
The global id of the snippet to update
"""
......
......@@ -34577,6 +34577,100 @@
"enumValues": null,
"possibleTypes": null
},
{
"kind": "ENUM",
"name": "SnippetFileInputActionEnum",
"description": "Type of a snippet file input action",
"fields": null,
"inputFields": null,
"interfaces": null,
"enumValues": [
{
"name": "create",
"description": null,
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "update",
"description": null,
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "delete",
"description": null,
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "move",
"description": null,
"isDeprecated": false,
"deprecationReason": null
}
],
"possibleTypes": null
},
{
"kind": "INPUT_OBJECT",
"name": "SnippetFileInputType",
"description": "Represents an action to perform over a snippet file",
"fields": null,
"inputFields": [
{
"name": "action",
"description": "Type of input action",
"type": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "ENUM",
"name": "SnippetFileInputActionEnum",
"ofType": null
}
},
"defaultValue": null
},
{
"name": "previousPath",
"description": "Previous path of the snippet file",
"type": {
"kind": "SCALAR",
"name": "String",
"ofType": null
},
"defaultValue": null
},
{
"name": "filePath",
"description": "Path of the snippet file",
"type": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "SCALAR",
"name": "String",
"ofType": null
}
},
"defaultValue": null
},
{
"name": "content",
"description": "Snippet file content",
"type": {
"kind": "SCALAR",
"name": "String",
"ofType": null
},
"defaultValue": null
}
],
"interfaces": null,
"enumValues": null,
"possibleTypes": null
},
{
"kind": "OBJECT",
"name": "SnippetPermissions",
......@@ -38099,6 +38193,24 @@
},
"defaultValue": null
},
{
"name": "files",
"description": "The snippet files to update",
"type": {
"kind": "LIST",
"name": null,
"ofType": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "INPUT_OBJECT",
"name": "SnippetFileInputType",
"ofType": null
}
}
},
"defaultValue": null
},
{
"name": "clientMutationId",
"description": "A unique identifier for the client performing the mutation.",
......@@ -609,6 +609,7 @@ Supported applications:
- [Sentry](#install-sentry-using-gitlab-cicd)
- [GitLab Runner](#install-gitlab-runner-using-gitlab-cicd)
- [Cilium](#install-cilium-using-gitlab-cicd)
- [Falco](#install-falco-using-gitlab-cicd)
- [Vault](#install-vault-using-gitlab-cicd)
- [JupyterHub](#install-jupyterhub-using-gitlab-cicd)
- [Elastic Stack](#install-elastic-stack-using-gitlab-cicd)
......@@ -986,6 +987,93 @@ metrics:
- 'flow:sourceContext=namespace;destinationContext=namespace'
```
### Install Falco using GitLab CI/CD
> [Introduced](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/merge_requests/91) in GitLab 13.1.
GitLab Container Host Security Monitoring uses [Falco](https://falco.org/)
as a runtime security tool that listens to the Linux kernel using eBPF. Falco parses system calls
and asserts the stream against a configurable rules engine in real time. For more information, see
[Falco's Documentation](https://falco.org/docs/).
You can enable Falco in the
`.gitlab/managed-apps/config.yaml` file:
```yaml
falco:
installed: true
```
You can customize Falco's Helm variables by defining the
`.gitlab/managed-apps/falco/values.yaml` file in your cluster
management project. Refer to the
[Falco chart](https://github.com/helm/charts/blob/master/stable/falco/)
for the available configuration options.
CAUTION: **Caution:**
By default, eBPF support is enabled and Falco uses an [eBPF probe](https://falco.org/docs/event-sources/drivers/#using-the-ebpf-probe) to pass system calls to userspace.
If your cluster doesn't support this, you can configure Falco to use the kernel module instead by adding the following to `.gitlab/managed-apps/falco/values.yaml`:
```yaml
ebpf:
enabled: false
```
In rare cases where automatic probe installation on your cluster isn't possible and the kernel/probe
isn't precompiled, you may need to manually prepare the kernel module or eBPF probe with
[driverkit](https://github.com/falcosecurity/driverkit#against-a-kubernetes-cluster)
and install it on each cluster node.
By default, Falco is deployed with a limited set of rules. To add more rules, add the following to
`.gitlab/managed-apps/falco/values.yaml` (you can get examples from
[Cloud Native Security Hub](https://securityhub.dev/)):
```yaml
customRules:
file-integrity.yaml: |-
- rule: Detect New File
desc: detect new file created
condition: >
evt.type = chmod or evt.type = fchmod
output: >
File below a known directory opened for writing (user=%user.name
command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2])
priority: ERROR
tags: [filesystem]
- rule: Detect New Directory
desc: detect new directory created
condition: >
mkdir
output: >
File below a known directory opened for writing (user=%user.name
command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2])
priority: ERROR
tags: [filesystem]
```
By default, Falco only outputs security events to logs as JSON objects. To set it to output to an
[external API](https://falco.org/docs/alerts#https-output-send-alerts-to-an-https-end-point)
or [application](https://falco.org/docs/alerts#program-output),
add the following to `.gitlab/managed-apps/falco/values.yaml`:
```yaml
falco:
programOutput:
enabled: true
keepAlive: false
program: mail -s "Falco Notification" someone@example.com
httpOutput:
enabled: true
url: http://some.url
```
You can check these logs with the following command:
```shell
kubectl logs -l app=falco -n gitlab-managed-apps
```
### Install Vault using GitLab CI/CD
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/9982) in GitLab 12.9.
......
......@@ -187,7 +187,7 @@ To minimize the number of notifications that do not require any action, from [Gi
| Remove milestone merge request | Subscribers, participants mentioned, and Custom notification level with this event selected |
| New comment | The above, plus anyone mentioned by `@username` in the comment, with notification level "Mention" or higher |
| Failed pipeline | The author of the pipeline |
| Fixed pipeline | The author of the pipeline. Disabled by default. To activate it you must [enable the `ci_pipeline_fixed_notifications` feature flag](../../development/feature_flags/development.md#enabling-a-feature-flag-in-development). |
| Fixed pipeline | The author of the pipeline. Enabled by default. |
| Successful pipeline | The author of the pipeline, if they have the custom notification setting for successful pipelines set. If the pipeline failed previously, a `Fixed pipeline` message will be sent for the first successful pipeline after the failure, then a `Successful pipeline` message for any further successful pipelines. |
| New epic **(ULTIMATE)** | |
| Close epic **(ULTIMATE)** | |
......
---
type: reference
---
# Code Intelligence
> [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/1576) in GitLab 13.1.
Code Intelligence adds code navigation features common to interactive
development environments (IDEs), including:
- Type signatures and symbol documentation.
- Go-to definition.
Code Intelligence is built into GitLab and powered by [LSIF](https://lsif.dev/)
(Language Server Index Format), a file format for precomputed code
intelligence data.
## Configuration
Enable code intelligence for a project by adding a GitLab CI/CD job to the project's
`.gitlab-ci.yml` that generates the LSIF artifact:
```yaml
code_navigation:
script:
- go get github.com/sourcegraph/lsif-go/cmd/lsif-go
- lsif-go
artifacts:
reports:
lsif: dump.lsif
```
The generated LSIF file must be less than 170 MiB.
After the job succeeds, code intelligence data can be viewed while browsing the code:
![Code intelligence](img/code_intelligence_v13_1.png)
## Language support
Generating an LSIF file requires a language server indexer implementation for the
relevant language.
| Language | Implementation |
|---|---|
| Go | [sourcegraph/lsif-go](https://github.com/sourcegraph/lsif-go) |
| JavaScript | [sourcegraph/lsif-node](https://github.com/sourcegraph/lsif-node) |
| TypeScript | [sourcegraph/lsif-node](https://github.com/sourcegraph/lsif-node) |
View a complete list of [available LSIF indexers](https://lsif.dev/#implementations-server) on their website and
refer to their documentation to see how to generate an LSIF file for your specific language.
......@@ -104,6 +104,7 @@ When you create a project in GitLab, you'll have access to a large number of
- [Dependency List](../application_security/dependency_list/index.md): view project dependencies. **(ULTIMATE)**
- [Requirements](requirements/index.md): Requirements allow you to create criteria to check your products against. **(ULTIMATE)**
- [Static Site Editor](static_site_editor/index.md): quickly edit content on static websites without prior knowledge of the codebase or Git commands.
- [Code Intelligence](code_intelligence.md): code navigation features.
### Project integrations
......
......@@ -30,9 +30,11 @@ to be enabled:
project level, navigate to your project's **Settings > General**, expand **Visibility, project features, permissions**
and enable **Git Large File Storage**.
Design Management requires that projects are using
[hashed storage](../../../administration/repository_storage_types.md#hashed-storage)
(the default storage type since v10.0).
Design Management also requires that projects are using
[hashed storage](../../../administration/raketasks/storage.md#migrate-to-hashed-storage). Since
GitLab 10.0, newly created projects use hashed storage by default. A GitLab admin can verify the storage type of a
project by navigating to **Admin Area > Projects** and then selecting the project in question.
A project uses hashed storage if its *Gitaly relative path* contains `@hashed`.
If the requirements are not met, the **Designs** tab displays a message to the user.
......@@ -47,6 +49,7 @@ and [PDFs](https://gitlab.com/gitlab-org/gitlab/-/issues/32811) is planned for a
## Limitations
- Design uploads are limited to 10 files at a time.
- From GitLab 13.1, Design filenames are limited to 255 characters.
- Design Management data
[isn't deleted when a project is destroyed](https://gitlab.com/gitlab-org/gitlab/-/issues/13429) yet.
- Design Management data [won't be moved](https://gitlab.com/gitlab-org/gitlab/-/issues/13426)
......
......@@ -11,7 +11,12 @@ module API
# Avoids an N+1 query when metadata is included
def issuable_metadata(subject, options, method, args = nil)
cached_subject = options.dig(:issuable_metadata, subject.id)
(cached_subject || subject).public_send(method, *args) # rubocop: disable GitlabSecurity/PublicSend
if cached_subject
cached_subject[method]
else
subject.public_send(method, *args) # rubocop: disable GitlabSecurity/PublicSend
end
end
end
end
......
......@@ -23,7 +23,7 @@ module Gitlab
end
def self.pipeline_fixed_notifications?
::Feature.enabled?(:ci_pipeline_fixed_notifications)
::Feature.enabled?(:ci_pipeline_fixed_notifications, default_enabled: true)
end
def self.instance_variables_ui_enabled?
......
......@@ -7,11 +7,13 @@ module Gitlab
# data structure to store issuable metadata like
# upvotes, downvotes, notes, and closing merge requests counts for issues and merge requests
# this avoids N+1 queries when loading issuable collections on the frontend
IssuableMeta = Struct.new(:upvotes, :downvotes, :user_notes_count, :mrs_count) do
def merge_requests_count(user = nil)
mrs_count
end
end
IssuableMeta = Struct.new(
:upvotes,
:downvotes,
:user_notes_count,
:merge_requests_count,
:blocking_issues_count # EE-ONLY
)
attr_reader :current_user, :issuable_collection
......@@ -95,3 +97,5 @@ module Gitlab
end
end
end
Gitlab::IssuableMetadata.prepend_if_ee('EE::Gitlab::IssuableMetadata')
......@@ -29,14 +29,15 @@ module Gitlab
def uncached_data
clear_memoized_limits
license_usage_data
.merge(system_usage_data)
.merge(features_usage_data)
.merge(components_usage_data)
.merge(cycle_analytics_usage_data)
.merge(object_store_usage_data)
.merge(topology_usage_data)
.merge(recording_ce_finish_data)
with_finished_at(:recording_ce_finished_at) do
license_usage_data
.merge(system_usage_data)
.merge(features_usage_data)
.merge(components_usage_data)
.merge(cycle_analytics_usage_data)
.merge(object_store_usage_data)
.merge(topology_usage_data)
end
end
def to_json(force_refresh: false)
......@@ -59,12 +60,6 @@ module Gitlab
Time.now
end
def recording_ce_finish_data
{
recording_ce_finished_at: Time.now
}
end
# rubocop: disable Metrics/AbcSize
# rubocop: disable CodeReuse/ActiveRecord
def system_usage_data
......
......@@ -92,6 +92,10 @@ module Gitlab
[result, duration]
end
def with_finished_at(key, &block)
yield.merge(key => Time.now)
end
private
def redis_usage_counter
......
......@@ -27648,7 +27648,7 @@ msgstr ""
msgid "suggestPipeline|2/2: Commit your changes"
msgstr ""
msgid "suggestPipeline|Commit the changes and your pipeline will automatically run for the first time."
msgid "suggestPipeline|The template is ready! You can now commit it to create your first pipeline."
msgstr ""
msgid "suggestPipeline|We recommend the %{boldStart}Code Quality%{boldEnd} template, which will add a report widget to your Merge Requests. This way you’ll learn about code quality degradations much sooner. %{footerStart} Goodbye technical debt! %{footerEnd}"
......
......@@ -15,7 +15,7 @@ import { createStore } from '~/monitoring/stores';
import { panelTypes, chartHeight } from '~/monitoring/constants';
import TimeSeries from '~/monitoring/components/charts/time_series.vue';
import * as types from '~/monitoring/stores/mutation_types';
import { deploymentData, mockProjectDir, annotationsData } from '../../mock_data';
import { deploymentData, mockProjectDir, annotationsData, metricsResult } from '../../mock_data';
import {
metricsDashboardPayload,
metricsDashboardViewModel,
......@@ -702,9 +702,7 @@ describe('Time series component', () => {
beforeEach(() => {
store = createStore();
const graphData = cloneDeep(metricsDashboardViewModel.panelGroups[0].panels[3]);
graphData.metrics.forEach(metric =>
Object.assign(metric, { result: metricResultStatus.result }),
);
graphData.metrics.forEach(metric => Object.assign(metric, { result: metricsResult }));
createWrapper({ graphData: { ...graphData, type: 'area-chart' } }, mount);
return wrapper.vm.$nextTick();
......
......@@ -14,16 +14,25 @@ export const metricsDashboardPanelCount = 22;
export const metricResultStatus = {
// First metric in fixture `metrics_dashboard/environment_metrics_dashboard.json`
metricId: 'NO_DB_response_metrics_nginx_ingress_throughput_status_code',
result: metricsResult,
data: {
resultType: 'matrix',
result: metricsResult,
},
};
export const metricResultPods = {
// Second metric in fixture `metrics_dashboard/environment_metrics_dashboard.json`
metricId: 'NO_DB_response_metrics_nginx_ingress_latency_pod_average',
result: metricsResult,
data: {
resultType: 'matrix',
result: metricsResult,
},
};
export const metricResultEmpty = {
metricId: 'NO_DB_response_metrics_nginx_ingress_16_throughput_status_code',
result: [],
data: {
resultType: 'matrix',
result: [],
},
};
// Graph data
......
......@@ -738,7 +738,7 @@ describe('Monitoring store actions', () => {
type: types.RECEIVE_METRIC_RESULT_SUCCESS,
payload: {
metricId: metric.metricId,
result: data.result,
data,
},
},
],
......@@ -775,7 +775,7 @@ describe('Monitoring store actions', () => {
type: types.RECEIVE_METRIC_RESULT_SUCCESS,
payload: {
metricId: metric.metricId,
result: data.result,
data,
},
},
],
......@@ -817,7 +817,7 @@ describe('Monitoring store actions', () => {
type: types.RECEIVE_METRIC_RESULT_SUCCESS,
payload: {
metricId: metric.metricId,
result: data.result,
data,
},
},
],
......@@ -852,7 +852,7 @@ describe('Monitoring store actions', () => {
type: types.RECEIVE_METRIC_RESULT_SUCCESS,
payload: {
metricId: metric.metricId,
result: data.result,
data,
},
},
],
......
......@@ -27,7 +27,10 @@ describe('Monitoring store Getters', () => {
const { metricId } = state.dashboard.panelGroups[group].panels[panel].metrics[metric];
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, {
metricId,
result,
data: {
resultType: 'matrix',
result,
},
});
};
......
......@@ -225,11 +225,28 @@ describe('Monitoring mutations', () => {
describe('Individual panel/metric results', () => {
const metricId = 'NO_DB_response_metrics_nginx_ingress_throughput_status_code';
const result = [
{
values: [[0, 1], [1, 1], [1, 3]],
},
];
const data = {
resultType: 'matrix',
result: [
{
metric: {
__name__: 'up',
job: 'prometheus',
instance: 'localhost:9090',
},
values: [[1435781430.781, '1'], [1435781445.781, '1'], [1435781460.781, '1']],
},
{
metric: {
__name__: 'up',
job: 'node',
instance: 'localhost:9091',
},
values: [[1435781430.781, '0'], [1435781445.781, '0'], [1435781460.781, '1']],
},
],
};
const dashboard = metricsDashboardPayload;
const getMetric = () => stateCopy.dashboard.panelGroups[1].panels[0].metrics[0];
......@@ -262,7 +279,7 @@ describe('Monitoring mutations', () => {
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](stateCopy, {
metricId,
result,
data,
});
expect(stateCopy.showEmptyState).toBe(false);
......@@ -273,10 +290,10 @@ describe('Monitoring mutations', () => {
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](stateCopy, {
metricId,
result,
data,
});
expect(getMetric().result).toHaveLength(result.length);
expect(getMetric().result).toHaveLength(data.result.length);
expect(getMetric()).toEqual(
expect.objectContaining({
loading: false,
......
......@@ -5,7 +5,7 @@ import {
parseAnnotationsResponse,
removeLeadingSlash,
mapToDashboardViewModel,
normalizeQueryResult,
normalizeQueryResponseData,
convertToGrafanaTimeRange,
addDashboardMetaDataToLink,
} from '~/monitoring/stores/utils';
......@@ -400,28 +400,6 @@ describe('mapToDashboardViewModel', () => {
});
});
describe('normalizeQueryResult', () => {
const testData = {
metric: {
__name__: 'up',
job: 'prometheus',
instance: 'localhost:9090',
},
values: [[1435781430.781, '1'], [1435781445.781, '1'], [1435781460.781, '1']],
};
it('processes a simple matrix result', () => {
expect(normalizeQueryResult(testData)).toEqual({
metric: { __name__: 'up', job: 'prometheus', instance: 'localhost:9090' },
values: [
['2015-07-01T20:10:30.781Z', 1],
['2015-07-01T20:10:45.781Z', 1],
['2015-07-01T20:11:00.781Z', 1],
],
});
});
});
describe('uniqMetricsId', () => {
[
{ input: { id: 1 }, expected: `${NOT_IN_DB_PREFIX}_1` },
......@@ -607,3 +585,118 @@ describe('user-defined links utils', () => {
});
});
});
describe('normalizeQueryResponseData', () => {
// Data examples from
// https://prometheus.io/docs/prometheus/latest/querying/api/#expression-queries
it('processes a string result', () => {
const mockScalar = {
resultType: 'string',
result: [1435781451.781, '1'],
};
expect(normalizeQueryResponseData(mockScalar)).toEqual([
{
metric: {},
value: ['2015-07-01T20:10:51.781Z', '1'],
values: [['2015-07-01T20:10:51.781Z', '1']],
},
]);
});
it('processes a scalar result', () => {
const mockScalar = {
resultType: 'scalar',
result: [1435781451.781, '1'],
};
expect(normalizeQueryResponseData(mockScalar)).toEqual([
{
metric: {},
value: ['2015-07-01T20:10:51.781Z', 1],
values: [['2015-07-01T20:10:51.781Z', 1]],
},
]);
});
it('processes a vector result', () => {
const mockVector = {
resultType: 'vector',
result: [
{
metric: {
__name__: 'up',
job: 'prometheus',
instance: 'localhost:9090',
},
value: [1435781451.781, '1'],
},
{
metric: {
__name__: 'up',
job: 'node',
instance: 'localhost:9100',
},
value: [1435781451.781, '0'],
},
],
};
expect(normalizeQueryResponseData(mockVector)).toEqual([
{
metric: { __name__: 'up', job: 'prometheus', instance: 'localhost:9090' },
value: ['2015-07-01T20:10:51.781Z', 1],
values: [['2015-07-01T20:10:51.781Z', 1]],
},
{
metric: { __name__: 'up', job: 'node', instance: 'localhost:9100' },
value: ['2015-07-01T20:10:51.781Z', 0],
values: [['2015-07-01T20:10:51.781Z', 0]],
},
]);
});
it('processes a matrix result', () => {
const mockMatrix = {
resultType: 'matrix',
result: [
{
metric: {
__name__: 'up',
job: 'prometheus',
instance: 'localhost:9090',
},
values: [[1435781430.781, '1'], [1435781445.781, '1'], [1435781460.781, '1']],
},
{
metric: {
__name__: 'up',
job: 'node',
instance: 'localhost:9091',
},
values: [[1435781430.781, '0'], [1435781445.781, '0'], [1435781460.781, '1']],
},
],
};
expect(normalizeQueryResponseData(mockMatrix)).toEqual([
{
metric: { __name__: 'up', instance: 'localhost:9090', job: 'prometheus' },
values: [
['2015-07-01T20:10:30.781Z', 1],
['2015-07-01T20:10:45.781Z', 1],
['2015-07-01T20:11:00.781Z', 1],
],
},
{
metric: { __name__: 'up', instance: 'localhost:9091', job: 'node' },
values: [
['2015-07-01T20:10:30.781Z', 0],
['2015-07-01T20:10:45.781Z', 0],
['2015-07-01T20:11:00.781Z', 1],
],
},
]);
});
});
......@@ -8,7 +8,10 @@ export const setMetricResult = ({ store, result, group = 0, panel = 0, metric =
store.commit(`monitoringDashboard/${types.RECEIVE_METRIC_RESULT_SUCCESS}`, {
metricId,
result,
data: {
resultType: 'matrix',
result,
},
});
};
......
......@@ -23,13 +23,28 @@ describe('Legacy container component', () => {
createComponent({ selector: '.dummy-target' });
});
it('moves node inside component when mounted', () => {
expect(dummy.parentNode).toBe(wrapper.element);
describe('when mounted', () => {
it('moves node inside component', () => {
expect(dummy.parentNode).toBe(wrapper.element);
});
it('sets active class', () => {
expect(dummy.classList.contains('active')).toBe(true);
});
});
it('moves node back when unmounted', () => {
wrapper.destroy();
expect(dummy.parentNode).toBe(document.body);
describe('when unmounted', () => {
beforeEach(() => {
wrapper.destroy();
});
it('moves node back', () => {
expect(dummy.parentNode).toBe(document.body);
});
it('removes active class', () => {
expect(dummy.classList.contains('active')).toBe(false);
});
});
});
......
......@@ -31,6 +31,8 @@ issues:
- closed_by
- epic_issue
- epic
- feature_flag_issues
- feature_flags
- designs
- design_versions
- description_versions
......@@ -569,6 +571,9 @@ self_managed_prometheus_alert_events:
epic_issues:
- issue
- epic
feature_flag_issues:
- issue
- feature_flag
tracing_setting:
- project
reviews:
......
......@@ -169,6 +169,10 @@ describe Gitlab::UsageData, :aggregate_failures do
expect { subject }.not_to raise_error
end
it 'includes a recording_ce_finished_at timestamp' do
expect(subject[:recording_ce_finished_at]).to be_a(Time)
end
it 'jira usage works when queries time out' do
allow_any_instance_of(ActiveRecord::Relation)
.to receive(:find_in_batches).and_raise(ActiveRecord::StatementInvalid.new(''))
......@@ -216,14 +220,6 @@ describe Gitlab::UsageData, :aggregate_failures do
end
end
describe '.recording_ce_finished_at' do
subject { described_class.recording_ce_finish_data }
it 'gathers time ce recording finishes at' do
expect(subject[:recording_ce_finished_at]).to be_a(Time)
end
end
context 'when not relying on database records' do
describe '#features_usage_data_ce' do
subject { described_class.features_usage_data_ce }
......
......@@ -108,4 +108,14 @@ describe Gitlab::Utils::UsageData do
expect(duration).to eq(2)
end
end
describe '#with_finished_at' do
it 'adds a timestamp to the hash yielded by the block' do
freeze_time do
result = described_class.with_finished_at(:current_time) { { a: 1 } }
expect(result).to eq(a: 1, current_time: Time.now)
end
end
end
end
......@@ -16,8 +16,8 @@ describe 'Updating a Snippet' do
let(:current_user) { snippet.author }
let(:snippet_gid) { GitlabSchema.id_from_object(snippet).to_s }
let(:mutation) do
variables = {
let(:mutation_vars) do
{
id: snippet_gid,
content: updated_content,
description: updated_description,
......@@ -25,8 +25,9 @@ describe 'Updating a Snippet' do
file_name: updated_file_name,
title: updated_title
}
graphql_mutation(:update_snippet, variables)
end
let(:mutation) do
graphql_mutation(:update_snippet, mutation_vars)
end
def mutation_response
......@@ -101,7 +102,6 @@ describe 'Updating a Snippet' do
end
it_behaves_like 'graphql update actions'
it_behaves_like 'when the snippet is not found'
end
......@@ -148,4 +148,40 @@ describe 'Updating a Snippet' do
it_behaves_like 'when the snippet is not found'
end
context 'when using the files params' do
let!(:snippet) { create(:personal_snippet, :private, :repository) }
let(:updated_content) { 'updated_content' }
let(:updated_file) { 'CHANGELOG' }
let(:deleted_file) { 'README' }
let(:mutation_vars) do
{
id: snippet_gid,
files: [
{ action: :update, filePath: updated_file, content: updated_content },
{ action: :delete, filePath: deleted_file }
]
}
end
it 'updates the Snippet' do
blob_to_update = blob_at(updated_file)
expect(blob_to_update.data).not_to eq updated_content
blob_to_delete = blob_at(deleted_file)
expect(blob_to_delete).to be_present
post_graphql_mutation(mutation, current_user: current_user)
blob_to_update = blob_at(updated_file)
expect(blob_to_update.data).to eq updated_content
blob_to_delete = blob_at(deleted_file)
expect(blob_to_delete).to be_nil
end
def blob_at(filename)
snippet.repository.blob_at('HEAD', filename)
end
end
end