提交 693602d9 编写于 作者: P Pawel Chojnacki

Keep presentation logic in one place and remove unnecessary arguments.

+ fix tests
上级 4f824d2a
......@@ -106,8 +106,7 @@ class Deployment < ActiveRecord::Base
def metrics
return {} unless has_metrics?
-  metrics = project.monitoring_service.deployment_metrics(self)
-  metrics&.merge(deployment_time: created_at.to_i) || {}
+  project.monitoring_service.deployment_metrics(self)
end
private
......
......@@ -63,12 +63,13 @@ class PrometheusService < MonitoringService
{ success: false, result: err }
end
- def environment_metrics(environment, **args)
+ def environment_metrics(environment)
with_reactive_cache(Gitlab::Prometheus::Queries::EnvironmentQuery.name, environment.id, &:itself)
end
def deployment_metrics(deployment)
- with_reactive_cache(Gitlab::Prometheus::Queries::DeploymentQuery.name, deployment.id, &:itself)
+ metrics = with_reactive_cache(Gitlab::Prometheus::Queries::DeploymentQuery.name, deployment.id, &:itself)
+ metrics&.merge(deployment_time: created_at.to_i) || {}
end
# Cache metrics for specific environment
......
require 'spec_helper'
describe Gitlab::Prometheus::Queries::DeploymentQuery, lib: true do
- let(:environment) { create(:environment) }
+ let(:environment) { create(:environment, slug: 'environment-slug') }
let(:deployment) { create(:deployment, environment: environment) }
let(:client) { double('prometheus_client') }
subject { described_class.new(client) }
around do |example|
Timecop.freeze { example.run }
end
it 'sends appropriate queries to prometheus' do
start_time = (deployment.created_at - 30.minutes).to_f
stop_time = (deployment.created_at + 30.minutes).to_f
- expect(client).to receive(:query_range).with('avg(container_memory_usage_bytes{container_name!="POD",environment="environment1"}) / 2^20',
+ expect(client).to receive(:query_range).with('avg(container_memory_usage_bytes{container_name!="POD",environment="environment-slug"}) / 2^20',
  start: start_time, stop: stop_time)
- expect(client).to receive(:query).with('avg(avg_over_time(container_memory_usage_bytes{container_name!="POD",environment="environment1"}[30m]))',
+ expect(client).to receive(:query).with('avg(avg_over_time(container_memory_usage_bytes{container_name!="POD",environment="environment-slug"}[30m]))',
  time: deployment.created_at.to_f)
- expect(client).to receive(:query).with('avg(avg_over_time(container_memory_usage_bytes{container_name!="POD",environment="environment1"}[30m]))',
+ expect(client).to receive(:query).with('avg(avg_over_time(container_memory_usage_bytes{container_name!="POD",environment="environment-slug"}[30m]))',
  time: stop_time)
- expect(client).to receive(:query_range).with('avg(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="environment1"}[2m])) * 100',
+ expect(client).to receive(:query_range).with('avg(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="environment-slug"}[2m])) * 100',
  start: start_time, stop: stop_time)
- expect(client).to receive(:query).with('avg(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="environment1"}[30m])) * 100',
+ expect(client).to receive(:query).with('avg(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="environment-slug"}[30m])) * 100',
  time: deployment.created_at.to_f)
- expect(client).to receive(:query).with('avg(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="environment1"}[30m])) * 100',
+ expect(client).to receive(:query).with('avg(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="environment-slug"}[30m])) * 100',
  time: stop_time)
expect(subject.query(deployment.id)).to eq(memory_values: nil, memory_before: nil, memory_after: nil,
......
......@@ -52,7 +52,7 @@ describe Deployment, models: true do
describe '#metrics' do
let(:deployment) { create(:deployment) }
- subject { deployment.metrics(1.hour) }
+ subject { deployment.metrics }
context 'metrics are disabled' do
it { is_expected.to eq({}) }
......@@ -63,16 +63,17 @@ describe Deployment, models: true do
{
success: true,
metrics: {},
- last_update: 42
+ last_update: 42,
+ deployment_time: 1494408956
}
end
before do
- allow(deployment.project).to receive_message_chain(:monitoring_service, :metrics)
+ allow(deployment.project).to receive_message_chain(:monitoring_service, :deployment_metrics)
.with(any_args).and_return(simple_metrics)
end
- it { is_expected.to eq(simple_metrics.merge(deployment_time: deployment.created_at.utc.to_i)) }
+ it { is_expected.to eq(simple_metrics) }
end
end
......
......@@ -393,7 +393,7 @@ describe Environment, models: true do
it 'returns the metrics from the deployment service' do
expect(project.monitoring_service)
- .to receive(:metrics).with(environment)
+ .to receive(:environment_metrics).with(environment)
.and_return(:fake_metrics)
is_expected.to eq(:fake_metrics)
......
......@@ -82,7 +82,7 @@ describe PrometheusService, models: true, caching: true do
end
it 'returns reactive data' do
- is_expected.to eq(prometheus_data)
+ is_expected.to eq(prometheus_data.merge(deployment_time: deployment.created_at.to_i))
end
end
end
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册