Commit 4c286e62 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 1234edfae0fc
......@@ -22,8 +22,7 @@ import MonitorTimeSeriesChart from './charts/time_series.vue';
import MonitorSingleStatChart from './charts/single_stat.vue';
import GraphGroup from './graph_group.vue';
import EmptyState from './empty_state.vue';
-import TrackEventDirective from '~/vue_shared/directives/track_event';
-import { getTimeDiff, isValidDate, downloadCSVOptions, generateLinkToChartOptions } from '../utils';
+import { getTimeDiff, isValidDate } from '../utils';
export default {
components: {
......@@ -44,7 +43,6 @@ export default {
directives: {
GlModal: GlModalDirective,
GlTooltip: GlTooltipDirective,
-    TrackEvent: TrackEventDirective,
},
props: {
externalDashboardUrl: {
......@@ -300,8 +298,6 @@ export default {
onDateTimePickerApply(timeWindowUrlParams) {
return redirectTo(mergeUrlParams(timeWindowUrlParams, window.location.href));
},
-    downloadCSVOptions,
-    generateLinkToChartOptions,
},
addMetric: {
title: s__('Metrics|Add metric'),
......
......@@ -883,6 +883,15 @@ $ide-commit-header-height: 48px;
margin-right: $ide-tree-padding;
border-bottom: 1px solid $white-dark;
svg {
color: $gray-700;
&:focus,
&:hover {
color: $blue-600;
}
}
.ide-new-btn {
margin-left: auto;
}
......@@ -899,6 +908,11 @@ $ide-commit-header-height: 48px;
.dropdown-menu-toggle {
svg {
vertical-align: middle;
color: $gray-700;
&:hover {
color: $gray-700;
}
}
&:hover {
......
......@@ -21,16 +21,11 @@
margin-bottom: 2px;
}
-.issue-labels {
+.issue-labels,
+.author-link {
   display: inline-block;
 }
-.issuable-meta {
-  .author-link {
-    display: inline-block;
-  }
-}
.icon-merge-request-unmerged {
height: 13px;
margin-bottom: 3px;
......@@ -53,16 +48,6 @@
margin-right: 15px;
}
-.issues_content {
-  .title {
-    height: 40px;
-  }
-
-  form {
-    margin: 0;
-  }
-}
form.edit-issue {
margin: 0;
}
......@@ -79,10 +64,6 @@ ul.related-merge-requests > li {
margin-left: 5px;
}
-.row_title {
-  vertical-align: bottom;
-}
gl-emoji {
font-size: 1em;
}
......@@ -93,10 +74,6 @@ ul.related-merge-requests > li {
font-weight: $gl-font-weight-bold;
}
-.merge-request-id {
-  display: inline-block;
-}
.merge-request-status {
&.merged {
color: $blue-500;
......@@ -118,11 +95,7 @@ ul.related-merge-requests > li {
border-color: $issues-today-border;
}
-  &.closed {
-    background: $gray-light;
-    border-color: $border-color;
-  }
-
-  &.merged {
+  &.closed,
+  &.merged {
     background: $gray-light;
     border-color: $border-color;
......@@ -160,9 +133,12 @@ ul.related-merge-requests > li {
padding-bottom: 37px;
}
-.issues-nav-controls {
+.issues-nav-controls,
+.new-branch-col {
   font-size: 0;
+}
+
+.issues-nav-controls {
.btn-group:empty {
display: none;
}
......@@ -198,8 +174,6 @@ ul.related-merge-requests > li {
}
 .new-branch-col {
-  font-size: 0;
-
   .discussion-filter-container {
&:not(:only-child) {
margin-right: $gl-padding-8;
......@@ -297,11 +271,11 @@ ul.related-merge-requests > li {
padding-top: 0;
align-self: center;
}
-}
-
-.create-mr-dropdown-wrap {
-  .btn-group:not(.hidden) {
-    display: inline-flex;
-  }
+
+  .create-mr-dropdown-wrap {
+    .btn-group:not(.hidden) {
+      display: inline-flex;
+    }
+  }
 }
}
......
......@@ -114,8 +114,10 @@ def remove_fork_project_description_message(project)
source = visible_fork_source(project)
if source
-      _('This will remove the fork relationship between this project and %{fork_source}.') %
-        { fork_source: link_to(source.full_name, project_path(source)) }
+      msg = _('This will remove the fork relationship between this project and %{fork_source}.') %
+        { fork_source: link_to(source.full_name, project_path(source)) }
+
+      msg.html_safe
else
_('This will remove the fork relationship between this project and other projects in the fork network.')
end
......
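For context, a minimal sketch of the display bug fixed above, assuming the same `source` and helpers as in the diff; the `%` interpolation on a translated string returns a plain String, which Rails then HTML-escapes on render:

msg = _('This will remove the fork relationship between this project and %{fork_source}.') %
  { fork_source: link_to(source.full_name, project_path(source)) }

msg.class      # => String, so the embedded <a> tag was escaped on render (the bug)
msg.html_safe  # => ActiveSupport::SafeBuffer, so the link renders as real markup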
......@@ -4,6 +4,7 @@ class ActiveSession
include ActiveModel::Model
SESSION_BATCH_SIZE = 200
ALLOWED_NUMBER_OF_ACTIVE_SESSIONS = 100
attr_accessor :created_at, :updated_at,
:session_id, :ip_address,
......@@ -65,21 +66,22 @@ def self.list(user)
   def self.destroy(user, session_id)
     Gitlab::Redis::SharedState.with do |redis|
-      redis.srem(lookup_key_name(user.id), session_id)
-
-      deleted_keys = redis.del(key_name(user.id, session_id))
-
-      # only allow deleting the devise session if we could actually find a
-      # related active session. this prevents another user from deleting
-      # someone else's session.
-      if deleted_keys > 0
-        redis.del("#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id}")
-      end
+      destroy_sessions(redis, user, [session_id])
     end
   end
+
+  def self.destroy_sessions(redis, user, session_ids)
+    key_names = session_ids.map {|session_id| key_name(user.id, session_id) }
+    session_names = session_ids.map {|session_id| "#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id}" }
+
+    redis.srem(lookup_key_name(user.id), session_ids)
+    redis.del(key_names)
+    redis.del(session_names)
+  end
def self.cleanup(user)
Gitlab::Redis::SharedState.with do |redis|
clean_up_old_sessions(redis, user)
cleaned_up_lookup_entries(redis, user)
end
end
......@@ -118,19 +120,39 @@ def self.sessions_from_ids(session_ids)
end
end
-  def self.raw_active_session_entries(session_ids, user_id)
+  def self.raw_active_session_entries(redis, session_ids, user_id)
     return [] if session_ids.empty?

-    Gitlab::Redis::SharedState.with do |redis|
-      entry_keys = session_ids.map { |session_id| key_name(user_id, session_id) }
+    entry_keys = session_ids.map { |session_id| key_name(user_id, session_id) }

-      redis.mget(entry_keys)
-    end
+    redis.mget(entry_keys)
   end
def self.active_session_entries(session_ids, user_id, redis)
return [] if session_ids.empty?
entry_keys = raw_active_session_entries(redis, session_ids, user_id)
entry_keys.map do |raw_session|
Marshal.load(raw_session) # rubocop:disable Security/MarshalLoad
end
end
def self.clean_up_old_sessions(redis, user)
session_ids = session_ids_for_user(user.id)
return if session_ids.count <= ALLOWED_NUMBER_OF_ACTIVE_SESSIONS
# remove sessions if there are more than ALLOWED_NUMBER_OF_ACTIVE_SESSIONS.
sessions = active_session_entries(session_ids, user.id, redis)
sessions.sort_by! {|session| session.updated_at }.reverse!
sessions = sessions[ALLOWED_NUMBER_OF_ACTIVE_SESSIONS..-1].map { |session| session.session_id }
destroy_sessions(redis, user, sessions)
end
def self.cleaned_up_lookup_entries(redis, user)
session_ids = session_ids_for_user(user.id)
-    entries = raw_active_session_entries(session_ids, user.id)
+    entries = raw_active_session_entries(redis, session_ids, user.id)
# remove expired keys.
# only the single key entries are automatically expired by redis, the
......
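For context, a minimal sketch of how the new per-user session cap behaves, assuming a user who has accumulated more than ALLOWED_NUMBER_OF_ACTIVE_SESSIONS sessions (the counts below are hypothetical):

# cleanup now also calls clean_up_old_sessions, which sorts the user's
# sessions by updated_at (most recent first) and destroys everything
# past the first 100.
ActiveSession.session_ids_for_user(user.id).count # => e.g. 103
ActiveSession.cleanup(user)                       # enforces the cap
ActiveSession.session_ids_for_user(user.id).count # => 100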
---
title: Limit the number of stored sessions per user
merge_request: 19325
author:
type: added
---
title: 'Resolve Design view: Download single issue design image'
merge_request: 20703
author:
type: added
---
title: Removed unused methods in monitoring dashboard
merge_request: 20819
author:
type: other
---
title: Fix a display bug in the fork removal description message
merge_request: 20843
author:
type: fixed
......@@ -219,13 +219,43 @@ Note that your exact needs may be more, depending on your workload. Your
workload is influenced by factors such as (but not limited to) how active your
users are, how much automation you use, mirroring, and repo/change size.
### 5,000 User Configuration
- **Supported Users (approximate):** 5,000
- **Test RPS Rates:** API: 100 RPS, Web: 10 RPS, Git: 10 RPS
- **Status:** Work-in-progress
- **Known Issues:** For the latest list of known performance issues, see
  [this list](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues).
NOTE: **Note:** This architecture is a work in progress. The Quality team will be
certifying this environment in late 2019 or early 2020. The specifications may be
adjusted prior to certification based on performance testing.
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
| GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 16 threads | 3 | 16 vCPU, 14.4GB Memory | n1-highcpu-16 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Gitaly <br> - Gitaly Ruby workers on each node set to 20% of available CPUs | X[^1] | 8 vCPU, 30GB Memory | n1-standard-8 |
| Redis Cache + Sentinel <br> - Cache maxmemory set to 90% of available memory | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Redis Persistent + Sentinel | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| NFS Server[^4] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| S3 Object Storage[^3] | - | - | - |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| External load balancing node[^2] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^2] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On other
cloud vendors, a best-effort like-for-like match can be used.
### 10,000 User Configuration
- **Supported Users (approximate):** 10,000
- **Test RPS Rates:** API: 200 RPS, Web: 20 RPS, Git: 20 RPS
-- **Known Issues:** While validating the reference architectures, slow API
-  endpoints were discovered. For details, see the related issues list in
-  [this issue](https://gitlab.com/gitlab-org/quality/performance/issues/125).
+- **Known Issues:** For the latest list of known performance issues, see
+  [this list](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues).
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
......@@ -250,9 +280,8 @@ vendors a best effort like for like can be used.
- **Supported Users (approximate):** 25,000
- **Test RPS Rates:** API: 500 RPS, Web: 50 RPS, Git: 50 RPS
-- **Known Issues:** While validating the reference architectures, slow API
-  endpoints were discovered. For details, see the related issues list in
-  [this issue](https://gitlab.com/gitlab-org/quality/performance/issues/125).
+- **Known Issues:** For the latest list of known performance issues, see
+  [this list](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues).
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
......@@ -277,9 +306,8 @@ vendors a best effort like for like can be used.
- **Supported Users (approximate):** 50,000
- **Test RPS Rates:** API: 1000 RPS, Web: 100 RPS, Git: 100 RPS
-- **Known Issues:** While validating the reference architectures, slow API
-  endpoints were discovered. For details, see the related issues list in
-  [this issue](https://gitlab.com/gitlab-org/quality/performance/issues/125).
+- **Known Issues:** For the latest list of known performance issues, see
+  [this list](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues).
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
......@@ -300,15 +328,16 @@ endpoints were discovered. For details, see the related issues list in
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On other
cloud vendors, a best-effort like-for-like match can be used.
-[^1]: Gitaly node requirements are dependent on customer data. We recommend 2
-      nodes as an absolute minimum for performance at the 10,000 and 25,000 user
-      scale and 4 nodes as an absolute minimum at the 50,000 user scale, but
-      additional nodes should be considered in conjunction with a review of
-      project counts and sizes.
+[^1]: Gitaly node requirements depend on customer data, specifically the number of
+      projects and their sizes. We recommend 2 nodes as an absolute minimum for HA
+      environments, and at least 4 nodes when supporting 50,000 or more users.
+      We recommend that each Gitaly node store no more than 5TB of data.
+      Additional nodes should be considered in conjunction with a review of expected
+      data size and spread based on the recommendations above.
 [^2]: Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
       as the load balancer. However, other reputable load balancers with similar feature sets
-      should also work here but be aware these aren't validated.
+      should also work instead, though be aware that these aren't validated.
[^3]: For data objects such as LFS, Uploads, and Artifacts, we recommend an S3-compatible object
      storage service over NFS where possible, due to better performance and availability. Several types of objects
......
......@@ -18,6 +18,9 @@ review the sessions, and revoke any you don't recognize.
![Active sessions list](img/active_sessions_list.png)
CAUTION: **Caution:**
A user can have up to 100 active sessions at once. If the number of active sessions exceeds 100, the oldest sessions are deleted.
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
......
......@@ -4,6 +4,7 @@
module Quality
class KubernetesClient
RESOURCE_LIST = 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd'
CommandFailedError = Class.new(StandardError)
attr_reader :namespace
......@@ -13,6 +14,13 @@ def initialize(namespace:)
end
def cleanup(release_name:, wait: true)
delete_by_selector(release_name: release_name, wait: wait)
delete_by_matching_name(release_name: release_name)
end
private
def delete_by_selector(release_name:, wait:)
selector = case release_name
when String
%(-l release="#{release_name}")
......@@ -23,9 +31,9 @@ def cleanup(release_name:, wait: true)
end
       command = [
-        %(--namespace "#{namespace}"),
         'delete',
-        'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa',
+        RESOURCE_LIST,
+        %(--namespace "#{namespace}"),
'--now',
'--ignore-not-found',
'--include-uninitialized',
......@@ -36,7 +44,29 @@ def cleanup(release_name:, wait: true)
run_command(command)
end
-    private
def delete_by_matching_name(release_name:)
resource_names = raw_resource_names
command = [
'delete',
%(--namespace "#{namespace}")
]
Array(release_name).each do |release|
resource_names
.select { |resource_name| resource_name.include?(release) }
.each { |matching_resource| run_command(command + [matching_resource]) }
end
end
def raw_resource_names
command = [
'get',
RESOURCE_LIST,
%(--namespace "#{namespace}"),
'-o custom-columns=NAME:.metadata.name'
]
run_command(command).lines.map(&:strip)
end
def run_command(command)
final_command = ['kubectl', *command].join(' ')
......
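For context, a minimal usage sketch of the reworked client, assuming a hypothetical namespace and release names:

client = Quality::KubernetesClient.new(namespace: 'review-apps-ee')

# Deletes resources labelled release="review-foo", then fetches all resource
# names in the namespace and deletes any whose name contains "review-foo".
# Raises Quality::KubernetesClient::CommandFailedError if kubectl fails.
client.cleanup(release_name: 'review-foo')

# An array of releases is matched with -l 'release in (...)' instead.
client.cleanup(release_name: %w[review-foo review-bar], wait: false)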
......@@ -92,7 +92,7 @@ namespace :gitlab do
lookup_key_count = redis.scard(key)
session_ids = ActiveSession.session_ids_for_user(user_id)
-        entries = ActiveSession.raw_active_session_entries(session_ids, user_id)
+        entries = ActiveSession.raw_active_session_entries(redis, session_ids, user_id)
session_ids_and_entries = session_ids.zip(entries)
inactive_session_ids = session_ids_and_entries.map do |session_id, session|
......
......@@ -48,11 +48,31 @@ function delete_release() {
return
fi
echoinfo "Deleting release '${release}'..." true
helm_delete_release "${namespace}" "${release}"
kubectl_cleanup_release "${namespace}" "${release}"
}
function helm_delete_release() {
local namespace="${1}"
local release="${2}"
echoinfo "Deleting Helm release '${release}'..." true
helm delete --tiller-namespace "${namespace}" --purge "${release}"
}
function kubectl_cleanup_release() {
local namespace="${1}"
local release="${2}"
echoinfo "Deleting all K8s resources matching '${release}'..." true
kubectl --namespace "${namespace}" get ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd 2>&1 \
| grep "${release}" \
| awk '{print $1}' \
| xargs kubectl --namespace "${namespace}" delete \
|| true
}
function delete_failed_release() {
local namespace="${KUBE_NAMESPACE}"
local release="${CI_ENVIRONMENT_SLUG}"
......
......@@ -71,7 +71,7 @@ module Trigger
# Can be overridden
def version_param_value(version_file)
-      File.read(version_file).strip
+      ENV[version_file]&.strip || File.read(version_file).strip
end
def variables
......
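For context, a sketch of the new lookup order; GITALY_SERVER_VERSION is only a hypothetical example of a version file name:

# An environment variable named after the version file now takes
# precedence over the file's contents.
ENV['GITALY_SERVER_VERSION'] = 'v1.77.0'
version_param_value('GITALY_SERVER_VERSION') # => "v1.77.0"

ENV.delete('GITALY_SERVER_VERSION')
version_param_value('GITALY_SERVER_VERSION') # => contents of the GITALY_SERVER_VERSION file, stripped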
......@@ -5,7 +5,7 @@
describe Gitlab::HealthChecks::Probes::Collection do
let(:readiness) { described_class.new(*checks) }
-  describe '#call' do
+  describe '#execute' do
subject { readiness.execute }
context 'with all checks' do
......
......@@ -5,15 +5,27 @@
RSpec.describe Quality::KubernetesClient do
let(:namespace) { 'review-apps-ee' }
let(:release_name) { 'my-release' }
let(:pod_for_release) { "pod-my-release-abcd" }
let(:raw_resource_names_str) { "NAME\nfoo\n#{pod_for_release}\nbar" }
let(:raw_resource_names) { raw_resource_names_str.lines.map(&:strip) }
subject { described_class.new(namespace: namespace) }
describe 'RESOURCE_LIST' do
it 'returns the correct list of resources separated by commas' do
expect(described_class::RESOURCE_LIST).to eq('ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd')
end
end
describe '#cleanup' do
before do
allow(subject).to receive(:raw_resource_names).and_return(raw_resource_names)
end
it 'raises an error if the Kubernetes command fails' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
-        .with([%(kubectl --namespace "#{namespace}" delete ) \
-          'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
-          "--now --ignore-not-found --include-uninitialized --wait=true -l release=\"#{release_name}\""])
+        .with(["kubectl delete #{described_class::RESOURCE_LIST} " +
+          %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l release="#{release_name}")])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))
expect { subject.cleanup(release_name: release_name) }.to raise_error(described_class::CommandFailedError)
......@@ -21,9 +33,12 @@
it 'calls kubectl with the correct arguments' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
-        .with([%(kubectl --namespace "#{namespace}" delete ) \
-          'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
-          "--now --ignore-not-found --include-uninitialized --wait=true -l release=\"#{release_name}\""])
+        .with(["kubectl delete #{described_class::RESOURCE_LIST} " +
+          %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l release="#{release_name}")])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl delete --namespace "#{namespace}" #{pod_for_release})])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
# We're not verifying the output here, just silencing it
......@@ -35,20 +50,22 @@
it 'raises an error if the Kubernetes command fails' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
-        .with([%(kubectl --namespace "#{namespace}" delete ) \
-          'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
-          "--now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})'"])
-        .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))
+        .with(["kubectl delete #{described_class::RESOURCE_LIST} " +
+          %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})')])
+        .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))
expect { subject.cleanup(release_name: release_name) }.to raise_error(described_class::CommandFailedError)
end
it 'calls kubectl with the correct arguments' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
-        .with([%(kubectl --namespace "#{namespace}" delete ) \
-          'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
-          "--now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})'"])
-        .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
+        .with(["kubectl delete #{described_class::RESOURCE_LIST} " +
+          %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})')])
+        .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl delete --namespace "#{namespace}" #{pod_for_release})])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
# We're not verifying the output here, just silencing it
expect { subject.cleanup(release_name: release_name) }.to output.to_stdout
......@@ -58,24 +75,37 @@
context 'with `wait: false`' do
it 'raises an error if the Kubernetes command fails' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
-        .with([%(kubectl --namespace "#{namespace}" delete ) \
-          'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
-          "--now --ignore-not-found --include-uninitialized --wait=false -l release=\"#{release_name}\""])
-        .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))
+        .with(["kubectl delete #{described_class::RESOURCE_LIST} " +
+          %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=false -l release="#{release_name}")])
+        .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))