// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-rl.c - pblk's rate limiter for user I/O
 *
 */

#include "pblk.h"

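/*
 * (Re)arm the user I/O timer: user I/O counts as active for 5 seconds after
 * the last write. When the timer expires, pblk_rl_u_timer() clears
 * rb_user_active and GC may claim the full write budget.
 */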
static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
{
	mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
}

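/*
 * Return true when the write buffer has no space left. rb_space is negative
 * in normal operation (no limit); a non-negative value caps the entries that
 * may still be admitted, set elsewhere in pblk when writes are being stopped.
 */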
int pblk_rl_is_limit(struct pblk_rl *rl)
{
	int rb_space;

	rb_space = atomic_read(&rl->rb_space);

	return (rb_space == 0);
}

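/*
 * Admission control for user writes: NVM_IO_ERR if nr_entries would overrun
 * an imposed space limit, NVM_IO_REQUEUE if the user budget is currently
 * exhausted (the caller retries later), NVM_IO_OK otherwise.
 */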
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
	int rb_space = atomic_read(&rl->rb_space);

	if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
		return NVM_IO_ERR;

	if (rb_user_cnt >= rl->rb_user_max)
		return NVM_IO_REQUEUE;

	return NVM_IO_OK;
}

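/* Account nr_entries against the space limit, if one has been imposed */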
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
{
	int rb_space = atomic_read(&rl->rb_space);

	if (unlikely(rb_space >= 0))
		atomic_sub(nr_entries, &rl->rb_space);
}

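/*
 * Admission control for GC writes: GC is refused only when its budget is
 * exhausted while user I/O is active. With no user I/O in flight, GC may
 * take over the whole write buffer.
 */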
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
	int rb_user_active;

	/* If there is no user I/O, let GC take over space on the write buffer */
	rb_user_active = READ_ONCE(rl->rb_user_active);
	return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
}

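/*
 * Charge nr_entries of user I/O to the rate limiter; the charge is returned
 * through pblk_rl_out() once the entries leave the write buffer.
 */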
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_user_cnt);

	/* Flag user I/O as active so that GC is throttled while it runs */
	smp_store_release(&rl->rb_user_active, 1);
	pblk_rl_kick_u_timer(rl);
}

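/*
 * Track lines with write errors. While any such line awaits recovery, the
 * rate limiter reserves a small GC budget for it (PBLK_RL_WERR state in
 * __pblk_rl_update_rates()).
 */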
void pblk_rl_werr_line_in(struct pblk_rl *rl)
{
	atomic_inc(&rl->werr_lines);
}

void pblk_rl_werr_line_out(struct pblk_rl *rl)
{
	atomic_dec(&rl->werr_lines);
}

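/*
 * Charge nr_entries of GC I/O to the rate limiter; pblk_rl_out() returns
 * both user and GC charges once entries leave the write buffer.
 */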
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_gc_cnt);
}

void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
{
	atomic_sub(nr_user, &rl->rb_user_cnt);
	atomic_sub(nr_gc, &rl->rb_gc_cnt);
}

unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_blocks);
}

unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_user_blocks);
}

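/*
 * Split the write-buffer budget between user I/O and GC according to the
 * number of free user blocks. At or above the high watermark the entire
 * budget goes to user I/O; below it, the user share shrinks linearly in
 * windows of NVM_MAX_VLBA entries. Worked example (illustrative numbers,
 * not a real geometry): rb_budget = 4096 and NVM_MAX_VLBA = 64 give
 * rb_windows_pw = 6; with high = 1024 (high_pw = 10) and free_blocks = 256,
 * shift = 4, user_windows = 256 >> 4 = 16, rb_user_max = 16 << 6 = 1024 and
 * rb_gc_max = 4096 - 1024 = 3072.
 */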
static void __pblk_rl_update_rates(struct pblk_rl *rl,
				   unsigned long free_blocks)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	int max = rl->rb_budget;
	int werr_gc_needed = atomic_read(&rl->werr_lines);

	if (free_blocks >= rl->high) {
		if (werr_gc_needed) {
			/* Allocate a small budget for recovering
			 * lines with write errors
			 */
			rl->rb_gc_max = 1 << rl->rb_windows_pw;
			rl->rb_user_max = max - rl->rb_gc_max;
			rl->rb_state = PBLK_RL_WERR;
		} else {
			rl->rb_user_max = max;
			rl->rb_gc_max = 0;
			rl->rb_state = PBLK_RL_OFF;
		}
	} else if (free_blocks < rl->high) {
		int shift = rl->high_pw - rl->rb_windows_pw;
		int user_windows = free_blocks >> shift;
		int user_max = user_windows << ilog2(NVM_MAX_VLBA);

		rl->rb_user_max = user_max;
		rl->rb_gc_max = max - user_max;

		if (free_blocks <= rl->rsv_blocks) {
			rl->rb_user_max = 0;
			rl->rb_gc_max = max;
		}

		/* In the worst case, we will need to GC lines in the low list
		 * (high valid sector count). If there are lines to GC on high
		 * or mid lists, these will be prioritized
		 */
		rl->rb_state = PBLK_RL_LOW;
	}

	if (rl->rb_state != PBLK_RL_OFF)
		pblk_gc_should_start(pblk);
	else
		pblk_gc_should_stop(pblk);
}

void pblk_rl_update_rates(struct pblk_rl *rl)
{
	__pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
}

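/*
 * A line has been freed: return its good blocks to the free counters and
 * recompute the user/GC split.
 */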
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_add(blk_in_line, &rl->free_blocks);
	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}

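/*
 * A line has been taken into use: remove its good blocks from the free
 * counters. When @used is false the line presumably holds no user data,
 * so free_user_blocks is read back unchanged for the rate update.
 */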
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
			    bool used)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_sub(blk_in_line, &rl->free_blocks);

	if (used)
		free_blocks = atomic_sub_return(blk_in_line,
						&rl->free_user_blocks);
	else
		free_blocks = atomic_read(&rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}

int pblk_rl_high_thrs(struct pblk_rl *rl)
{
	return rl->high;
}

int pblk_rl_max_io(struct pblk_rl *rl)
{
	return rl->rb_max_io;
}

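/*
 * Timer callback: no user I/O was seen for 5 seconds, so mark the user
 * stream idle and let GC claim the full write budget.
 */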
static void pblk_rl_u_timer(struct timer_list *t)
{
	struct pblk_rl *rl = from_timer(rl, t, u_timer);

	/* Release user I/O state; GC may now use the full write budget */
	smp_store_release(&rl->rb_user_active, 0);
}

void pblk_rl_free(struct pblk_rl *rl)
{
	del_timer(&rl->u_timer);
}

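/*
 * Initialize the rate limiter. @budget is the write-buffer size in entries;
 * @threshold, when non-zero, is subtracted (together with the minimum write
 * size) from @budget when sizing the largest single I/O (rb_max_io).
 */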
void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int sec_meta, blk_meta;
	unsigned int rb_windows;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
	rl->high_pw = get_count_order(rl->high);

	rl->rsv_blocks = pblk_get_min_chks(pblk);

	/* This will always be a power-of-2 */
	rb_windows = budget / NVM_MAX_VLBA;
	rl->rb_windows_pw = get_count_order(rb_windows);

	/* To start with, the entire buffer is available to user I/O writers */
	rl->rb_budget = budget;
	rl->rb_user_max = budget;
	rl->rb_gc_max = 0;
	rl->rb_state = PBLK_RL_HIGH;

	/* Maximize I/O size and ensure that the back threshold is respected */
	if (threshold)
		rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
	else
		rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;

	atomic_set(&rl->rb_user_cnt, 0);
	atomic_set(&rl->rb_gc_cnt, 0);
	atomic_set(&rl->rb_space, -1);
	atomic_set(&rl->werr_lines, 0);

	timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);

	rl->rb_user_active = 0;
	rl->rb_gc_active = 0;
}