/* kfifo_buf.c - kfifo backed IIO buffer implementation */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
#include <linux/mutex.h>
8
#include <linux/iio/kfifo_buf.h>
9
#include <linux/sched.h>
10
#include <linux/poll.h>
11

12
struct iio_kfifo {
13
	struct iio_buffer buffer;
14
	struct kfifo kf;
15
	struct mutex user_lock;
16 17 18
	int update_needed;
};

19
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
20

21 22 23 24 25 26
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
				int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;

27 28
	return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
			     bytes_per_datum, GFP_KERNEL);
29 30
}

31
static int iio_request_update_kfifo(struct iio_buffer *r)
32 33 34 35
{
	int ret = 0;
	struct iio_kfifo *buf = iio_to_kfifo(r);

36
	mutex_lock(&buf->user_lock);
37 38 39
	if (buf->update_needed) {
		kfifo_free(&buf->kf);
		ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
40
				   buf->buffer.length);
41
		buf->update_needed = false;
42 43 44
	} else {
		kfifo_reset_out(&buf->kf);
	}
45
	mutex_unlock(&buf->user_lock);
46

47 48 49
	return ret;
}

50
static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
51
{
52 53
	struct iio_kfifo *kf = iio_to_kfifo(r);
	kf->update_needed = true;
54 55 56
	return 0;
}

57
static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
58
{
59 60 61 62
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		iio_mark_update_needed_kfifo(r);
	}
63 64 65
	return 0;
}

66
static int iio_set_length_kfifo(struct iio_buffer *r, int length)
67
{
68 69 70
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
71 72
	if (r->length != length) {
		r->length = length;
73
		iio_mark_update_needed_kfifo(r);
74 75 76 77
	}
	return 0;
}

78
static int iio_store_to_kfifo(struct iio_buffer *r,
79
			      const void *data)
80 81 82
{
	int ret;
	struct iio_kfifo *kf = iio_to_kfifo(r);
83 84
	ret = kfifo_in(&kf->kf, data, 1);
	if (ret != 1)
85 86 87 88
		return -EBUSY;
	return 0;
}

89
static int iio_read_first_n_kfifo(struct iio_buffer *r,
90
			   size_t n, char __user *buf)
91 92 93 94
{
	int ret, copied;
	struct iio_kfifo *kf = iio_to_kfifo(r);

95 96
	if (mutex_lock_interruptible(&kf->user_lock))
		return -ERESTARTSYS;
97

98 99 100 101 102 103 104 105
	if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
		ret = -EINVAL;
	else
		ret = kfifo_to_user(&kf->kf, buf, n, &copied);
	mutex_unlock(&kf->user_lock);
	if (ret < 0)
		return ret;

106 107
	return copied;
}

109
static size_t iio_kfifo_buf_data_available(struct iio_buffer *r)
110 111
{
	struct iio_kfifo *kf = iio_to_kfifo(r);
112
	size_t samples;
113 114

	mutex_lock(&kf->user_lock);
115
	samples = kfifo_len(&kf->kf);
116 117
	mutex_unlock(&kf->user_lock);

118
	return samples;
119 120
}

121 122
static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
{
L
Lars-Peter Clausen 已提交
123 124
	struct iio_kfifo *kf = iio_to_kfifo(buffer);

125
	mutex_destroy(&kf->user_lock);
L
Lars-Peter Clausen 已提交
126 127
	kfifo_free(&kf->kf);
	kfree(kf);
128 129
}

130
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
131 132
	.store_to = &iio_store_to_kfifo,
	.read_first_n = &iio_read_first_n_kfifo,
133
	.data_available = iio_kfifo_buf_data_available,
134 135 136
	.request_update = &iio_request_update_kfifo,
	.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
	.set_length = &iio_set_length_kfifo,
137
	.release = &iio_kfifo_buffer_release,
138 139

	.modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
140
};

142
struct iio_buffer *iio_kfifo_allocate(void)
143 144 145
{
	struct iio_kfifo *kf;

146
	kf = kzalloc(sizeof(*kf), GFP_KERNEL);
147 148
	if (!kf)
		return NULL;
149

150 151 152
	kf->update_needed = true;
	iio_buffer_init(&kf->buffer);
	kf->buffer.access = &kfifo_access_funcs;
153
	kf->buffer.length = 2;
154
	mutex_init(&kf->user_lock);
155

156 157 158 159 160 161
	return &kf->buffer;
}
EXPORT_SYMBOL(iio_kfifo_allocate);

/**
 * iio_kfifo_free - release a buffer obtained from iio_kfifo_allocate()
 * @r: the buffer to release
 *
 * Drops the reference; actual freeing happens through the buffer's
 * release callback once the last reference is gone.
 */
void iio_kfifo_free(struct iio_buffer *r)
{
	iio_buffer_put(r);
}
EXPORT_SYMBOL(iio_kfifo_free);
/* devres destructor: @res holds a struct iio_buffer pointer to free. */
static void devm_iio_kfifo_release(struct device *dev, void *res)
{
	struct iio_buffer **r = res;

	iio_kfifo_free(*r);
}

/* devres match callback: true when @res wraps the buffer in @data. */
static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
{
	struct iio_buffer **r = res;

	/* A devres entry without a buffer is a bug; never match it. */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
 * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
 * @dev:		Device to allocate kfifo buffer for
 *
 * The buffer is automatically freed when @dev is unbound (or via
 * devm_iio_kfifo_free()).
 *
 * RETURNS:
 * Pointer to allocated iio_buffer on success, NULL on failure.
 */
struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
{
	struct iio_buffer **ptr, *r;

	ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	r = iio_kfifo_allocate();
	if (r) {
		*ptr = r;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return r;
}
EXPORT_SYMBOL(devm_iio_kfifo_allocate);

/**
 * devm_iio_kfifo_free - Resource-managed iio_kfifo_free()
 * @dev:		Device the buffer belongs to
 * @r:			The buffer associated with the device
 *
 * The WARN_ON fires if @r was not allocated for @dev via
 * devm_iio_kfifo_allocate().
 */
void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
{
	WARN_ON(devres_release(dev, devm_iio_kfifo_release,
			       devm_iio_kfifo_match, r));
}
EXPORT_SYMBOL(devm_iio_kfifo_free);

MODULE_LICENSE("GPL");