cfserl.c
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfserl.h>
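
/*
 * CAIF serial framing layer: delimits and reassembles frames on a
 * byte-stream link. On-the-wire format, as implemented below:
 *
 *	[ STX (0x02, optional) | length (2 bytes, LE) | payload ]
 *
 * The length field counts the bytes following it, so a complete frame
 * occupies length + 2 bytes after the optional STX delimiter.
 */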

#define container_obj(layr) ((struct cfserl *) layr)

#define CFSERL_STX 0x02
#define CAIF_MINIMUM_PACKET_SIZE 4
struct cfserl {
	struct cflayer layer;
	struct cfpkt *incomplete_frm;
	/* Protects parallel processing of incoming packets */
	spinlock_t sync;
	bool usestx;
};
#define STXLEN(layr) (layr->usestx ? 1 : 0)

static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
				int phyid);

struct cflayer *cfserl_create(int type, int instance, bool use_stx)
{
	struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
	if (!this) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return NULL;
	}
	caif_assert(offsetof(struct cfserl, layer) == 0);
	this->layer.receive = cfserl_receive;
	this->layer.transmit = cfserl_transmit;
	this->layer.ctrlcmd = cfserl_ctrlcmd;
	this->layer.type = type;
	this->usestx = use_stx;
	spin_lock_init(&this->sync);
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
	return &this->layer;
}
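
/*
 * Illustrative wiring sketch (not part of this file; the CAIF stack
 * configuration code does the equivalent). layer_set_up() and
 * layer_set_dn() are the helpers from <net/caif/caif_layer.h>; the
 * phy type constant is shown only as an example:
 *
 *	struct cflayer *ser = cfserl_create(CFPHYTYPE_FRAG, 0, true);
 *	if (ser != NULL) {
 *		layer_set_up(ser, link_layer);	(parsed frames go up)
 *		layer_set_dn(ser, phy_driver);	(framed packets go down)
 *	}
 */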

static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(l);
	u16 pkt_len;
	struct cfpkt *pkt = NULL;
	struct cfpkt *tail_pkt = NULL;
	u8 tmp8;
	u16 tmp;
	u8 stx = CFSERL_STX;
	int ret;
	u16 expectlen = 0;
	caif_assert(newpkt != NULL);
	spin_lock(&layr->sync);

	if (layr->incomplete_frm != NULL) {
		layr->incomplete_frm =
		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
		pkt = layr->incomplete_frm;
		if (pkt == NULL) {
			spin_unlock(&layr->sync);
			return -ENOMEM;
		}
	} else {
		pkt = newpkt;
	}
	layr->incomplete_frm = NULL;

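	/*
	 * Parse loop: each pass either resynchronises on the next STX,
	 * stashes an incomplete frame and returns, or splits off one
	 * complete frame, delivers it upwards and continues with the
	 * remainder.
	 */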
	do {
		/* Search for STX at start of pkt if STX is used */
		if (layr->usestx) {
			cfpkt_extr_head(pkt, &tmp8, 1);
			if (tmp8 != CFSERL_STX) {
				while (cfpkt_more(pkt)
				       && tmp8 != CFSERL_STX) {
					cfpkt_extr_head(pkt, &tmp8, 1);
				}
				if (!cfpkt_more(pkt)) {
					cfpkt_destroy(pkt);
					layr->incomplete_frm = NULL;
					spin_unlock(&layr->sync);
					return -EPROTO;
				}
			}
		}

		pkt_len = cfpkt_getlen(pkt);

		/*
		 * pkt_len is the accumulated length of the packet data
		 * received so far. Wait for more data if the frame does
		 * not yet hold the 2-byte length field.
		 */

		if (pkt_len < 2) {
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 *  Find length of frame.
		 *  expectlen is the length we need for a full frame.
		 */
		cfpkt_peek_head(pkt, &tmp, 2);
		expectlen = le16_to_cpu(tmp) + 2;
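		/*
		 * Example: a length field of 0x0010 (16) gives an
		 * expectlen of 18: the two length bytes plus the 16
		 * bytes that follow them.
		 */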
		/*
		 * Frame error handling: a length outside the valid range
		 * means the length field itself is corrupt.
		 */
		if (expectlen < CAIF_MINIMUM_PACKET_SIZE
		    || expectlen > CAIF_MAX_FRAMESIZE) {
			if (!layr->usestx) {
				if (pkt != NULL)
					cfpkt_destroy(pkt);
				layr->incomplete_frm = NULL;
				expectlen = 0;
				spin_unlock(&layr->sync);
				return -EPROTO;
			}
			continue;
		}

		if (pkt_len < expectlen) {
			/* Too little received data */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Enough data for at least one frame.
		 * Split the frame, if too long
		 */
		if (pkt_len > expectlen)
			tail_pkt = cfpkt_split(pkt, expectlen);
		else
			tail_pkt = NULL;

		/* Send the first part of packet upwards.*/
		spin_unlock(&layr->sync);
		ret = layr->layer.up->receive(layr->layer.up, pkt);
		spin_lock(&layr->sync);
		if (ret == -EILSEQ) {
			if (layr->usestx) {
				if (tail_pkt != NULL)
					pkt = cfpkt_append(pkt, tail_pkt, 0);
				/* Start search for next STX if frame failed */
				continue;
			} else {
				cfpkt_destroy(pkt);
				pkt = NULL;
			}
		}

		pkt = tail_pkt;

	} while (pkt != NULL);

	spin_unlock(&layr->sync);
	return 0;
}

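/*
 * Downstream path: prepend the STX delimiter when in use and pass the
 * packet to the layer below; upper layers are assumed to have already
 * written the length field.
 */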
static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(layer);
	int ret;
	u8 tmp8 = CFSERL_STX;
	if (layr->usestx)
		cfpkt_add_head(newpkt, &tmp8, 1);
	ret = layer->dn->transmit(layer->dn, newpkt);
	/* On failure, strip the STX we prepended so the caller may retry */
	if (ret < 0 && layr->usestx)
		cfpkt_extr_head(newpkt, &tmp8, 1);

	return ret;
}

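/*
 * Control commands (e.g. flow-control and link-state indications)
 * carry no framing of their own, so they are forwarded up unchanged.
 */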
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
				int phyid)
{
	layr->up->ctrlcmd(layr->up, ctrl, phyid);
}