Commit b33ce442 authored by Linus Torvalds

Merge branch 'for-3.15/drivers' of git://git.kernel.dk/linux-block

Pull block driver update from Jens Axboe:
 "On top of the core pull request, here's the pull request for the
  driver related changes for 3.15.  It contains:

   - Improvements for msi-x registration for block drivers (mtip32xx,
     skd, cciss, nvme) from Alexander Gordeev.

   - A round of cleanups and improvements for drbd from Andreas
     Gruenbacher and Rashika Kheria.

   - A round of cleanups and improvements for bcache from Kent.

   - Removal of sleep_on() and friends in DAC960, ataflop, swim3 from
     Arnd Bergmann.

   - A bug fix for the mtip32xx async completion code from Sam
     Bradshaw.

   - Bug fix for accidentally bouncing IO on 32-bit platforms with
     mtip32xx from Felipe Franciosi"

* 'for-3.15/drivers' of git://git.kernel.dk/linux-block: (103 commits)
  bcache: remove nested function usage
  bcache: Kill bucket->gc_gen
  bcache: Kill unused freelist
  bcache: Rework btree cache reserve handling
  bcache: Kill btree_io_wq
  bcache: btree locking rework
  bcache: Fix a race when freeing btree nodes
  bcache: Add a real GC_MARK_RECLAIMABLE
  bcache: Add bch_keylist_init_single()
  bcache: Improve priority_stats
  bcache: Better alloc tracepoints
  bcache: Kill dead cgroup code
  bcache: stop moving_gc marking buckets that can't be moved.
  bcache: Fix moving_pred()
  bcache: Fix moving_gc deadlocking with a foreground write
  bcache: Fix discard granularity
  bcache: Fix another bug recovering from unclean shutdown
  bcache: Fix a bug recovering from unclean shutdown
  bcache: Fix a journalling reclaim after recovery bug
  bcache: Fix a null ptr deref in journal replay
  ...
This describes the in kernel data structure for DRBD-9. Starting with
Linux v3.14 we are reorganizing DRBD to use this data structure.
Basic Data Structure
====================
A node has a number of DRBD resources. Each such resource has a number of
devices (aka volumes) and connections to other nodes ("peer nodes"). Each DRBD
device is represented by a block device locally.
The DRBD objects are interconnected to form a matrix as depicted below; a
drbd_peer_device object sits at each intersection between a drbd_device and a
drbd_connection:
/--------------+---------------+.....+---------------\
|   resource   |    device     |     |    device     |
+--------------+---------------+.....+---------------+
|  connection  |  peer_device  |     |  peer_device  |
+--------------+---------------+.....+---------------+
:              :               :     :               :
:              :               :     :               :
+--------------+---------------+.....+---------------+
|  connection  |  peer_device  |     |  peer_device  |
\--------------+---------------+.....+---------------/
In this table, horizontally, devices can be accessed from resources by their
volume number. Likewise, peer_devices can be accessed from connections by
their volume number. Objects in the vertical direction are connected by double
linked lists. There are back pointers from peer_devices to their connections and
devices, and from connections and devices to their resource.
All resources are in the drbd_resources double-linked list. In addition, all
devices can be accessed by their minor device number via the drbd_devices idr.
The drbd_resource, drbd_connection, and drbd_device objects are reference
counted. The peer_device objects only serve to establish the links between
devices and connections; their lifetime is determined by the lifetime of the
device and connection which they reference.
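
To make the matrix concrete, here is a minimal C sketch of the four object
types and their links. The struct names match the kernel's, but the fields
shown are illustrative only; the authoritative definitions live in
drivers/block/drbd/drbd_int.h and carry many more members.

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/list.h>

struct drbd_resource {
	struct kref kref;
	struct list_head resources;     /* entry in the global drbd_resources list */
	struct idr devices;             /* volume number -> struct drbd_device */
	struct list_head connections;   /* one entry per peer node */
};

struct drbd_connection {
	struct kref kref;
	struct drbd_resource *resource; /* back pointer */
	struct list_head connections;   /* entry in resource->connections */
	struct idr peer_devices;        /* volume number -> struct drbd_peer_device */
};

struct drbd_device {
	struct kref kref;
	struct drbd_resource *resource; /* back pointer */
	unsigned int minor;             /* also indexed by the global drbd_devices idr */
	struct list_head peer_devices;  /* one drbd_peer_device per connection */
};

/* One object per (device, connection) intersection.  It is not reference
 * counted itself; it lives exactly as long as the device and connection
 * it links together. */
struct drbd_peer_device {
	struct drbd_connection *connection;
	struct drbd_device *device;
	struct list_head peer_devices;  /* entry in device->peer_devices */
};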
......@@ -6411,12 +6411,12 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
.ScatterGatherSegments[0]
.SegmentByteCount =
CommandMailbox->ControllerInfo.DataTransferSize;
DAC960_ExecuteCommand(Command);
while (Controller->V2.NewControllerInformation->PhysicalScanActive)
{
DAC960_ExecuteCommand(Command);
sleep_on_timeout(&Controller->CommandWaitQueue, HZ);
}
while (1) {
DAC960_ExecuteCommand(Command);
if (!Controller->V2.NewControllerInformation->PhysicalScanActive)
break;
msleep(1000);
}
DAC960_UserCritical("Discovery Completed\n", Controller);
}
}
......@@ -7035,18 +7035,16 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -EFAULT;
break;
}
while (Controller->V2.HealthStatusBuffer->StatusChangeCounter
== HealthStatusBuffer.StatusChangeCounter &&
Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
== HealthStatusBuffer.NextEventSequenceNumber)
{
interruptible_sleep_on_timeout(&Controller->HealthStatusWaitQueue,
DAC960_MonitoringTimerInterval);
if (signal_pending(current)) {
ErrorCode = -EINTR;
break;
}
}
ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
!(Controller->V2.HealthStatusBuffer->StatusChangeCounter
== HealthStatusBuffer.StatusChangeCounter &&
Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
== HealthStatusBuffer.NextEventSequenceNumber),
DAC960_MonitoringTimerInterval);
if (ErrorCode == -ERESTARTSYS) {
ErrorCode = -EINTR;
break;
}
if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
Controller->V2.HealthStatusBuffer,
sizeof(DAC960_V2_HealthStatusBuffer_T)))
......
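
The DAC960 hunks above are one instance of the sleep_on() removal mentioned in
the pull request: the old interruptible_sleep_on_timeout() call can miss a
wakeup that arrives between testing the condition and going to sleep, while
wait_event_interruptible_timeout() re-checks the condition under the wait
queue's own locking. A reduced sketch of the conversion, with the wait queue,
condition and timeout purely illustrative:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);  /* illustrative wait queue */
static int my_cond;                     /* illustrative condition, set by the waker */

static int wait_for_my_cond(void)
{
	long ret;

	/*
	 * Old, racy pattern (removed throughout this series):
	 *
	 *	while (!my_cond)
	 *		interruptible_sleep_on_timeout(&my_wq, HZ);
	 *
	 * New pattern: the condition is checked atomically with going to sleep.
	 */
	ret = wait_event_interruptible_timeout(my_wq, my_cond, HZ);
	if (ret == -ERESTARTSYS)	/* interrupted by a signal */
		return -EINTR;
	if (ret == 0)			/* timed out, condition still false */
		return -ETIMEDOUT;
	return 0;			/* condition became true; ret is the time left */
}

/* The waker side sets my_cond and then calls wake_up(&my_wq). */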
......@@ -68,6 +68,8 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/wait.h>
#include <asm/atafd.h>
#include <asm/atafdreg.h>
......@@ -301,7 +303,7 @@ module_param_array(UserSteprate, int, NULL, 0);
/* Synchronization of FDC access. */
static volatile int fdc_busy = 0;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(format_wait);
static DECLARE_COMPLETION(format_wait);
static unsigned long changed_floppies = 0xff, fake_change = 0;
#define CHECK_CHANGE_DELAY HZ/2
......@@ -608,7 +610,7 @@ static void fd_error( void )
if (IsFormatting) {
IsFormatting = 0;
FormatError = 1;
wake_up( &format_wait );
complete(&format_wait);
return;
}
......@@ -650,9 +652,8 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
DPRINT(("do_format( dr=%d tr=%d he=%d offs=%d )\n",
drive, desc->track, desc->head, desc->sect_offset ));
wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
local_irq_save(flags);
while( fdc_busy ) sleep_on( &fdc_wait );
fdc_busy = 1;
stdma_lock(floppy_irq, NULL);
atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */
local_irq_restore(flags);
......@@ -706,7 +707,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
ReqSide = desc->head;
do_fd_action( drive );
sleep_on( &format_wait );
wait_for_completion(&format_wait);
redo_fd_request();
return( FormatError ? -EIO : 0 );
......@@ -1229,7 +1230,7 @@ static void fd_writetrack_done( int status )
goto err_end;
}
wake_up( &format_wait );
complete(&format_wait);
return;
err_end:
......@@ -1497,8 +1498,7 @@ static void redo_fd_request(void)
void do_fd_request(struct request_queue * q)
{
DPRINT(("do_fd_request for pid %d\n",current->pid));
while( fdc_busy ) sleep_on( &fdc_wait );
fdc_busy = 1;
wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
stdma_lock(floppy_irq, NULL);
atari_disable_irq( IRQ_MFP_FDC );
......
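
The ataflop changes above combine two replacements for sleep_on(): a one-shot
completion for "wait until the format interrupt finishes", and a
wait_event()/cmpxchg() pair to claim the fdc_busy flag without losing wakeups.
Stripped down to the two idioms (all names here are illustrative):

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/wait.h>

static DECLARE_COMPLETION(fmt_done);        /* stands in for format_wait */
static DECLARE_WAIT_QUEUE_HEAD(lock_wait);  /* stands in for fdc_wait */
static volatile int busy;                   /* stands in for fdc_busy */

/* One-shot "wait for the interrupt handler to finish the format": */
static void format_submitter(void)
{
	wait_for_completion(&fmt_done);  /* sleeps until complete() below runs */
}

static void format_irq_done(void)
{
	complete(&fmt_done);             /* wakes the waiting submitter */
}

/* Claiming a busy flag: cmpxchg() tests and sets it atomically, and
 * wait_event() re-evaluates the expression after every wakeup, so the
 * old "while (busy) sleep_on(&wq);" race is gone. */
static void acquire_controller(void)
{
	wait_event(lock_wait, cmpxchg(&busy, 0, 1) == 0);
}

static void release_controller(void)
{
	busy = 0;
	wake_up(&lock_wait);
}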
......@@ -4092,11 +4092,9 @@ static void cciss_interrupt_mode(ctlr_info_t *h)
if (err > 0) {
dev_warn(&h->pdev->dev,
"only %d MSI-X vectors available\n", err);
goto default_int_mode;
} else {
dev_warn(&h->pdev->dev,
"MSI-X init failed %d\n", err);
goto default_int_mode;
}
}
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
......
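
The cciss hunk above belongs to Alexander Gordeev's series tidying MSI-X setup
across block drivers: instead of dropping straight to legacy INTx when the
MSI-X request cannot be met, the drivers now fall back to plain MSI first. A
hedged sketch of that three-level fallback, using the generic PCI helpers
rather than cciss's actual code (vector count and names are illustrative):

#include <linux/pci.h>

#define MY_NR_VECS 4  /* illustrative number of requested vectors */

static void my_setup_interrupts(struct pci_dev *pdev, struct msix_entry *entries)
{
	int i, err;

	for (i = 0; i < MY_NR_VECS; i++)
		entries[i].entry = i;

	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
		/* Ask for exactly MY_NR_VECS vectors; a negative return
		 * means the request could not be satisfied. */
		err = pci_enable_msix_range(pdev, entries, MY_NR_VECS, MY_NR_VECS);
		if (err > 0)
			return;         /* MSI-X is up, entries[i].vector hold the IRQs */
		dev_warn(&pdev->dev, "MSI-X init failed %d\n", err);
	}

	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
		if (!pci_enable_msi(pdev))
			return;         /* single-vector MSI is up */
		dev_warn(&pdev->dev, "MSI init failed\n");
	}

	/* Neither worked: keep using the legacy INTx line (pdev->irq). */
}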
......@@ -46,7 +46,7 @@ const struct file_operations drbd_proc_fops = {
.release = drbd_proc_release,
};
void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
static void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
{
/* v is in kB/sec. We don't expect TiByte/sec yet. */
if (unlikely(v >= 1000000)) {
......@@ -66,14 +66,14 @@ void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
* [=====>..............] 33.5% (23456/123456)
* finish: 2:20:20 speed: 6,345 (6,456) K/sec
*/
static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq)
{
unsigned long db, dt, dbdt, rt, rs_left;
unsigned int res;
int i, x, y;
int stalled = 0;
drbd_get_syncer_progress(mdev, &rs_left, &res);
drbd_get_syncer_progress(device, &rs_left, &res);
x = res/50;
y = 20-x;
......@@ -85,21 +85,21 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
seq_printf(seq, ".");
seq_printf(seq, "] ");
if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
seq_printf(seq, "verified:");
else
seq_printf(seq, "sync'ed:");
seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
/* if more than a few GB, display in MB */
if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
if (device->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
seq_printf(seq, "(%lu/%lu)M",
(unsigned long) Bit2KB(rs_left >> 10),
(unsigned long) Bit2KB(mdev->rs_total >> 10));
(unsigned long) Bit2KB(device->rs_total >> 10));
else
seq_printf(seq, "(%lu/%lu)K\n\t",
(unsigned long) Bit2KB(rs_left),
(unsigned long) Bit2KB(mdev->rs_total));
(unsigned long) Bit2KB(device->rs_total));
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
......@@ -114,14 +114,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
* at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at
* least DRBD_SYNC_MARK_STEP time before it will be modified. */
/* ------------------------ ~18s average ------------------------ */
i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS;
dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
i = (device->rs_last_mark + 2) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS))
stalled = 1;
if (!dt)
dt++;
db = mdev->rs_mark_left[i] - rs_left;
db = device->rs_mark_left[i] - rs_left;
rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */
seq_printf(seq, "finish: %lu:%02lu:%02lu",
......@@ -134,11 +134,11 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
/* ------------------------- ~3s average ------------------------ */
if (proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (!dt)
dt++;
db = mdev->rs_mark_left[i] - rs_left;
db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_printf(seq, " -- ");
......@@ -147,34 +147,34 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
/* --------------------- long term average ---------------------- */
/* mean speed since syncer started
* we do account for PausedSync periods */
dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
if (dt == 0)
dt = 1;
db = mdev->rs_total - rs_left;
db = device->rs_total - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_printf(seq, ")");
if (mdev->state.conn == C_SYNC_TARGET ||
mdev->state.conn == C_VERIFY_S) {
if (device->state.conn == C_SYNC_TARGET ||
device->state.conn == C_VERIFY_S) {
seq_printf(seq, " want: ");
seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate);
seq_printf_with_thousands_grouping(seq, device->c_sync_rate);
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
if (proc_details >= 1) {
/* 64 bit:
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(mdev);
unsigned long bm_bits = drbd_bm_bits(device);
unsigned long bit_pos;
unsigned long long stop_sector = 0;
if (mdev->state.conn == C_VERIFY_S ||
mdev->state.conn == C_VERIFY_T) {
bit_pos = bm_bits - mdev->ov_left;
if (verify_can_do_stop_sector(mdev))
stop_sector = mdev->ov_stop_sector;
if (device->state.conn == C_VERIFY_S ||
device->state.conn == C_VERIFY_T) {
bit_pos = bm_bits - device->ov_left;
if (verify_can_do_stop_sector(device))
stop_sector = device->ov_stop_sector;
} else
bit_pos = mdev->bm_resync_fo;
bit_pos = device->bm_resync_fo;
/* Total sectors may be slightly off for oddly
* sized devices. So what. */
seq_printf(seq,
......@@ -202,7 +202,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
{
int i, prev_i = -1;
const char *sn;
struct drbd_conf *mdev;
struct drbd_device *device;
struct net_conf *nc;
char wp;
......@@ -236,72 +236,72 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
*/
rcu_read_lock();
idr_for_each_entry(&minors, mdev, i) {
idr_for_each_entry(&drbd_devices, device, i) {
if (prev_i != i - 1)
seq_printf(seq, "\n");
prev_i = i;
sn = drbd_conn_str(mdev->state.conn);
sn = drbd_conn_str(device->state.conn);
if (mdev->state.conn == C_STANDALONE &&
mdev->state.disk == D_DISKLESS &&
mdev->state.role == R_SECONDARY) {
if (device->state.conn == C_STANDALONE &&
device->state.disk == D_DISKLESS &&
device->state.role == R_SECONDARY) {
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
/* reset mdev->congestion_reason */
bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
/* reset device->congestion_reason */
bdi_rw_congested(&device->rq_queue->backing_dev_info);
nc = rcu_dereference(mdev->tconn->net_conf);
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
"lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c",
i, sn,
drbd_role_str(mdev->state.role),
drbd_role_str(mdev->state.peer),
drbd_disk_str(mdev->state.disk),
drbd_disk_str(mdev->state.pdsk),
drbd_role_str(device->state.role),
drbd_role_str(device->state.peer),
drbd_disk_str(device->state.disk),
drbd_disk_str(device->state.pdsk),
wp,
drbd_suspended(mdev) ? 's' : 'r',
mdev->state.aftr_isp ? 'a' : '-',
mdev->state.peer_isp ? 'p' : '-',
mdev->state.user_isp ? 'u' : '-',
mdev->congestion_reason ?: '-',
test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
mdev->send_cnt/2,
mdev->recv_cnt/2,
mdev->writ_cnt/2,
mdev->read_cnt/2,
mdev->al_writ_cnt,
mdev->bm_writ_cnt,
atomic_read(&mdev->local_cnt),
atomic_read(&mdev->ap_pending_cnt) +
atomic_read(&mdev->rs_pending_cnt),
atomic_read(&mdev->unacked_cnt),
atomic_read(&mdev->ap_bio_cnt),
mdev->tconn->epochs,
write_ordering_chars[mdev->tconn->write_ordering]
drbd_suspended(device) ? 's' : 'r',
device->state.aftr_isp ? 'a' : '-',
device->state.peer_isp ? 'p' : '-',
device->state.user_isp ? 'u' : '-',
device->congestion_reason ?: '-',
test_bit(AL_SUSPENDED, &device->flags) ? 's' : '-',
device->send_cnt/2,
device->recv_cnt/2,
device->writ_cnt/2,
device->read_cnt/2,
device->al_writ_cnt,
device->bm_writ_cnt,
atomic_read(&device->local_cnt),
atomic_read(&device->ap_pending_cnt) +
atomic_read(&device->rs_pending_cnt),
atomic_read(&device->unacked_cnt),
atomic_read(&device->ap_bio_cnt),
first_peer_device(device)->connection->epochs,
write_ordering_chars[first_peer_device(device)->connection->write_ordering]
);
seq_printf(seq, " oos:%llu\n",
Bit2KB((unsigned long long)
drbd_bm_total_weight(mdev)));
drbd_bm_total_weight(device)));
}
if (mdev->state.conn == C_SYNC_SOURCE ||
mdev->state.conn == C_SYNC_TARGET ||
mdev->state.conn == C_VERIFY_S ||
mdev->state.conn == C_VERIFY_T)
drbd_syncer_progress(mdev, seq);
if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) {
lc_seq_printf_stats(seq, mdev->resync);
lc_seq_printf_stats(seq, mdev->act_log);
put_ldev(mdev);
if (device->state.conn == C_SYNC_SOURCE ||
device->state.conn == C_SYNC_TARGET ||
device->state.conn == C_VERIFY_S ||
device->state.conn == C_VERIFY_T)
drbd_syncer_progress(device, seq);
if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
lc_seq_printf_stats(seq, device->resync);
lc_seq_printf_stats(seq, device->act_log);
put_ldev(device);
}
if (proc_details >= 2) {
if (mdev->resync) {
lc_seq_dump_details(seq, mdev->resync, "rs_left",
if (device->resync) {
lc_seq_dump_details(seq, device->resync, "rs_left",
resync_dump_detail);
}
}
......
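
One small piece of arithmetic in drbd_syncer_progress() above deserves a note:
the finish-time estimate is simply "bits left divided by the recent rate", but
it is evaluated in an order chosen to avoid overflow (the comment in the code
points at drivers/md/md.c for the same trick) and so that a zero rate cannot
cause a division by zero. As a hypothetical standalone helper:

/* dt: seconds covered by the sampling window, db: bits resynced in that
 * window, rs_left: bits still out of sync.  Returns the estimated number
 * of seconds remaining, i.e. rs_left * dt / db, evaluated exactly as in
 * drbd_syncer_progress(). */
static unsigned long resync_eta_seconds(unsigned long dt, unsigned long db,
					unsigned long rs_left)
{
	/* Dividing rs_left down by (db/100 + 1) before multiplying by dt
	 * keeps the intermediate value small, and the "+ 1" avoids a
	 * division by zero when no blocks were synced in the window. */
	return (dt * (rs_left / (db / 100 + 1))) / 100;
}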
#ifndef __DRBD_PROTOCOL_H
#define __DRBD_PROTOCOL_H
enum drbd_packet {
/* receiver (data socket) */
P_DATA = 0x00,
P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
P_RS_DATA_REPLY = 0x02, /* Response to P_RS_DATA_REQUEST */
P_BARRIER = 0x03,
P_BITMAP = 0x04,
P_BECOME_SYNC_TARGET = 0x05,
P_BECOME_SYNC_SOURCE = 0x06,
P_UNPLUG_REMOTE = 0x07, /* Used at various times to hint the peer */
P_DATA_REQUEST = 0x08, /* Used to ask for a data block */
P_RS_DATA_REQUEST = 0x09, /* Used to ask for a data block for resync */
P_SYNC_PARAM = 0x0a,
P_PROTOCOL = 0x0b,
P_UUIDS = 0x0c,
P_SIZES = 0x0d,
P_STATE = 0x0e,
P_SYNC_UUID = 0x0f,
P_AUTH_CHALLENGE = 0x10,
P_AUTH_RESPONSE = 0x11,
P_STATE_CHG_REQ = 0x12,
/* asender (meta socket) */
P_PING = 0x13,
P_PING_ACK = 0x14,
P_RECV_ACK = 0x15, /* Used in protocol B */
P_WRITE_ACK = 0x16, /* Used in protocol C */
P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
P_SUPERSEDED = 0x18, /* Used in proto C, two-primaries conflict detection */
P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
P_BARRIER_ACK = 0x1c,
P_STATE_CHG_REPLY = 0x1d,
/* "new" commands, no longer fitting into the ordering scheme above */
P_OV_REQUEST = 0x1e, /* data socket */
P_OV_REPLY = 0x1f,
P_OV_RESULT = 0x20, /* meta socket */
P_CSUM_RS_REQUEST = 0x21, /* data socket */
P_RS_IS_IN_SYNC = 0x22, /* meta socket */
P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
/* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
/* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
P_CONN_ST_CHG_REQ = 0x2a, /* data sock: Connection wide state request */
P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */
P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */
P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
/* special command ids for handshake */
P_INITIAL_META = 0xfff1, /* First Packet on the MetaSock */
P_INITIAL_DATA = 0xfff2, /* First Packet on the Socket */
P_CONNECTION_FEATURES = 0xfffe /* FIXED for the next century! */
};
#ifndef __packed
#define __packed __attribute__((packed))
#endif
/* This is the layout for a packet on the wire.
* The byteorder is the network byte order.
* (except block_id and barrier fields.
* these are pointers to local structs
* and have no relevance for the partner,
* which just echoes them as received.)
*
* NOTE that the payload starts at a long aligned offset,
* regardless of 32 or 64 bit arch!
*/
struct p_header80 {
u32 magic;
u16 command;
u16 length; /* bytes of data after this header */
} __packed;
/* Header for big packets, Used for data packets exceeding 64kB */
struct p_header95 {
u16 magic; /* use DRBD_MAGIC_BIG here */
u16 command;
u32 length;
} __packed;
struct p_header100 {
u32 magic;
u16 volume;
u16 command;
u32 length;
u32 pad;
} __packed;
/* these defines must not be changed without changing the protocol version */
#define DP_HARDBARRIER 1 /* deprecated */
#define DP_RW_SYNC 2 /* equals REQ_SYNC */
#define DP_MAY_SET_IN_SYNC 4
#define DP_UNPLUG 8 /* not used anymore */
#define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_FLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */
#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
struct p_data {
u64 sector; /* 64 bits sector number */
u64 block_id; /* to identify the request in protocol B&C */
u32 seq_num;
u32 dp_flags;
} __packed;
/*
* commands which share a struct:
* p_block_ack:
* P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
* P_SUPERSEDED (proto C, two-primaries conflict detection)
* p_block_req:
* P_DATA_REQUEST, P_RS_DATA_REQUEST
*/
struct p_block_ack {
u64 sector;
u64 block_id;
u32 blksize;
u32 seq_num;
} __packed;
struct p_block_req {
u64 sector;
u64 block_id;
u32 blksize;
u32 pad; /* to multiple of 8 Byte */
} __packed;
/*
* commands with their own struct for additional fields:
* P_CONNECTION_FEATURES
* P_BARRIER
* P_BARRIER_ACK
* P_SYNC_PARAM
* ReportParams
*/
struct p_connection_features {
u32 protocol_min;
u32 feature_flags;
u32 protocol_max;
/* should be more than enough for future enhancements
* for now, feature_flags and the reserved array shall be zero.
*/
u32 _pad;
u64 reserved[7];
} __packed;
struct p_barrier {
u32 barrier; /* barrier number _handle_ only */
u32 pad; /* to multiple of 8 Byte */
} __packed;
struct p_barrier_ack {
u32 barrier;
u32 set_size;
} __packed;
struct p_rs_param {
u32 resync_rate;
/* Since protocol version 88 and higher. */
char verify_alg[0];
} __packed;
struct p_rs_param_89 {
u32 resync_rate;
/* protocol version 89: */
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
} __packed;
struct p_rs_param_95 {
u32 resync_rate;
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
u32 c_plan_ahead;
u32 c_delay_target;
u32 c_fill_target;
u32 c_max_rate;
} __packed;
enum drbd_conn_flags {
CF_DISCARD_MY_DATA = 1,
CF_DRY_RUN = 2,
};
struct p_protocol {
u32 protocol;
u32 after_sb_0p;
u32 after_sb_1p;
u32 after_sb_2p;
u32 conn_flags;
u32 two_primaries;
/* Since protocol version 87 and higher. */
char integrity_alg[0];
} __packed;
struct p_uuids {
u64 uuid[UI_EXTENDED_SIZE];
} __packed;
struct p_rs_uuid {
u64 uuid;
} __packed;
struct p_sizes {
u64 d_size; /* size of disk */
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
u32 max_bio_size; /* Maximal size of a BIO */
u16 queue_order_type; /* not yet implemented in DRBD*/
u16 dds_flags; /* use enum dds_flags here. */
} __packed;
struct p_state {
u32 state;
} __packed;
struct p_req_state {
u32 mask;
u32 val;
} __packed;
struct p_req_state_reply {
u32 retcode;
} __packed;
struct p_drbd06_param {
u64 size;
u32 state;
u32 blksize;
u32 protocol;
u32 version;
u32 gen_cnt[5];
u32 bit_map_gen[5];
} __packed;
struct p_block_desc {
u64 sector;
u32 blksize;
u32 pad; /* to multiple of 8 Byte */
} __packed;
/* Valid values for the encoding field.
* Bump proto version when changing this. */
enum drbd_bitmap_code {
/* RLE_VLI_Bytes = 0,
* and other bit variants had been defined during
* algorithm evaluation. */
RLE_VLI_Bits = 2,
};
struct p_compressed_bm {
/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
* (encoding & 0x80): polarity (set/unset) of first runlength
* ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
* used to pad up to head.length bytes
*/
u8 encoding;
u8 code[0];
} __packed;
struct p_delay_probe93 {
u32 seq_num; /* sequence number to match the two probe packets */
u32 offset; /* usecs the probe got sent after the reference time point */
} __packed;
/*
* Bitmap packets need to fit within a single page on the sender and receiver,
* so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
*/
#define DRBD_SOCKET_BUFFER_SIZE 4096
#endif /* __DRBD_PROTOCOL_H */
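
To tie the header formats above to the byte-order comment, a hypothetical
sender could prepare the 16-byte p_header100 like this; DRBD_MAGIC_100 is
assumed to come from <linux/drbd.h>, and the real send path in drbd_main.c
does considerably more:

#include <linux/types.h>
#include <asm/byteorder.h>

static void prepare_header100(struct p_header100 *h, u16 volume,
			      enum drbd_packet cmd, u32 payload_size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC_100);  /* assumed constant from <linux/drbd.h> */
	h->volume  = cpu_to_be16(volume);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(payload_size);    /* bytes of payload after the header */
	h->pad     = 0;
	/* The payload then starts at offset sizeof(*h) == 16 bytes, which
	 * keeps it 8-byte aligned on both 32-bit and 64-bit hosts, as the
	 * comment in this header requires. */
}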
......@@ -275,17 +275,17 @@ struct bio_and_error {
int error;
};
extern void start_new_tl_epoch(struct drbd_tconn *tconn);
extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
extern void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
/* this is in drbd_main.c */
extern void drbd_restart_request(struct drbd_request *req);
......@@ -294,14 +294,14 @@ extern void drbd_restart_request(struct drbd_request *req);
* outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *device = req->device;
struct bio_and_error m;
int rv;
/* __req_mod possibly frees req, do not touch req after that! */
rv = __req_mod(req, what, &m);
if (m.bio)
complete_master_bio(mdev, &m);
complete_master_bio(device, &m);
return rv;
}
......@@ -314,16 +314,16 @@ static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
unsigned long flags;
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *device = req->device;
struct bio_and_error m;
int rv;
spin_lock_irqsave(&mdev->tconn->req_lock, flags);
spin_lock_irqsave(&device->resource->req_lock, flags);
rv = __req_mod(req, what, &m);
spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (m.bio)
complete_master_bio(mdev, &m);
complete_master_bio(device, &m);
return rv;
}
......
#ifndef DRBD_STATE_H
#define DRBD_STATE_H
struct drbd_conf;
struct drbd_tconn;
struct drbd_device;
struct drbd_connection;
/**
* DOC: DRBD State macros
......@@ -107,36 +107,36 @@ union drbd_dev_state {
unsigned int i;
};
extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
extern enum drbd_state_rv drbd_change_state(struct drbd_device *device,
enum chg_state_flags f,
union drbd_state mask,
union drbd_state val);
extern void drbd_force_state(struct drbd_conf *, union drbd_state,
extern void drbd_force_state(struct drbd_device *, union drbd_state,
union drbd_state);
extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
extern enum drbd_state_rv _drbd_request_state(struct drbd_device *,
union drbd_state,
union drbd_state,
enum chg_state_flags);
extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
extern enum drbd_state_rv __drbd_set_state(struct drbd_device *, union drbd_state,
enum chg_state_flags,
struct completion *done);
extern void print_st_err(struct drbd_conf *, union drbd_state,
extern void print_st_err(struct drbd_device *, union drbd_state,
union drbd_state, int);
enum drbd_state_rv
_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags);
enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags);
extern void drbd_resume_al(struct drbd_conf *mdev);
extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
extern void drbd_resume_al(struct drbd_device *device);
extern bool conn_all_vols_unconf(struct drbd_connection *connection);
/**
* drbd_request_state() - Reqest a state change
* @mdev: DRBD device.
* @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
*
......@@ -144,18 +144,18 @@ extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
* quite verbose in case the state change is not possible, and all those
* state changes are globally serialized.
*/
static inline int drbd_request_state(struct drbd_conf *mdev,
static inline int drbd_request_state(struct drbd_device *device,
union drbd_state mask,
union drbd_state val)
{
return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
}
enum drbd_role conn_highest_role(struct drbd_tconn *tconn);
enum drbd_role conn_highest_peer(struct drbd_tconn *tconn);
enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn);
enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn);
enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn);
enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn);
enum drbd_role conn_highest_role(struct drbd_connection *connection);
enum drbd_role conn_highest_peer(struct drbd_connection *connection);
enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection);
enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection);
enum drbd_conns conn_lowest_conn(struct drbd_connection *connection);
#endif
......@@ -24,6 +24,7 @@
*/
#include <linux/drbd.h>
#include "drbd_strings.h"
static const char *drbd_conn_s_names[] = {
[C_STANDALONE] = "StandAlone",
......
#ifndef __DRBD_STRINGS_H
#define __DRBD_STRINGS_H
extern const char *drbd_conn_str(enum drbd_conns);
extern const char *drbd_role_str(enum drbd_role);
extern const char *drbd_disk_str(enum drbd_disk_state);
extern const char *drbd_set_st_err_str(enum drbd_state_rv);
#endif /* __DRBD_STRINGS_H */
......@@ -9,12 +9,12 @@
extern char *drbd_sec_holder;
/* sets the number of 512 byte sectors of our virtual device */
static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
static inline void drbd_set_my_capacity(struct drbd_device *device,
sector_t size)
{
/* set_capacity(mdev->this_bdev->bd_disk, size); */
set_capacity(mdev->vdisk, size);
mdev->this_bdev->bd_inode->i_size = (loff_t)size << 9;
/* set_capacity(device->this_bdev->bd_disk, size); */
set_capacity(device->vdisk, size);
device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
}
#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
......@@ -27,20 +27,20 @@ extern void drbd_request_endio(struct bio *bio, int error);
/*
* used to submit our private bio
*/
static inline void drbd_generic_make_request(struct drbd_conf *mdev,
static inline void drbd_generic_make_request(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
if (!bio->bi_bdev) {
printk(KERN_ERR "drbd%d: drbd_generic_make_request: "
"bio->bi_bdev == NULL\n",
mdev_to_minor(mdev));
device_to_minor(device));
dump_stack();
bio_endio(bio, -ENODEV);
return;
}
if (drbd_insert_fault(mdev, fault_type))
if (drbd_insert_fault(device, fault_type))
bio_endio(bio, -EIO);
else
generic_make_request(bio);
......
......@@ -92,7 +92,7 @@
/* Driver name and version strings */
#define MTIP_DRV_NAME "mtip32xx"
#define MTIP_DRV_VERSION "1.3.0"
#define MTIP_DRV_VERSION "1.3.1"
/* Maximum number of minor device numbers per device. */
#define MTIP_MAX_MINORS 16
......
......@@ -24,11 +24,3 @@ config BCACHE_CLOSURES_DEBUG
Keeps all active closures in a linked list and provides a debugfs
interface to list them, which makes it possible to see asynchronous
operations that get stuck.
# cgroup code needs to be updated:
#
#config CGROUP_BCACHE
# bool "Cgroup controls for bcache"
# depends on BCACHE && BLK_CGROUP
# ---help---
# TODO