Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/async_tx.h   | 156
-rw-r--r-- | include/linux/dmaengine.h  | 293
-rw-r--r-- | include/linux/pci_ids.h    |   3
-rw-r--r-- | include/linux/raid/raid5.h |  97
-rw-r--r-- | include/linux/raid/xor.h   |   5
5 files changed, 439 insertions, 115 deletions
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
new file mode 100644
index 00000000000..ff1255079fa
--- /dev/null
+++ b/include/linux/async_tx.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright © 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _ASYNC_TX_H_
+#define _ASYNC_TX_H_
+#include <linux/dmaengine.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+/**
+ * dma_chan_ref - object used to manage dma channels received from the
+ * dmaengine core.
+ * @chan - the channel being tracked
+ * @node - node for the channel to be placed on async_tx_master_list
+ * @rcu - for list_del_rcu
+ * @count - number of times this channel is listed in the pool
+ *	(for channels with multiple capabilities)
+ */
+struct dma_chan_ref {
+	struct dma_chan *chan;
+	struct list_head node;
+	struct rcu_head rcu;
+	atomic_t count;
+};
+
+/**
+ * async_tx_flags - modifiers for the async_* calls
+ * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where
+ * the destination address is not a source.  The asynchronous case handles
+ * this implicitly, the synchronous case needs to zero the destination block.
+ * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address
+ * is also one of the source addresses.  In the synchronous case the
+ * destination address is an implied source, whereas in the asynchronous case
+ * it must be listed as a source.  The destination address must be the first
+ * address in the source array.
+ * @ASYNC_TX_ASSUME_COHERENT: skip cache maintenance operations
+ * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
+ * dependency chain
+ * @ASYNC_TX_DEP_ACK: ack the dependency descriptor.  Useful for chaining.
+ * @ASYNC_TX_KMAP_SRC: if the transaction is to be performed synchronously
+ * take an atomic mapping (KM_USER0) on the source page(s)
+ * @ASYNC_TX_KMAP_DST: if the transaction is to be performed synchronously
+ * take an atomic mapping (KM_USER0) on the dest page(s)
+ */
+enum async_tx_flags {
+	ASYNC_TX_XOR_ZERO_DST	 = (1 << 0),
+	ASYNC_TX_XOR_DROP_DST	 = (1 << 1),
+	ASYNC_TX_ASSUME_COHERENT = (1 << 2),
+	ASYNC_TX_ACK		 = (1 << 3),
+	ASYNC_TX_DEP_ACK	 = (1 << 4),
+	ASYNC_TX_KMAP_SRC	 = (1 << 5),
+	ASYNC_TX_KMAP_DST	 = (1 << 6),
+};
+
+#ifdef CONFIG_DMA_ENGINE
+void async_tx_issue_pending_all(void);
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
+struct dma_chan *
+async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+	enum dma_transaction_type tx_type);
+#else
+static inline void async_tx_issue_pending_all(void)
+{
+	do { } while (0);
+}
+
+static inline enum dma_status
+dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+	return DMA_SUCCESS;
+}
+
+static inline void
+async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
+{
+	do { } while (0);
+}
+
+static inline struct dma_chan *
+async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+	enum dma_transaction_type tx_type)
+{
+	return NULL;
+}
+#endif
+
+/**
+ * async_tx_sync_epilog - actions to take if an operation is run synchronously
+ * @flags: async_tx flags
+ * @depend_tx: transaction depends on depend_tx
+ * @cb_fn: function to call when the transaction completes
+ * @cb_fn_param: parameter to pass to the callback routine
+ */
+static inline void
+async_tx_sync_epilog(unsigned long flags,
+	struct dma_async_tx_descriptor *depend_tx,
+	dma_async_tx_callback cb_fn, void *cb_fn_param)
+{
+	if (cb_fn)
+		cb_fn(cb_fn_param);
+
+	if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
+		async_tx_ack(depend_tx);
+}
+
+void
+async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
+	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
+	dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+struct dma_async_tx_descriptor *
+async_xor(struct page *dest, struct page **src_list, unsigned int offset,
+	int src_cnt, size_t len, enum async_tx_flags flags,
+	struct dma_async_tx_descriptor *depend_tx,
+	dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+struct dma_async_tx_descriptor *
+async_xor_zero_sum(struct page *dest, struct page **src_list,
+	unsigned int offset, int src_cnt, size_t len,
+	u32 *result, enum async_tx_flags flags,
+	struct dma_async_tx_descriptor *depend_tx,
+	dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+struct dma_async_tx_descriptor *
+async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
+	unsigned int src_offset, size_t len, enum async_tx_flags flags,
+	struct dma_async_tx_descriptor *depend_tx,
+	dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+struct dma_async_tx_descriptor *
+async_memset(struct page *dest, int val, unsigned int offset,
+	size_t len, enum async_tx_flags flags,
+	struct dma_async_tx_descriptor *depend_tx,
+	dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+struct dma_async_tx_descriptor *
+async_trigger_callback(enum async_tx_flags flags,
+	struct dma_async_tx_descriptor *depend_tx,
+	dma_async_tx_callback cb_fn, void *cb_fn_param);
+#endif /* _ASYNC_TX_H_ */
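A minimal usage sketch of the interface above (illustrative only, not part of
the patch; the page pointers and the stripe_done callback are hypothetical):

	struct dma_async_tx_descriptor *tx;

	/* pull new data into the first source page */
	tx = async_memcpy(srcs[0], bio_page, 0, 0, PAGE_SIZE, 0,
			  NULL, NULL, NULL);
	/* recompute parity once the copy completes, possibly on another
	 * channel; ASYNC_TX_DEP_ACK releases the memcpy descriptor */
	tx = async_xor(parity, srcs, 0, 3, PAGE_SIZE,
		       ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_DEP_ACK,
		       tx, stripe_done, NULL);
	async_tx_issue_pending_all();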
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c94d8f1d62e..a3b6035b6c8 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -21,29 +21,40 @@
 #ifndef DMAENGINE_H
 #define DMAENGINE_H
 
-#ifdef CONFIG_DMA_ENGINE
-
 #include <linux/device.h>
 #include <linux/uio.h>
 #include <linux/kref.h>
 #include <linux/completion.h>
 #include <linux/rcupdate.h>
+#include <linux/dma-mapping.h>
 
 /**
- * enum dma_event - resource PNP/power managment events
+ * enum dma_state - resource PNP/power management state
  * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
  * @DMA_RESOURCE_RESUME: DMA device returning to full power
- * @DMA_RESOURCE_ADDED: DMA device added to the system
+ * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
  * @DMA_RESOURCE_REMOVED: DMA device removed from the system
  */
-enum dma_event {
+enum dma_state {
 	DMA_RESOURCE_SUSPEND,
 	DMA_RESOURCE_RESUME,
-	DMA_RESOURCE_ADDED,
+	DMA_RESOURCE_AVAILABLE,
 	DMA_RESOURCE_REMOVED,
 };
 
 /**
+ * enum dma_state_client - state of the channel in the client
+ * @DMA_ACK: client would like to use, or was using this channel
+ * @DMA_DUP: client has already seen this channel, or is not using it
+ * @DMA_NAK: client does not want to see any more channels
+ */
+enum dma_state_client {
+	DMA_ACK,
+	DMA_DUP,
+	DMA_NAK,
+};
+
+/**
  * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
@@ -65,6 +76,31 @@ enum dma_status {
 };
 
 /**
+ * enum dma_transaction_type - DMA transaction types/indexes
+ */
+enum dma_transaction_type {
+	DMA_MEMCPY,
+	DMA_XOR,
+	DMA_PQ_XOR,
+	DMA_DUAL_XOR,
+	DMA_PQ_UPDATE,
+	DMA_ZERO_SUM,
+	DMA_PQ_ZERO_SUM,
+	DMA_MEMSET,
+	DMA_MEMCPY_CRC32C,
+	DMA_INTERRUPT,
+};
+
+/* last transaction type for creation of the capabilities mask */
+#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
+
+/**
+ * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
+ * See linux/cpumask.h
+ */
+typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
+
+/**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
  * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
@@ -80,7 +116,6 @@ struct dma_chan_percpu {
 
 /**
  * struct dma_chan - devices supply DMA channels, clients use them
- * @client: ptr to the client user of this chan, will be %NULL when unused
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
@@ -88,12 +123,10 @@ struct dma_chan_percpu {
  * @refcount: kref, used in "bigref" slow-mode
  * @slow_ref: indicates that the DMA channel is free
  * @rcu: the DMA channel's RCU head
- * @client_node: used to add this to the client chan list
 * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  */
 struct dma_chan {
-	struct dma_client *client;
 	struct dma_device *device;
 	dma_cookie_t cookie;
 
@@ -105,11 +138,11 @@ struct dma_chan {
 	int slow_ref;
 	struct rcu_head rcu;
 
-	struct list_head client_node;
 	struct list_head device_node;
 	struct dma_chan_percpu *local;
 };
 
+
 void dma_chan_cleanup(struct kref *kref);
 
 static inline void dma_chan_get(struct dma_chan *chan)
@@ -134,169 +167,206 @@ static inline void dma_chan_put(struct dma_chan *chan)
 
 /*
  * typedef dma_event_callback - function pointer to a DMA event callback
+ * For each channel added to the system this routine is called for each
+ * client.  If the client would like to use the channel it returns DMA_ACK
+ * to signal the dmaengine core to take out a reference on the channel and
+ * its corresponding device.  A client must not 'ack' an available channel
+ * more than once.  When a channel is removed all clients are notified.  If
+ * a client is using the channel it must 'ack' the removal.  A client must
+ * not 'ack' a removed channel more than once.
+ * @client - 'this' pointer for the client context
+ * @chan - channel to be acted upon
+ * @state - available or removed
  */
-typedef void (*dma_event_callback) (struct dma_client *client,
-		struct dma_chan *chan, enum dma_event event);
+struct dma_client;
+typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
		struct dma_chan *chan, enum dma_state state);
 
 /**
  * struct dma_client - info on the entity making use of DMA services
  * @event_callback: func ptr to call when something happens
- * @chan_count: number of chans allocated
- * @chans_desired: number of chans requested. Can be +/- chan_count
- * @lock: protects access to the channels list
- * @channels: the list of DMA channels allocated
+ * @cap_mask: only return channels that satisfy the requested capabilities;
+ *	a value of zero corresponds to any capability
  * @global_node: list_head for global dma_client_list
  */
 struct dma_client {
 	dma_event_callback	event_callback;
-	unsigned int		chan_count;
-	unsigned int		chans_desired;
-
-	spinlock_t		lock;
-	struct list_head	channels;
+	dma_cap_mask_t		cap_mask;
 	struct list_head	global_node;
 };
 
+typedef void (*dma_async_tx_callback)(void *dma_async_param);
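To make the ack protocol above concrete, a hypothetical client callback
might look like this (my_client_add_channel/my_client_del_channel are
illustrative helpers, not part of the patch):

	static enum dma_state_client
	my_event_callback(struct dma_client *client, struct dma_chan *chan,
			  enum dma_state state)
	{
		switch (state) {
		case DMA_RESOURCE_AVAILABLE:
			/* ack at most once per channel to take a reference */
			return my_client_add_channel(chan) ? DMA_ACK : DMA_DUP;
		case DMA_RESOURCE_REMOVED:
			/* ack only if we were actually using the channel */
			return my_client_del_channel(chan) ? DMA_ACK : DMA_DUP;
		default:
			return DMA_DUP;
		}
	}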
+/**
+ * struct dma_async_tx_descriptor - async transaction descriptor
+ * ---dma generic offload fields---
+ * @cookie: tracking cookie for this transaction, set to -EBUSY if
+ *	this tx is sitting on a dependency list
+ * @ack: the descriptor cannot be reused until the client acknowledges
+ *	receipt, i.e. has a chance to establish any dependency chains
+ * @phys: physical address of the descriptor
+ * @tx_list: driver common field for operations that require multiple
+ *	descriptors
+ * @chan: target channel for this operation
+ * @tx_submit: set the prepared descriptor(s) to be executed by the engine
+ * @tx_set_dest: set a destination address in a hardware descriptor
+ * @tx_set_src: set a source address in a hardware descriptor
+ * @callback: routine to call after this operation is complete
+ * @callback_param: general parameter to pass to the callback routine
+ * ---async_tx api specific fields---
+ * @depend_list: at completion this list of transactions is submitted
+ * @depend_node: allow this transaction to be executed after another
+ *	transaction has completed, possibly on another channel
+ * @parent: pointer to the next level up in the dependency chain
+ * @lock: protect the dependency list
+ */
+struct dma_async_tx_descriptor {
+	dma_cookie_t cookie;
+	int ack;
+	dma_addr_t phys;
+	struct list_head tx_list;
+	struct dma_chan *chan;
+	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+	void (*tx_set_dest)(dma_addr_t addr,
+		struct dma_async_tx_descriptor *tx, int index);
+	void (*tx_set_src)(dma_addr_t addr,
+		struct dma_async_tx_descriptor *tx, int index);
+	dma_async_tx_callback callback;
+	void *callback_param;
+	struct list_head depend_list;
+	struct list_head depend_node;
+	struct dma_async_tx_descriptor *parent;
+	spinlock_t lock;
+};
 
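A driver-facing sketch of how a prepared descriptor is programmed and
submitted, using only the fields above (src_dma/dst_dma are assumed to be
already DMA-mapped addresses; copy_done and ctx are illustrative):

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, len, 0 /* int_en */);
	if (tx) {
		tx->tx_set_src(src_dma, tx, 0);
		tx->tx_set_dest(dst_dma, tx, 0);
		tx->callback = copy_done;	/* optional */
		tx->callback_param = ctx;
		cookie = tx->tx_submit(tx);
	}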
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
+ * @cap_mask: one or more dma_capability flags
+ * @max_xor: maximum number of xor sources, 0 if no capability
  * @refcount: reference count
  * @done: IO completion struct
  * @dev_id: unique device ID
+ * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
  *	number of allocated descriptors
  * @device_free_chan_resources: release DMA channel's resources
- * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
- * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
- * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
- * @device_memcpy_complete: poll the status of an IOAT DMA transaction
- * @device_memcpy_issue_pending: push appended descriptors to hardware
+ * @device_prep_dma_memcpy: prepares a memcpy operation
+ * @device_prep_dma_xor: prepares a xor operation
+ * @device_prep_dma_zero_sum: prepares a zero_sum operation
+ * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_dependency_added: async_tx notifies the channel about new deps
+ * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
 	unsigned int chancnt;
 	struct list_head channels;
 	struct list_head global_node;
+	dma_cap_mask_t  cap_mask;
+	int max_xor;
 
 	struct kref refcount;
 	struct completion done;
 
 	int dev_id;
+	struct device *dev;
 
 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
-	dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan,
-			void *dest, void *src, size_t len);
-	dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan,
-			struct page *page, unsigned int offset, void *kdata,
-			size_t len);
-	dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan,
-			struct page *dest_pg, unsigned int dest_off,
-			struct page *src_pg, unsigned int src_off, size_t len);
-	enum dma_status (*device_memcpy_complete)(struct dma_chan *chan,
+
+	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+		struct dma_chan *chan, size_t len, int int_en);
+	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
+		struct dma_chan *chan, unsigned int src_cnt, size_t len,
+		int int_en);
+	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
+		struct dma_chan *chan, unsigned int src_cnt, size_t len,
+		u32 *result, int int_en);
+	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+		struct dma_chan *chan, int value, size_t len, int int_en);
+	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
+		struct dma_chan *chan);
+
+	void (*device_dependency_added)(struct dma_chan *chan);
+	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
 			dma_cookie_t cookie, dma_cookie_t *last,
 			dma_cookie_t *used);
-	void (*device_memcpy_issue_pending)(struct dma_chan *chan);
+	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
 /* --- public DMA engine API --- */
 
-struct dma_client *dma_async_client_register(dma_event_callback event_callback);
+void dma_async_client_register(struct dma_client *client);
 void dma_async_client_unregister(struct dma_client *client);
-void dma_async_client_chan_request(struct dma_client *client,
-		unsigned int number);
+void dma_async_client_chan_request(struct dma_client *client);
+dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
+	void *dest, void *src, size_t len);
+dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
+	struct page *page, unsigned int offset, void *kdata, size_t len);
+dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
+	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
+	unsigned int src_off, size_t len);
+void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+	struct dma_chan *chan);
 
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
-	void *dest, void *src, size_t len)
+static inline void
+async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
-	int cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
-
-	return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len);
+	tx->ack = 1;
 }
 
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
-	struct page *page, unsigned int offset, void *kdata, size_t len)
+#define first_dma_cap(mask) __first_dma_cap(&(mask))
+static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
 {
-	int cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	return min_t(int, DMA_TX_TYPE_END,
+		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
+}
 
-	return chan->device->device_memcpy_buf_to_pg(chan, page, offset,
-						kdata, len);
+#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
+static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
+{
+	return min_t(int, DMA_TX_TYPE_END,
+		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
 }
 
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
-	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
-	unsigned int src_off, size_t len)
+#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
+static inline void
+__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
 {
-	int cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	set_bit(tx_type, dstp->bits);
+}
 
-	return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off,
-					src_pg, src_off, len);
+#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
+static inline int
+__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
+{
+	return test_bit(tx_type, srcp->bits);
 }
 
+#define for_each_dma_cap_mask(cap, mask) \
+	for ((cap) = first_dma_cap(mask);	\
+		(cap) < DMA_TX_TYPE_END;	\
+		(cap) = next_dma_cap((cap), (mask)))
+
 /**
- * dma_async_memcpy_issue_pending - flush pending copies to HW
+ * dma_async_issue_pending - flush pending transactions to HW
  * @chan: target DMA channel
  *
  * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
  */
-static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
+static inline void dma_async_issue_pending(struct dma_chan *chan)
 {
-	return chan->device->device_memcpy_issue_pending(chan);
+	return chan->device->device_issue_pending(chan);
 }
 
+#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
+
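Putting the mask helpers to use, a hypothetical client registration
(reusing the my_event_callback sketched earlier; illustrative only):

	static struct dma_client my_client;

	my_client.event_callback = my_event_callback;
	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
	dma_cap_set(DMA_XOR, my_client.cap_mask);
	dma_async_client_register(&my_client);
	dma_async_client_chan_request(&my_client);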
 /**
- * dma_async_memcpy_complete - poll for transaction completion
+ * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
  * @last: returns last completed cookie, can be NULL
@@ -306,12 +376,15 @@ static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
  * internal state and can be used with dma_async_is_complete() to check
  * the status of multiple cookies without re-checking hardware state.
  */
-static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan,
+static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
 	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 {
-	return chan->device->device_memcpy_complete(chan, cookie, last, used);
+	return chan->device->device_is_tx_complete(chan, cookie, last, used);
 }
 
+#define dma_async_memcpy_complete(chan, cookie, last, used)\
+	dma_async_is_tx_complete(chan, cookie, last, used)
+
 /**
  * dma_async_is_complete - test a cookie against chan state
  * @cookie: transaction identifier to test status of
@@ -334,6 +407,7 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 	return DMA_IN_PROGRESS;
 }
 
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 
 /* --- DMA device --- */
 
@@ -362,5 +436,4 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
 	struct dma_pinned_list *pinned_list, struct page *page,
 	unsigned int offset, size_t len);
 
-#endif /* CONFIG_DMA_ENGINE */
 #endif /* DMAENGINE_H */
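A polling sketch against the renamed completion interface (illustrative;
chan and the cookies come from earlier submissions):

	dma_cookie_t last, used;

	dma_async_issue_pending(chan);
	while (dma_async_is_tx_complete(chan, cookie, &last, &used)
			== DMA_IN_PROGRESS)
		cpu_relax();
	/* cookies issued before 'cookie' can now be tested against the
	 * cached last/used values without touching hardware again */
	if (dma_async_is_complete(earlier_cookie, last, used) == DMA_SUCCESS)
		reclaim_buffers();	/* hypothetical cleanup */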
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9366182fffa..2c7add16953 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -479,6 +479,9 @@
 #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL	0x252
 
+#define PCI_VENDOR_ID_UNISYS		0x1018
+#define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C
+
 #define PCI_VENDOR_ID_COMPEX2		0x101a /* pci.ids says "AT&T GIS (NCR)" */
 #define PCI_DEVICE_ID_COMPEX2_100VG	0x0005
 
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index d8286db60b9..93678f57ccb 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -116,13 +116,46 @@
  *  attach a request to an active stripe (add_stripe_bh())
  *     lockdev attach-buffer unlockdev
  *  handle a stripe (handle_stripe())
- *     lockstripe clrSTRIPE_HANDLE ... (lockdev check-buffers unlockdev) .. change-state .. record io needed unlockstripe schedule io
+ *     lockstripe clrSTRIPE_HANDLE ...
+ *		(lockdev check-buffers unlockdev) ..
+ *		change-state ..
+ *		record io/ops needed unlockstripe schedule io/ops
  *  release an active stripe (release_stripe())
  *     lockdev if (!--cnt) { if  STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that have activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
- * on a cached buffer.
+ * on a cached buffer, and plus one if the stripe is undergoing stripe
+ * operations.
+ *
+ * Stripe operations are performed outside the stripe lock,
+ * the stripe operations are:
+ * -copying data between the stripe cache and user application buffers
+ * -computing blocks to save a disk access, or to recover a missing block
+ * -updating the parity on a write operation (reconstruct write and
+ *  read-modify-write)
+ * -checking parity correctness
+ * -running i/o to disk
+ * These operations are carried out by raid5_run_ops which uses the async_tx
+ * api to (optionally) offload operations to dedicated hardware engines.
+ * When requesting an operation handle_stripe sets the pending bit for the
+ * operation and increments the count.  raid5_run_ops is then run whenever
+ * the count is non-zero.
+ * There are some critical dependencies between the operations that prevent
+ * some from being requested while another is in flight:
+ * 1/ Parity check operations destroy the in-cache version of the parity
+ *    block, so we prevent parity dependent operations like writes and
+ *    compute_blocks from starting while a check is in progress.  Some dma
+ *    engines can perform the check without damaging the parity block, in
+ *    these cases the parity block is re-marked up to date (assuming the
+ *    check was successful) and is not re-read from disk.
+ * 2/ When a write operation is requested we immediately lock the affected
+ *    blocks, and mark them as not up to date.  This causes new read requests
+ *    to be held off, as well as parity checks and compute block operations.
+ * 3/ Once a compute block operation has been requested handle_stripe treats
+ *    that block as if it is up to date.  raid5_run_ops guarantees that any
+ *    operation that is dependent on the compute block result is initiated
+ *    after the compute block completes.
 */
 
 struct stripe_head {
@@ -136,15 +169,46 @@ struct stripe_head {
 	spinlock_t		lock;
 	int			bm_seq;	/* sequence number for bitmap flushes */
 	int			disks;			/* disks in stripe */
+	/* stripe_operations
+	 * @pending - pending ops flags (set for request->issue->complete)
+	 * @ack - submitted ops flags (set for issue->complete)
+	 * @complete - completed ops flags (set for complete)
+	 * @target - STRIPE_OP_COMPUTE_BLK target
+	 * @count - raid5_run_ops is set to run when this is non-zero
+	 */
+	struct stripe_operations {
+		unsigned long	   pending;
+		unsigned long	   ack;
+		unsigned long	   complete;
+		int		   target;
+		int		   count;
+		u32		   zero_sum_result;
+	} ops;
 	struct r5dev {
 		struct bio	req;
 		struct bio_vec	vec;
 		struct page	*page;
-		struct bio	*toread, *towrite, *written;
+		struct bio	*toread, *read, *towrite, *written;
 		sector_t	sector;			/* sector of this page */
 		unsigned long	flags;
 	} dev[1]; /* allocated with extra space depending of RAID geometry */
 };
 
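As described in the comment above, requesting a stripe operation amounts to
setting a pending bit and bumping the count; schematically (a hypothetical
handle_stripe fragment, under sh->lock):

	set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
	set_bit(R5_Wantcompute, &sh->dev[target].flags);
	sh->ops.target = target;
	sh->ops.count++;	/* raid5_run_ops runs while non-zero */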
+/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
+ * for handle_stripe.  It is only valid under spin_lock(sh->lock);
+ */
+struct stripe_head_state {
+	int syncing, expanding, expanded;
+	int locked, uptodate, to_read, to_write, failed, written;
+	int to_fill, compute, req_compute, non_overwrite;
+	int failed_num;
+};
+
+/* r6_state - extra state data only relevant to r6 */
+struct r6_state {
+	int p_failed, q_failed, qd_idx, failed_num[2];
+};
+
 /* Flags */
 #define	R5_UPTODATE	0	/* page contains current data */
 #define	R5_LOCKED	1	/* IO has been submitted on "req" */
@@ -158,6 +222,15 @@ struct stripe_head {
 #define	R5_ReWrite	9	/* have tried to over-write the readerror */
 
 #define	R5_Expanded	10	/* This block now has post-expand data */
+#define	R5_Wantcompute	11	/* compute_block in progress treat as
+				 * uptodate
+				 */
+#define	R5_Wantfill	12	/* dev->toread contains a bio that needs
+				 * filling
+				 */
+#define	R5_Wantprexor	13	/* distinguish blocks ready for rmw from
+				 * other "towrites"
+				 */
 
 /*
  * Write method
  */
@@ -180,6 +253,24 @@ struct stripe_head {
 #define	STRIPE_EXPAND_SOURCE	10
 #define	STRIPE_EXPAND_READY	11
 /*
+ * Operations flags (in issue order)
+ */
+#define STRIPE_OP_BIOFILL	0
+#define STRIPE_OP_COMPUTE_BLK	1
+#define STRIPE_OP_PREXOR	2
+#define STRIPE_OP_BIODRAIN	3
+#define STRIPE_OP_POSTXOR	4
+#define STRIPE_OP_CHECK	5
+#define STRIPE_OP_IO	6
+
+/* modifiers to the base operations
+ * STRIPE_OP_MOD_REPAIR_PD - compute the parity block and write it back
+ * STRIPE_OP_MOD_DMA_CHECK - parity is not corrupted by the check
+ */
+#define STRIPE_OP_MOD_REPAIR_PD 7
+#define STRIPE_OP_MOD_DMA_CHECK 8
+
+/*
  * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index f0d67cbdea4..3e120587ead 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -3,9 +3,10 @@
 
 #include <linux/raid/md.h>
 
-#define MAX_XOR_BLOCKS 5
+#define MAX_XOR_BLOCKS 4
 
-extern void xor_block(unsigned int count, unsigned int bytes, void **ptr);
+extern void xor_blocks(unsigned int count, unsigned int bytes,
+	void *dest, void **srcs);
 
 struct xor_block_template {
 	struct xor_block_template *next;
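Finally, a sketch of the revised synchronous xor entry point, whose new
signature separates the destination from the sources (the buffer pointers
here are hypothetical kernel addresses):

	void *srcs[MAX_XOR_BLOCKS] = { a, b, c, d };

	/* dest ^= a ^ b ^ c ^ d; at most MAX_XOR_BLOCKS (now 4)
	 * sources are consumed per call */
	xor_blocks(4, PAGE_SIZE, dest, srcs);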