path: root/include/linux/async_tx.h
/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef _ASYNC_TX_H_
#define _ASYNC_TX_H_
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/* on architectures without dma-mapping capabilities we need to ensure
 * that the asynchronous path compiles away
 */
#ifdef CONFIG_HAS_DMA
#define __async_inline
#else
#define __async_inline __always_inline
#endif
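
/*
 * Illustrative sketch (editor's example, not part of this header): marking
 * a helper __async_inline forces inlining on !CONFIG_HAS_DMA builds, so the
 * compiler can see that no dma channel is ever available and discard the
 * hardware path entirely:
 *
 *	static __async_inline struct dma_async_tx_descriptor *
 *	example_op(struct dma_chan *chan)
 *	{
 *		if (chan)
 *			return example_hw_submit(chan);
 *		return NULL;	/ fall back to a synchronous cpu path /
 *	}
 *
 * example_hw_submit() is a hypothetical driver hook, named only for this
 * sketch.
 */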

/**
 * dma_chan_ref - object used to manage dma channels received from the
 *   dmaengine core.
 * @chan - the channel being tracked
 * @node - node for the channel to be placed on async_tx_master_list
 * @rcu - for list_del_rcu
 * @count - number of times this channel is listed in the pool
 *	(for channels with multiple capabilities)
 */
struct dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
	struct rcu_head rcu;
	atomic_t count;
};

/**
 * async_tx_flags - modifiers for the async_* calls
 * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where
 * the destination address is not a source.  The asynchronous case handles
 * this implicitly; the synchronous case needs to zero the destination block.
 * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
 * also one of the source addresses.  In the synchronous case the destination
 * address is an implied source, whereas in the asynchronous case it must be
 * listed as a source.  The destination address must be the first address in
 * the source array.
 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
 * dependency chain
 * @ASYNC_TX_FENCE: specify that the next operation in the dependency
 * chain uses this operation's result as an input
 * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
 * input data.  Required for the read-modify-write (rmw) case.
 */
enum async_tx_flags {
	ASYNC_TX_XOR_ZERO_DST	 = (1 << 0),
	ASYNC_TX_XOR_DROP_DST	 = (1 << 1),
	ASYNC_TX_ACK		 = (1 << 2),
	ASYNC_TX_FENCE		 = (1 << 3),
	ASYNC_TX_PQ_XOR_DST	 = (1 << 4),
};
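
/*
 * Illustrative sketch (editor's example): per the ASYNC_TX_XOR_DROP_DST
 * documentation above, an xor whose destination also contributes input data
 * must list the destination as the first entry of the source array:
 *
 *	srcs[0] = dest;		/ implied source on the synchronous path /
 *	srcs[1] = data0;	/ data0/data1 are hypothetical caller pages /
 *	srcs[2] = data1;
 *	flags = ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK;
 */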

/**
 * struct async_submit_ctl - async_tx submission/completion modifiers
 * @flags: submission modifiers
 * @depend_tx: parent dependency of the current operation being submitted
 * @cb_fn: callback routine to run at operation completion
 * @cb_param: parameter for the callback routine
 * @scribble: caller provided space for dma/page address conversions
 */
struct async_submit_ctl {
	enum async_tx_flags flags;
	struct dma_async_tx_descriptor *depend_tx;
	dma_async_tx_callback cb_fn;
	void *cb_param;
	void *scribble;
};

#ifdef CONFIG_DMA_ENGINE
#define async_tx_issue_pending_all dma_issue_pending_all

/**
 * async_tx_issue_pending - send pending descriptor to the hardware channel
 * @tx: descriptor handle to retrieve hardware context
 *
 * Note: any dependent operations will have already been issued by
 * async_tx_channel_switch, or (in the case of no channel switch) will
 * be already pending on this channel.
 */
static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
	if (likely(tx)) {
		struct dma_chan *chan = tx->chan;
		struct dma_device *dma = chan->device;

		dma->device_issue_pending(chan);
	}
}
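
/*
 * Illustrative sketch (editor's example): a submitter typically flushes just
 * the channel its descriptor landed on:
 *
 *	tx = async_memcpy(dest, src, 0, 0, PAGE_SIZE, &submit);
 *	async_tx_issue_pending(tx);	/ kicks only tx->chan /
 *
 * async_tx_issue_pending_all() instead flushes every registered channel,
 * which suits batching contexts that queue many operations before issuing.
 */
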
#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
#include <asm/async_tx.h>
#else
#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
	 __async_tx_find_channel(dep, type)
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type);
#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
#else
static inline void async_tx_issue_pending_all(void)
{
	do { } while (0);
}

static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
	do { } while (0);
}

static inline struct dma_chan *
async_tx_find_channel(struct async_submit_ctl *submit,
		      enum dma_transaction_type tx_type, struct page **dst,
		      int dst_count, struct page **src, int src_count,
		      size_t len)
{
	return NULL;
}
#endif

/**
 * async_tx_sync_epilog - actions to take if an operation is run synchronously
 * @submit: submission modifiers holding the callback to run (and its
 * parameter) when the transaction completes
 */
static inline void
async_tx_sync_epilog(struct async_submit_ctl *submit)
{
	if (submit->cb_fn)
		submit->cb_fn(submit->cb_param);
}

typedef union {
	unsigned long addr;
	struct page *page;
	dma_addr_t dma;
} addr_conv_t;
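
/*
 * Illustrative sketch (editor's example; the sizing is an assumption of this
 * sketch): the scribble space handed to init_async_submit() is typically an
 * addr_conv_t array with a slot per block, reused by the implementation to
 * convert page addresses to dma addresses in place:
 *
 *	addr_conv_t conv[NBLOCKS];	/ NBLOCKS: hypothetical block count /
 */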

static inline void
init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
		  struct dma_async_tx_descriptor *tx,
		  dma_async_tx_callback cb_fn, void *cb_param,
		  addr_conv_t *scribble)
{
	args->flags = flags;
	args->depend_tx = tx;
	args->cb_fn = cb_fn;
	args->cb_param = cb_param;
	args->scribble = scribble;
}
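
/*
 * Illustrative usage sketch (editor's example, hypothetical caller): xor two
 * source pages into a separate destination, then flush the channel.
 * complete_fn, ctx, dest, srcs, and conv are assumptions of this sketch:
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 *			  complete_fn, ctx, conv);
 *	tx = async_xor(dest, srcs, 0, 2, PAGE_SIZE, &submit);
 *	async_tx_issue_pending(tx);
 *
 * ASYNC_TX_XOR_ZERO_DST applies because dest is not also a source.  A
 * dependent operation would pass tx as the depend_tx argument of the next
 * init_async_submit() call instead of NULL.
 */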

void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
		     struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	  int src_cnt, size_t len, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
	      int src_cnt, size_t len, enum sum_check_flags *result,
	      struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	     unsigned int src_offset, size_t len,
	     struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
		   size_t len, struct async_submit_ctl *submit);
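
/*
 * Illustrative sketch (editor's example): as used by the raid6 code, the
 * blocks[] array carries the data sources followed by the P and Q
 * destinations, with src_cnt counting all of them:
 *
 *	blocks[0 .. src_cnt-3]	data sources
 *	blocks[src_cnt-2]	P (xor parity) destination
 *	blocks[src_cnt-1]	Q (Reed-Solomon syndrome) destination
 *
 *	tx = async_gen_syndrome(blocks, 0, src_cnt, PAGE_SIZE, &submit);
 */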

struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
			struct page **ptrs, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
			struct page **ptrs, struct async_submit_ctl *submit);

void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
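
/*
 * Illustrative sketch (editor's example): async_tx_quiesce() accepts a
 * possibly-NULL descriptor pointer and waits for it to complete, providing
 * a synchronous boundary before the cpu touches the results:
 *
 *	tx = async_xor(dest, srcs, 0, 2, PAGE_SIZE, &submit);
 *	async_tx_quiesce(&tx);	/ returns once the xor has completed /
 */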
#endif /* _ASYNC_TX_H_ */