/*
 * IMG Meta DMA Controller (MDC) specific DMA code.
 *
 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/ratelimit.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/img_mdc_dma.h>

#include "dmaengine.h"

#define MAX_MDC_DMA_CHANNELS 32
#define MAX_MDC_DMA_BUSY_RETRY 5
#define MDC_DMA_INT_ACTIVE (1<<8) /* INT_ACTIVE bit of Cmds Processed reg */

static DEFINE_SPINLOCK(mdc_dma_lock);
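/*
 * A SoC-specific wrapper driver may register the platform device and call
 * mdc_dma_probe() directly. In that case the wrapper's struct device_driver,
 * rather than img_mdc_dma_driver.driver, owns the channel devices, and
 * mdc_dma_filter_fn() must match against it (see mdc_dma_probe()).
 */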
static struct device_driver *wrapper_driver;

struct mdc_config_data {
	int dma_threads;
	int dma_channels;
	int bus_width;
};

struct mdc_chan {
	struct mdc_dmadev		*mdma;
	struct dma_chan			dchan;
	spinlock_t			lock;
	char				name[30];
	enum img_dma_channel_state	alloc_status;
	int				a_chan_nr; /* Channel NR */
	int				irq; /* MDC IRQ */
	int				periph; /* Peripheral NR */
	int				thread; /* Thread for this channel */
	/* virt/dma buffers for channel */
	void				*virt_addr;
	dma_addr_t			dma_addr;
	/* List of current DMA descriptors */
	struct list_head		active_desc; /* Active descriptors */
	struct list_head		free_desc; /* Free descriptors for reuse */
	struct list_head		ready_desc; /* Completed, awaiting callback */
	bool				sg; /* true for sg xfer */
	bool				cyclic; /* true for cyclic xfer */
	bool				is_list; /* list-based xfer */
	bool				finished; /* xfer finished */
	int				irq_en; /* MDC IRQ status */
	/* Slave specific configuration */
	struct dma_slave_config		dma_config; /* config for channel */
	int				access_delay;
	int				priority;
	bool				skip_callback;
	/* tasklet for channel */
	struct tasklet_struct		tasklet; /* deferred work */
};

struct mdc_dmadev {
	struct dma_device		dma_slave;
	void __iomem			*base_addr;
	spinlock_t			lock;
	struct mdc_chan			slave_channel[MAX_MDC_DMA_CHANNELS];
	struct mdc_config_data		config;
	const struct img_mdc_soc_callbacks *callbacks;
	int				last_fthread; /* Current fast thread */
	int				last_sthread; /* Current slow thread */
#ifdef CONFIG_PM_SLEEP
	void				*pm_data;
#endif
};

struct mdc_dma_desc {
	struct dma_async_tx_descriptor	txd;
	struct list_head		node;
	enum dma_status			status;
	dma_addr_t			start_list;
	int				total_samples;
	int				buffer_size;
	int				sample_size;
	int				sample_count;
	bool				irq_acked;
};

/* Forward declaration for dma driver */
static struct platform_driver img_mdc_dma_driver;
static int mdc_terminate_all(struct dma_chan *chan);

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
	return container_of(c, struct mdc_chan, dchan);
}

static inline struct mdc_dma_desc *txd_to_mdc_desc(
				struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct mdc_dma_desc, txd);
}

static inline struct device *mchan2dev(struct mdc_chan *c)
{
	return &c->dchan.dev->device;
}

/*
 * Burst Size (expressed in bytes) must be equal to or greater than the
 * system bus width for memory to memory accesses.
 * So use a simple lookup to find the size in bytes based on the system bus
 * width which is reported as log2 of the width in bits:
 *              width (2^n)  0, 1, 2, 3,  4,  5,  6,   7
 *              width (bits) 1, 2, 4, 8, 16, 32, 64, 128
 */
static const unsigned burst_size_lookup[] = { 0, 0, 0, 1, 2, 4, 8, 16 };
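/*
 * For example, a 32-bit system bus reports bus_width == 5, giving a minimum
 * burst size of burst_size_lookup[5] == 4 bytes.
 */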


/*rate limit for warning message*/
static DEFINE_RATELIMIT_STATE(rl_align_warn, 300 * HZ, 1); /* 1 per 5mins*/

/*
 * mdc_dma_filter_fn: Check if the DMA channel is free for allocation.
 * @chan: DMA channel for allocation requested by the dmaengine.
 * @param: Struct containing the requested DMA channel (if any) and the
 * peripheral device number requesting this channel. On return, the
 * req_channel member contains the channel that will be allocated by the
 * MDC DMA device. This is useful when the caller passed -1 (meaning the
 * first available channel) and wants to know which channel the DMA device
 * will pick.
 *
 * This callback should be passed to dma_request_channel() whenever a
 * channel is requested on behalf of a slave device.
 */
bool mdc_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct device_driver *driver;
	spin_lock(&mdc_dma_lock);
	driver	= (wrapper_driver) ? wrapper_driver :
		&img_mdc_dma_driver.driver;
	spin_unlock(&mdc_dma_lock);

	if (chan->device->dev->driver == driver) {
		struct mdc_chan *mchan = to_mdc_chan(chan);
		struct mdc_dma_cookie *c = (struct mdc_dma_cookie *)param;
		if (mchan->alloc_status == IMG_DMA_CHANNEL_AVAILABLE) {
			/* Did the device request a specific channel? */
			if ((c->req_channel > -1) &&
			    (c->req_channel != mchan->a_chan_nr))
				/* Wrong channel */
				return false;
			mchan->periph = c->periph;
			c->req_channel = mchan->a_chan_nr;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(mdc_dma_filter_fn);
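
/*
 * Example use of mdc_dma_filter_fn (an illustrative sketch, not taken from
 * an in-tree user; the peripheral number is an assumption):
 *
 *	struct mdc_dma_cookie cookie = { .periph = 5, .req_channel = -1 };
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mdc_dma_filter_fn, &cookie);
 *
 * With req_channel == -1 the first available channel is picked; on return
 * cookie.req_channel holds the channel number actually selected.
 */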

static struct dma_chan *of_dma_mdc_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct mdc_dma_cookie cookie;
	dma_cap_mask_t cap;
	int count = dma_spec->args_count;

	/*
	 * 1st argument = peripheral
	 * 2nd argument = dma channel
	 */
	if (count != 2)
		return NULL;

	cookie.periph = dma_spec->args[0];
	cookie.req_channel = dma_spec->args[1];

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);
	dma_cap_set(DMA_CYCLIC, cap);

	return dma_request_channel(cap, mdc_dma_filter_fn, &cookie);
}
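
/*
 * Device tree usage sketch (illustrative only; the node name, controller
 * phandle and cell values below are assumptions, not taken from a dts):
 *
 *	serial@10000 {
 *		dmas = <&mdc 15 1>, <&mdc 16 2>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * The first cell selects the peripheral number and the second the requested
 * MDC channel, matching the two arguments parsed above.
 */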

static int check_widths(struct mdc_dmadev *mdma, u32 address)
{
	/*
	 *  check alignment, we can do accesses to/from unaligned address but
	 *  we must set width_w and width_r appropriately and it will impact
	 *  on performance
	 */
	int width = -1;
	if (address & 0x1) { /* byte-aligned address */
		if (mdma->config.bus_width > 3) /* 2^3 = 8 bits = 1 byte */
			width = 0; /* 2^0 = 1 byte */
	} else if (address & 0x2) { /* halfword-aligned address */
		if (mdma->config.bus_width > 4) /* 2^4 = 16 bits = 2 bytes */
			width = 1;
	} else if (address & 0x4) { /* word-aligned address */
		if (mdma->config.bus_width > 5) /* 2^5 = 32 bits = 4 bytes */
			width = 2;
	}
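	/*
	 * Worked example: on a 32-bit system bus (bus_width == 5) an address
	 * with bit 1 set takes the second branch above and is limited to
	 * 16-bit accesses (width == 1).
	 */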

	if (width < 0) { /* aligned to the system bus width */
		/*
		 * system bus width is in log2(bits)
		 * we need log2(bytes) so subtract 3
		 */
		width = mdma->config.bus_width - 3;
	} else {
		if (__ratelimit(&rl_align_warn))
			dev_warn(mdma->dma_slave.dev,
				 "Using address not aligned to system bus width, this will impact performance\n");
	}

	return width;
}

/**
 * img_dma_reset() - reset a channel
 * @mchan:	The channel to reset
 *
 * Resets a channel by clearing all of its context to zero,
 * then sets up the default settings.
 */
static void img_dma_reset(struct mdc_chan *mchan)
{
	u32 genconf = 0;
	u32 rpconf = 0;
	int dma_channel = mchan->a_chan_nr;
	int systembus_width = mchan->mdma->config.bus_width;

	unsigned long mdc_base_address = (unsigned long)mchan->mdma->base_addr;

	/* ensure probe has set up the base address before proceeding */
	BUG_ON(!mdc_base_address);

	MDC_REG_RESET_CONTEXT(mdc_base_address, dma_channel);

	/*Setup General Config */

	/*enable list interrupts*/
	MDC_SET_FIELD(genconf, MDC_LIST_IEN, mchan->irq_en);
	/*endian swap TODO make user configurable*/
	MDC_SET_FIELD(genconf, MDC_BSWAP, 0);
	/*enable interrupts*/
	MDC_SET_FIELD(genconf, MDC_IEN, mchan->irq_en);
	/*don't latch interrupts*/
	MDC_SET_FIELD(genconf, MDC_LEVEL_INT, 1);
	/* Physical channel.*/
	MDC_SET_FIELD(genconf, MDC_CHANNEL, dma_channel);
	/* access delay on burst accesses */
	MDC_SET_FIELD(genconf, MDC_ACC_DEL, mchan->access_delay);
	/* see the manual: delays recognition of DREQ
	 * until the burst has reached the unpacker */
	MDC_SET_FIELD(genconf, MDC_WAIT_UNPACK, 0);
	/* Inc write address TODO make user configurable */
	MDC_SET_FIELD(genconf, MDC_INC_W, 1);
	/* see the manual: delays recognition of DREQ
	 * until the burst has reached the packer */
	MDC_SET_FIELD(genconf, MDC_WAIT_PACK, 0);
	/* Incr read address TODO make user configurable*/
	MDC_SET_FIELD(genconf, MDC_INC_R, 1);
	/* Should generally be set unless using a ram
	 * narrower than the system bus*/
	MDC_SET_FIELD(genconf, MDC_PHYSICAL_R, 1);
	MDC_SET_FIELD(genconf, MDC_PHYSICAL_W, 1);
	/* Note Read and Write widths get set when specifying direction. */

	MDC_RSET_GENERAL_CONFIG(mdc_base_address, dma_channel, genconf);

	/*Setup read port: */

	/*
	 * NJ:
	 * We are going to split the channels equally across the number of
	 * available threads in the DMA controller. Ideally we should assign
	 * a different threads to peripherals with high latency than to those
	 * without but we don't know what peripherals are attached, we could
	 * give the user an interface to set this in a more advanced driver.
	 *
	 * Email from Paul Welton (did the hardware design) to NJ on 8/7/09:
	 * " A different thread id should be used for peripherals with different
	 *  latency characteristics. In the case of reads, the fabric guarantees
	 *  that return data within a thread is returned in the same order as
	 *  the read requests. Therefore, if one peripheral or memory is slow to
	 *  return data, then return data from another peripheral or memory on
	 *  the same thread will be blocked. If they are allocated different
	 *  threads then the second one could continue independently.
	 *
	 *  There are also restrictions or bursts. Once a burst begins on one
	 *  thread it must complete before any other burst can begin on the same
	 *  thread. A burst can be blocked by the "READY" signal for that thread
	 *  going low. Note that the READY, unlike the ENABLE, is provided on a
	 *  per-thread basis. Therefore, even for writes, it is advantageous to
	 *  place peripherals or memories which are likely to block on a
	 *  different thread from other critical peripherals which should not
	 *  be blocked."
	 */

	/*
	 *  we split the available threads equally between channels
	 *  so a 16 channel system with 2 threads, channels 0-7 will use
	 *  thread 0 and channels 8-15 will use thread 1
	 */

	/*thread id used in tag for reads issued from list*/
	MDC_SET_FIELD(rpconf, MDC_STHREAD, mchan->thread);
	/*thread id used in tag for reads*/
	MDC_SET_FIELD(rpconf, MDC_RTHREAD, mchan->thread);
	/*thread id used in tag for writes*/
	MDC_SET_FIELD(rpconf, MDC_WTHREAD, mchan->thread);

	/*priority of transfers*/
	MDC_SET_FIELD(rpconf, MDC_PRIORITY, mchan->priority);
	/* no. of clock cycles before recognising DREQ following end-of-burst
	 * (at the unpacker when WAIT_UNPACK=1) */
	MDC_SET_FIELD(rpconf, MDC_HOLD_OFF, 0);
	/* burst size */
	MDC_SET_FIELD(rpconf, MDC_BURST_SIZE,
			(burst_size_lookup[systembus_width & 0x7] - 1));
	/* disable the use of the DREQ signal by default */
	MDC_SET_FIELD(rpconf, MDC_DREQ_ENABLE, 0);
	/*perform read back on last write of transaction.*/
	MDC_SET_FIELD(rpconf, MDC_READBACK, 0);

	MDC_RSET_READ_PORT_CONFIG(mdc_base_address, dma_channel, rpconf);

	MDC_RSET_CMDS_PROCESSED(mdc_base_address, dma_channel, 0);

	wmb();
}

/*
 * mdc_handler_isr: Clear the IRQ for the DMA channel and
 * schedule the tasklet
 */
static irqreturn_t mdc_handler_isr(int irq, void *chan_id)
{

	u32 irq_status;
	struct mdc_chan *mchan = chan_id;
	struct mdc_dma_desc *desc;

	spin_lock(&mchan->lock);

	irq_status = MDC_RGET_CMDS_PROCESSED((unsigned long)
					     mchan->mdma->base_addr,
					     mchan->a_chan_nr);

	if (irq_status & MDC_DMA_INT_ACTIVE) {
		/* reset irq */
		MDC_RSET_CMDS_PROCESSED((unsigned long)
					mchan->mdma->base_addr,
					mchan->a_chan_nr, 0);
		/* Skip tasklet? */
		if (mchan->skip_callback) {
			mchan->skip_callback = false;
		} else {
			list_for_each_entry(desc, &mchan->active_desc, node) {
				if (!desc->irq_acked || mchan->cyclic) {
					desc->sample_count++;
					desc->irq_acked = true;
					break;
				}
			}
			/* Schedule the tasklet */
			tasklet_schedule(&mchan->tasklet);
		}
	}

	spin_unlock(&mchan->lock);

	return IRQ_HANDLED;
}

/*
 * mdc_dma_tasklet: Post-IRQ work for a DMA channel
 * @data: MDC channel pointer (cast to unsigned long)
 *
 * Updates descriptor xfer parameters, moves finished descriptors
 * to the free list, and calls the callback function.
 */
static void mdc_dma_tasklet(unsigned long data)
{
	struct mdc_chan *mchan = (struct mdc_chan *)data;
	struct mdc_dma_desc *desc, *safe;
	unsigned long flags;

	spin_lock_irqsave(&mchan->lock, flags);
	if (list_empty(&mchan->active_desc)) {
		spin_unlock_irqrestore(&mchan->lock, flags);
		return;
	}

	list_for_each_entry_safe(desc, safe, &mchan->active_desc, node) {
		dev_vdbg(mchan2dev(mchan),
			 "Tasklet descriptor\n"
			 "Address      : 0x%p\n"
			 "Sample count : %d\n"
			 "Total count  : %d\n"
			 "Acked        : %d\n",
			 desc, desc->sample_count, desc->total_samples,
			 desc->irq_acked);
		if (desc->sample_count >= desc->total_samples) {
			if (desc->irq_acked) {
				desc->sample_count = 0;
				mchan->finished = true;
				desc->irq_acked = false;
			}
			/*
			 * Move it to the ready list.
			 * For cyclic we keep it active.
			 */
			if (!mchan->cyclic)
				list_move_tail(&desc->node, &mchan->ready_desc);
		}
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/*
	 * If cyclic, that means we only have one descriptor so get it and
	 * see if there is a callback to call now
	 */
	if (mchan->cyclic) {
		desc = list_first_entry(&mchan->active_desc,
					typeof(*desc), node);
		if (desc->txd.callback)
			desc->txd.callback(desc->txd.callback_param);
	} else {
		list_for_each_entry_safe(desc, safe, &mchan->ready_desc, node) {
			if (desc->txd.callback)
				desc->txd.callback(desc->txd.callback_param);
			async_tx_ack(&desc->txd);
			/* Move it back to the free list */
			list_move_tail(&desc->node, &mchan->free_desc);
		}
	}
}

/*
 * mdc_dma_tx_submit: Start a DMA transfer for txd descriptor
 * @txd: Descriptor for the DMA transfer
 */
static dma_cookie_t mdc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mdc_dma_desc *dma_desc = txd_to_mdc_desc(txd);
	struct mdc_chan *mchan = to_mdc_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&mchan->lock, flags);

	cookie = dma_cookie_assign(&dma_desc->txd);
	dma_desc->status = DMA_IN_PROGRESS;
	/* Add descriptor to active list */
	list_add_tail(&dma_desc->node, &mchan->active_desc);

	spin_unlock_irqrestore(&mchan->lock, flags);

	dev_vdbg(txd->chan->device->dev,
		"New DMA descriptor\n"
		"Address           : 0x%p\n"
		"Cookie            : 0x%08x\n"
		"Channel           : %d\n"
		"Callback function : 0x%p\n"
		"Callback parameter: 0x%p\n",
		dma_desc, cookie, mchan->a_chan_nr,
		dma_desc->txd.callback,
		dma_desc->txd.callback_param);

	return cookie;
}

/*
 * map_to_mdc_width: Convert a dma engine width to the MDC one
 * @width: The dma_slave_buswidth value
 */
static enum img_dma_width map_to_mdc_width(enum dma_slave_buswidth width)
{
	/*
	 * mchan->dma_config.dst_addr uses enum dma_slave_buswidth
	 * Convert from dma_slave_buswidth to img_dma_width:
	 * DMA_SLAVE_BUSWIDTH_1_BYTE,
	 * DMA_SLAVE_BUSWIDTH_2_BYTES,
	 * DMA_SLAVE_BUSWIDTH_4_BYTES,
	 * DMA_SLAVE_BUSWIDTH_8_BYTES,
	 * to
	 * IMG_DMA_WIDTH_8,
	 * IMG_DMA_WIDTH_16,
	 * IMG_DMA_WIDTH_32,
	 * IMG_DMA_WIDTH_64,
	 * IMG_DMA_WIDTH_128,
	 */
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return IMG_DMA_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return IMG_DMA_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return IMG_DMA_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return IMG_DMA_WIDTH_64;
	default:
		return IMG_DMA_WIDTH_128;
	}
}

/*
 * mdc_desc_init: Initialize an MDC transfer descriptor for a given channel
 * @desc: DMA descriptor
 * @mchan: DMA channel
 * @flags: Transfer flags
 */
static void mdc_desc_init(struct mdc_dma_desc *desc, struct mdc_chan *mchan,
			  unsigned long flags)
{
	dma_async_tx_descriptor_init(&desc->txd, &mchan->dchan);
	desc->txd.tx_submit = mdc_dma_tx_submit;
	desc->txd.flags = flags; /* Ignore the MDC tx flags */

	INIT_LIST_HEAD(&desc->node);
}

/*
 * mdc_prep_irq_status: Enable/Disable an IRQ for an MDC channel
 * @chan: The MDC channel
 * @flags: Transfer flags
 */
static void mdc_prep_irq_status(struct mdc_chan *chan, unsigned long flags)
{
	/* Disable/Enable (if needed) allocated IRQ for this channel */
	if (!(flags & DMA_PREP_INTERRUPT))
		chan->irq_en = 0;
	else
		chan->irq_en = 1;
}

/*
 * mdc_dma_get_desc: Reuse a free descriptor or allocate a new one
 * @chan: The MDC channel
 * @flags: Transfer flags
 */
static struct mdc_dma_desc *mdc_dma_get_desc(struct mdc_chan *chan,
					     unsigned long flags)
{
	unsigned long irq_flags;
	struct mdc_dma_desc *desc;

	/* Find a suitable descriptor */
	spin_lock_irqsave(&chan->lock, irq_flags);
	list_for_each_entry(desc, &chan->free_desc, node) {
		if (async_tx_test_ack(&desc->txd)) {
			/* Found one. Delete it from the list */
			list_del(&desc->node);
			spin_unlock_irqrestore(&chan->lock, irq_flags);
			desc->txd.flags = flags;
			return desc;
		}
	}
	/* We couldn't find a suitable descriptor */
	spin_unlock_irqrestore(&chan->lock, irq_flags);
	desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
	if (!desc) {
		dev_err(mchan2dev(chan),
			"Failed to allocate DMA descriptor\n");
		return NULL;
	}

	mdc_desc_init(desc, chan, flags);

	return desc;
}

/*
 * alloc_thread_for_chan: Allocate a suitable thread for a given DMA channel
 * @mchan: The MDC DMA channel
 * @type: The type of thread to be allocated for this channel
 */
static void alloc_thread_for_chan(struct mdc_chan *mchan,
				  enum mdc_dma_thread_type type)
{
	struct mdc_dmadev *mdma = mchan->mdma;
	int total_threads = mdma->config.dma_threads;

	dev_vdbg(mdma->dma_slave.dev,
		 "Requested thread type %d for channel %d\n",
		 type, mchan->a_chan_nr);
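
	/*
	 * Fast requests rotate over the upper half of the thread IDs and slow
	 * requests over the lower half, e.g. with four threads the fast pool
	 * is threads 2-3 and the slow pool threads 0-1.
	 */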

	spin_lock(&mdma->lock);
	if (type == MDC_THREAD_FAST) {
		mdma->last_fthread = ++mdma->last_fthread &
			(total_threads / 2 - 1);
		mchan->thread = total_threads / 2 + mdma->last_fthread;
	} else if (type == MDC_THREAD_SLOW) {
		mdma->last_sthread = ++mdma->last_sthread &
			(total_threads / 2 - 1);
		mchan->thread = total_threads % 2 + mdma->last_sthread;
	} else {
		mdma->last_sthread = ++mdma->last_sthread &
			(total_threads / 2 - 1);
		mchan->thread = total_threads % 2 + mdma->last_sthread;
		dev_warn(mdma->dma_slave.dev,
			 "Caller did not use a valid thread_type\n"
			 "Defaulting to MDC_THREAD_SLOW\n");
	}
	spin_unlock(&mdma->lock);
}

/*
 * parse_dma_chan_flags: Configure channel based on the MDC specific xfer flags
 * @chan: The dmaengine channel
 */
static void parse_dma_chan_flags(struct dma_chan *chan)
{
	struct mdc_dma_tx_control *tx_control;
	struct mdc_chan *mchan = to_mdc_chan(chan);
	if (chan->private) {
		tx_control = (struct mdc_dma_tx_control *)chan->private;

		if (tx_control->flags & MDC_PRIORITY)
			mchan->priority = tx_control->prio;

		if (tx_control->flags & MDC_ACCESS_DELAY)
			mchan->access_delay = tx_control->access_delay;

		if (tx_control->flags & MDC_NO_CALLBACK)
			mchan->skip_callback = true;

		if (tx_control->flags & MDC_NEED_THREAD)
			alloc_thread_for_chan(mchan, tx_control->thread_type);
	}
}

/*
 * mdc_prep_memcpy: Prepare a descriptor for memory to memory transfers
 * @chan: DMA channel
 * @dst: Destination buffer
 * @src: Source buffer
 * @len: Total bytes to transfer
 * @flags: DMA xfer flags
 */
static struct dma_async_tx_descriptor *mdc_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t len,
						       unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma_desc *mdesc;
	unsigned long irq_flags;
	int width;
	u32 genconf, rpconf;

	mdc_prep_irq_status(mchan, flags);

	mdesc = mdc_dma_get_desc(mchan, flags);

	if (!mdesc)
		return NULL;

	mchan->is_list = false;
	mchan->cyclic = false;
	/* tx defaults for tx_status. single transfer */
	mdesc->irq_acked = false;
	mdesc->sample_count = 0;
	mdesc->sample_size = 1;
	mdesc->total_samples = mdesc->buffer_size = 1;

	parse_dma_chan_flags(chan);

	img_dma_reset(mchan);

	spin_lock_irqsave(&mchan->lock, irq_flags);

	genconf = MDC_RGET_GENERAL_CONFIG(
			(unsigned long)mchan->mdma->base_addr,
			mchan->a_chan_nr);

	rpconf = MDC_RGET_READ_PORT_CONFIG(
			(unsigned long)mchan->mdma->base_addr,
			mchan->a_chan_nr);

	/* Prepare src */
	width = check_widths(mchan->mdma, src);
	MDC_RSET_READ_ADDRESS((unsigned long)mchan->mdma->base_addr,
			      mchan->a_chan_nr, src);
	MDC_SET_FIELD(genconf, MDC_WIDTH_R, width);
	MDC_SET_FIELD(genconf, MDC_INC_R, 1);
	/* Prepare dst */
	width = check_widths(mchan->mdma, dst);
	MDC_RSET_WRITE_ADDRESS((unsigned long)mchan->mdma->base_addr,
			       mchan->a_chan_nr, dst);
	MDC_SET_FIELD(genconf, MDC_WIDTH_W, width);
	MDC_SET_FIELD(genconf, MDC_INC_W, 1);
	MDC_SET_FIELD(rpconf, MDC_DREQ_ENABLE, 0);

	/* Set priority */
	MDC_SET_FIELD(rpconf, MDC_PRIORITY, mchan->priority);

	MDC_RSET_GENERAL_CONFIG((unsigned long)mchan->mdma->base_addr,
				mchan->a_chan_nr, genconf);

	MDC_RSET_READ_PORT_CONFIG((unsigned long)mchan->mdma->base_addr,
				  mchan->a_chan_nr, rpconf);
	MDC_RSET_TRANSFER_SIZE((unsigned long)mchan->mdma->base_addr,
			       mchan->a_chan_nr, len - 1);
	wmb();

	spin_unlock_irqrestore(&mchan->lock, irq_flags);

	return &mdesc->txd;
}

/*
 * mdc_prep_dma_cyclic: Prepare a descriptor for a cyclic transfer
 * @chan: DMA channel
 * @buf_addr: Memory buffer used for the cyclic transfer
 * @buf_len: Total length of the buffer in bytes
 * @period_len: Length of each period in bytes
 * @direction: Transfer direction
 * @flags: Transfer flags
 * @context: context
 */
static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma_desc *mdesc;
	struct img_dma_mdc_list *dma_desc;
	dma_addr_t list_base, next_list;

	if (!buf_len || !period_len) {
		dev_err(mchan2dev(mchan), "Invalid buffer/period len\n");
		return NULL;
	}

	mdc_prep_irq_status(mchan, flags);

	mdesc = mdc_dma_get_desc(mchan, flags);

	if (!mdesc)
		return NULL;

	mdesc->irq_acked = false;
	mdesc->sample_count = 0;
	mdesc->total_samples = 0;
	mdesc->sample_size = period_len;
	mdesc->buffer_size = buf_len;
	mchan->is_list = true;
	mchan->cyclic = true;

	parse_dma_chan_flags(chan);

	dev_vdbg(mchan2dev(mchan), "DMA cyclic xfer setup:\n"
		 "Peripheral dev  : %d\n"
		 "DMA channel     : %d\n"
		 "Buffer size     : %zu\n"
		 "Period size     : %zu\n"
		 "Direction       : %d\n"
		 "Flags           : %lu\n"
		 "Thread          : %d\n"
		 "DMA Buffer(bus) : 0x%08llx\n",
		 mchan->periph, mchan->a_chan_nr, buf_len,
		 period_len, direction, flags, mchan->thread,
		 (u64)buf_addr);

	img_dma_reset(mchan);
	/* This is for the MDC linked-list */
	dma_desc = (struct img_dma_mdc_list *)mchan->virt_addr;
	mdesc->start_list = list_base = next_list = mchan->dma_addr;

	/* Hand back the DMA buffer to the CPU */
	dma_sync_single_for_cpu(mchan->mdma->dma_slave.dev,
				mchan->dma_addr,
				PAGE_SIZE, DMA_BIDIRECTIONAL);
	/* warn (rate-limited) if buf_addr is not aligned to the bus width */
	check_widths(mchan->mdma, buf_addr);
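
	/*
	 * Build one 32-byte MDC list node per period; each node mirrors the
	 * channel register block (see mdc_prep_slave_sg()). The last node is
	 * pointed back at the first below, so the hardware loops over the
	 * buffer indefinitely.
	 */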

	do {
		next_list += sizeof(struct img_dma_mdc_list);

		dma_desc->gen_conf = 0xB00000AA /* 2 byte width */
			| ((mchan->a_chan_nr & 0x3f) << 20);

		dma_desc->readport_conf = 0x00000002;
		MDC_SET_FIELD(dma_desc->readport_conf, MDC_PRIORITY,
			      mchan->priority);

		if (direction == DMA_MEM_TO_DEV) {
			dma_desc->gen_conf |= _MDC_INC_R_MASK;
			dma_desc->read_addr = buf_addr;
			dma_desc->write_addr = mchan->dma_config.dst_addr;
			dma_desc->readport_conf |=
				(mchan->dma_config.dst_maxburst
				 & 0xFF) << 4;
		} else {
			dma_desc->gen_conf |= _MDC_INC_W_MASK;
			dma_desc->read_addr = mchan->dma_config.src_addr;
			dma_desc->write_addr = buf_addr;
			dma_desc->readport_conf |=
				(mchan->dma_config.src_maxburst
				 & 0xFF) << 4;
		}
		dma_desc->xfer_size = period_len - 1;
		dma_desc->node_addr = next_list;
		dma_desc->cmds_done = 0;
		dma_desc->ctrl_status = 0x11;

		if (period_len > buf_len)
			period_len = buf_len;

		dma_desc++;
		buf_addr += period_len;
		mdesc->total_samples++;
	} while (buf_len -= period_len);
	/* Point back to the first item so we can get an infinite loop */
	dma_desc[-1].node_addr = list_base;


	/* we are done with the DMA buffer, give it back to the device */
	dma_sync_single_for_device(mchan->mdma->dma_slave.dev,
				   mchan->dma_addr,
				   PAGE_SIZE,
				   DMA_BIDIRECTIONAL);

	return &mdesc->txd;
}

/*
 * mdc_prep_slave_sg: Prepare a descriptor for sg transfer
 * @chan: DMA channel
 * @sgl: Scattergather list to transfer
 * @sg_len: Size of the scattergather list
 * @direction: Transfer direction
 * @flags: Transfer flags
 * @context: context
 */
static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dmadev *mdma = mchan->mdma;
	struct mdc_dma_desc *mdesc = NULL;
	struct device *dev = chan->device->dev;
	struct scatterlist *sg;
	struct img_dma_mdc_list *desc_list;
	dma_addr_t list_base, next_list, addr;
	int i, width, temp, burst_size_min, burst_size, req_width;
	u32 genconf, rpconf;
	dma_addr_t dst, src;
	u32 len;

	if (unlikely(!sg_len || !sgl || !mchan))
		return NULL;


	mdc_prep_irq_status(mchan, flags);

	mdesc = mdc_dma_get_desc(mchan, flags);

	if (!mdesc)
		return NULL;

	mchan->is_list = (sg_len > 1);
	mchan->cyclic = false;
	mchan->sg = true;
	mdesc->irq_acked = false;
	mdesc->sample_count = 0;
	mdesc->sample_size = 1; /* single list item */
	mdesc->total_samples = mdesc->buffer_size = sg_len;

	parse_dma_chan_flags(chan);

	dev_vdbg(dev, "DMA xfer setup:\n"
		 "Peripheral dev  : %d\n"
		 "DMA channel     : %d\n"
		 "sg list         : 0x%p\n"
		 "sg list length  : %d\n"
		 "Direction       : %d\n"
		 "Priority        : %d\n"
		 "Thread          : %d\n"
		 "Access Delay    : %d\n"
		 "DMA Buffer      : 0x%p\n"
		 "DMA Buffer(bus) : 0x%08llx\n",
		 mchan->periph, mchan->a_chan_nr, sgl,
		 sg_len, direction,
		 mchan->priority, mchan->thread,
		 mchan->access_delay,
		 (u64 *)mchan->virt_addr,
		 (u64)mchan->dma_addr);

	img_dma_reset(mchan);

	burst_size_min = burst_size_lookup[mdma->config.bus_width & 0x7];
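	/*
	 * Bursts must be at least as wide as the system bus (see the note
	 * above burst_size_lookup[]), so the slave's maxburst is clamped to
	 * burst_size_min when the read port config is built below.
	 */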

	if (!mchan->is_list) {
		len = sg_dma_len(sgl);
		addr = sg_dma_address(sgl);
		width = check_widths(mchan->mdma, addr);
		genconf = 0x30000088
			| ((mchan->a_chan_nr & 0x3f) << 20)
			| ((mchan->access_delay & 0x7) << 16);

		temp = (mchan->thread & 0xf);
		rpconf = 0x00000002 | temp << 2
			| temp << 24 | temp << 16;

		MDC_SET_FIELD(rpconf, MDC_PRIORITY,
			      mchan->priority);

		if (direction == DMA_MEM_TO_DEV) {
			MDC_SET_FIELD(genconf, MDC_INC_R, 1);
			MDC_SET_FIELD(genconf, MDC_WIDTH_R, width);
			req_width = mchan->dma_config.dst_addr_width;
			MDC_SET_FIELD(genconf, MDC_WIDTH_W,
				      map_to_mdc_width(req_width));
			src = addr;
			dst = mchan->dma_config.dst_addr;
			burst_size = mchan->dma_config.dst_maxburst;
		} else {
			MDC_SET_FIELD(genconf, MDC_INC_W, 1);
			MDC_SET_FIELD(genconf, MDC_WIDTH_W, width);
			req_width = mchan->dma_config.src_addr_width;
			MDC_SET_FIELD(genconf, MDC_WIDTH_R,
				      map_to_mdc_width(req_width));
			src = mchan->dma_config.src_addr;
			dst = addr;
			burst_size = mchan->dma_config.src_maxburst;
		}
		rpconf |= (burst_size < burst_size_min)
			? (burst_size_min - 1) << 4
			: (burst_size - 1) << 4;

		/* Write the single sg entry to hardware */
		MDC_RSET_READ_ADDRESS((unsigned long)mchan->mdma->base_addr,
				      mchan->a_chan_nr, src);
		MDC_RSET_WRITE_ADDRESS((unsigned long)mchan->mdma->base_addr,
				       mchan->a_chan_nr, dst);
		MDC_RSET_GENERAL_CONFIG((unsigned long)mchan->mdma->base_addr,
					mchan->a_chan_nr, genconf);
		MDC_RSET_READ_PORT_CONFIG((unsigned long)mchan->mdma->base_addr,
					  mchan->a_chan_nr, rpconf);
		MDC_RSET_TRANSFER_SIZE((unsigned long)mchan->mdma->base_addr,
				       mchan->a_chan_nr, len - 1);
		wmb();

	} else {
		/* This is for the MDC linked-list */
		desc_list = (struct img_dma_mdc_list *)mchan->virt_addr;
		mdesc->start_list = list_base = next_list = mchan->dma_addr;

		/* Hand back the DMA buffer to the CPU */
		dma_sync_single_for_cpu(mchan->mdma->dma_slave.dev,
					mchan->dma_addr,
					PAGE_SIZE, DMA_BIDIRECTIONAL);

		for_each_sg(sgl, sg, sg_len, i) {
			/*
			 * Each list item is a 32-byte packet represented by the
			 * img_dma_mdc_list struct. Every member of that struct
			 * corresponds to the channel register
			 */
			next_list += sizeof(struct img_dma_mdc_list);
			len = sg_dma_len(sg);
			addr = sg_dma_address(sg);
			width = check_widths(mchan->mdma, addr);
			desc_list->gen_conf = 0x30000088
				| ((mchan->a_chan_nr & 0x3f) << 20)
				| ((mchan->access_delay & 0x7) << 16);

			temp = (mchan->thread & 0xf);
			desc_list->readport_conf = 0x00000002 | temp << 2
				| temp << 24 | temp << 16;

			MDC_SET_FIELD(desc_list->readport_conf, MDC_PRIORITY,
				      mchan->priority);

			if (direction == DMA_MEM_TO_DEV) {
				MDC_SET_FIELD(desc_list->gen_conf, MDC_INC_R,
					      1);
				MDC_SET_FIELD(desc_list->gen_conf, MDC_WIDTH_R,
					      width);
				req_width = mchan->dma_config.dst_addr_width;
				MDC_SET_FIELD(desc_list->gen_conf, MDC_WIDTH_W,
					      map_to_mdc_width(req_width));
				desc_list->read_addr = addr;
				desc_list->write_addr =
						mchan->dma_config.dst_addr;
				burst_size = mchan->dma_config.dst_maxburst;
			} else {
				MDC_SET_FIELD(desc_list->gen_conf, MDC_INC_W,
					      1);
				MDC_SET_FIELD(desc_list->gen_conf, MDC_WIDTH_W,
					      width);
				req_width = mchan->dma_config.src_addr_width;
				MDC_SET_FIELD(desc_list->gen_conf, MDC_WIDTH_R,
					      map_to_mdc_width(req_width));
				desc_list->read_addr =
						mchan->dma_config.src_addr;
				desc_list->write_addr = addr;
				burst_size = mchan->dma_config.src_maxburst;
			}
			desc_list->readport_conf |=
				(burst_size < burst_size_min)
				? (burst_size_min - 1) << 4
				: (burst_size - 1) << 4;

			desc_list->xfer_size = len - 1;
			desc_list->node_addr = next_list;
			desc_list->cmds_done = 0;
			desc_list->ctrl_status = 0x11;

			desc_list++;
		}

		desc_list[-1].node_addr = 0;

		/* we are done with the DMA buffer, give it back to the device */
		dma_sync_single_for_device(mchan->mdma->dma_slave.dev,
					   mchan->dma_addr,
					   PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return &mdesc->txd;
}

/*
 * mdc_alloc_chan_resources: Allocate resources for an MDC channel
 * @chan: The MDC channel
 */
static int mdc_alloc_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dmadev *mdma = mchan->mdma;
	struct device *dev = chan->device->dev;
	int total_threads = mdma->config.dma_threads;
	int ret;

	mchan->cyclic = false;
	mchan->sg = false;
	mchan->is_list = false;
	mchan->finished = false;
	mchan->irq_en = 1;
	mchan->priority = 0; /* Assume bulk priority */
	mchan->access_delay = 0; /* Assume fast peripheral */
	mchan->skip_callback = false;
	/* Clear private data from previous allocations */
	mchan->dchan.private = NULL;
	/* Defaults to slow threads */
	spin_lock(&mdma->lock);
	mdma->last_sthread = ++mdma->last_sthread % (total_threads / 2);
	mchan->thread = (total_threads % 2) + mdma->last_sthread;
	spin_unlock(&mdma->lock);

	BUG_ON(!mdma->callbacks->allocate);

	ret = mdma->callbacks->allocate(mchan->a_chan_nr, mchan->periph);

	if (ret < 0) {
		dev_err(dev,
			"Failed to allocate channel %d for device %u with err=%d\n",
			mchan->a_chan_nr, mchan->periph, ret);
		return ret;
	}

	mchan->alloc_status = IMG_DMA_CHANNEL_INUSE;

	dma_cookie_init(&mchan->dchan);

	dev_dbg(dev, "DMA channel %d allocated for peripheral device %u\n",
		 mchan->a_chan_nr, mchan->periph);

	/* Reset channel before we request an IRQ for it */

	img_dma_reset(mchan);

	/* Allocate an IRQ for this channel */

	mchan->irq = platform_get_irq(to_platform_device(dev),
				      mchan->a_chan_nr);

	ret = request_irq(mchan->irq, mdc_handler_isr, 0,
			  mchan->name, mchan);

	if (ret) {
		dev_err(dev,
			"Failed to allocate IRQ %d for channel %d\n",
			mchan->irq, mchan->a_chan_nr);
		return -ENXIO;
	}

	dev_dbg(dev,
		"IRQ %d (%s) allocated for channel %d\n",
		mchan->irq, mchan->name, mchan->a_chan_nr);

	/*
	 * We need to allocate a DMA buffer for the MDC linked-list
	 * operation
	 */
	mchan->virt_addr = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!mchan->virt_addr) {
		dev_err(mchan->mdma->dma_slave.dev,
			"Failed to allocate memory for channel %d\n",
			mchan->a_chan_nr);
		ret = -ENOMEM;
		goto free_irq;
	}
	/*
	 * Since we don't know the direction yet, map using
	 * DMA_BIDRECTIONAL to cover both cases
	 */
	mchan->dma_addr = dma_map_single(mchan->mdma->dma_slave.dev,
					     mchan->virt_addr,
					     PAGE_SIZE,
					     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, mchan->dma_addr)) {
		ret = -ENOMEM;
		goto free_buf;
	}

	return 0;

free_buf:
	kfree(mchan->virt_addr);
free_irq:
	free_irq(mchan->irq, mchan);

	return ret;
}

/*
 * mdc_free_chan_resources: Free resources for an MDC channel
 * @chan: The MDC DMA channel
 */
static void mdc_free_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *dchan = to_mdc_chan(chan);
	struct mdc_dmadev *mdma = dchan->mdma;
	struct device *dev = chan->device->dev;
	int ret;

	/* Stop transfers and remove descriptors */
	mdc_terminate_all(chan);

	BUG_ON(!mdma->callbacks->free);

	ret = mdma->callbacks->free(dchan->a_chan_nr);
	if (ret < 0) {
		dev_err(dev,
			"Failed to unregister channel %d for device %u\n",
			dchan->a_chan_nr, dchan->periph);
	} else {
		dchan->alloc_status = IMG_DMA_CHANNEL_AVAILABLE;
		free_irq(dchan->irq, dchan);

		dma_unmap_single(dchan->mdma->dma_slave.dev,
				 dchan->dma_addr, PAGE_SIZE,
				 DMA_BIDIRECTIONAL);
		kfree(dchan->virt_addr);

		dev_vdbg(dev,
			 "DMA channel %d for device %u deallocated\n",
			 dchan->a_chan_nr, dchan->periph);

		dchan->periph = 0;
	}
}

/*
 * slave_check_width: Check the slave transfer width or fall back to a valid one
 * @chan: The MDC DMA channel
 * @req_width: Requested transfer width, in bytes
 *
 * config.bus_width holds log2 of the system bus width in bits, so convert
 * it to bytes before comparing against the requested dma_slave_buswidth.
 */
static int slave_check_width(struct mdc_chan *chan, int req_width)
{
	int bus_width_bytes = 1 << (chan->mdma->config.bus_width - 3);

	if (bus_width_bytes < req_width) {
		dev_err(chan->mdma->dma_slave.dev,
			"Invalid transfer width\n"
			"System    : %d bytes\n"
			"Requested : %d bytes\n",
			bus_width_bytes, req_width);
		return bus_width_bytes;
	}

	return req_width;
}

/*
 * mdc_tx_status: Get DMA status for a given cookie
 * @chan: The MDC DMA channel
 * @cookie: Transfer cookie
 * @txstate: Struct containing the cookie status
 */
static enum dma_status mdc_tx_status(struct dma_chan *chan,
			 dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{

	struct mdc_chan *mchan = to_mdc_chan(chan);
	int dma_status, ret;
	int dma_retry = 0;
	int total_xfered, residue;
	unsigned long flags;
	struct mdc_dma_desc *desc, *safe;
	struct list_head *root = &mchan->active_desc;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_SUCCESS) {
		dma_set_residue(txstate, 0);
		return ret;
	}

	if (!mchan->irq_en)
		mchan->finished = true;

	if (!mchan->cyclic && mchan->finished) {
		do {
			dma_status = MDC_REG_IS_BUSY((unsigned long)
						     mchan->mdma->base_addr,
						     mchan->a_chan_nr);
			if (!dma_status)
				break;
			if (++dma_retry > MAX_MDC_DMA_BUSY_RETRY)
				return DMA_IN_PROGRESS;
		} while (1);
	}

	if (mchan->finished) {
		/*
		 * For cyclic or disabled irqs, we will
		 * look in the active list
		 */
		root = (mchan->irq_en && !mchan->cyclic) ?
			&mchan->free_desc : root;
		dma_status = DMA_SUCCESS;
	} else {
		dma_status = DMA_IN_PROGRESS;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(desc, safe, root, node) {
		if (desc->txd.cookie == cookie) {
			if (mchan->finished) {
				dma_set_residue(txstate, 0);
				if (!mchan->cyclic)
					dma_cookie_complete(&desc->txd);
				/*
				 * If IRQ is disabled, we need to move the
				 * descriptor to the free list and ack it now
				 */
				if (!mchan->irq_en) {
					async_tx_ack(&desc->txd);
					list_move_tail(&desc->node,
						       &mchan->free_desc);
				}
			} else {
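				/*
				 * Still in flight: estimate the residue from
				 * the number of completed periods/list items
				 * reported by the interrupt handler.
				 */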
				total_xfered = desc->sample_count *
					desc->sample_size;
				residue = desc->buffer_size - total_xfered;
				dma_set_residue(txstate, residue);
			}
		}
	}

	/* Reset status */
	mchan->finished = false;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return dma_status;
}

/*
 * mdc_slave_config: Configure slave config for a DMA channel
 * @mchan: The MDC DMA channel
 * @config: The slave configuration passed by the caller
 */
static int mdc_slave_config(struct mdc_chan *mchan,
			    struct dma_slave_config *config)
{

	if (config->direction == DMA_MEM_TO_DEV) {
		config->dst_addr_width = slave_check_width(mchan,
						   config->dst_addr_width);
	} else if (config->direction == DMA_DEV_TO_MEM) {
		config->src_addr_width = slave_check_width(mchan,
						   config->src_addr_width);
	} else {
		dev_err(mchan->mdma->dma_slave.dev,
			"Unsupported slave direction\n");
		/* The caller needs to be fixed */
		BUG();
		return -EINVAL;
	}

	/* Copy the rest of the slave config */
	memcpy(&mchan->dma_config, config, sizeof(*config));

	return 0;
}
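
/*
 * Typical slave usage (illustrative sketch only; the field values below are
 * assumptions for an imaginary peripheral, not taken from a real user):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_bus_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * dmaengine_slave_config() reaches this driver through
 * mdc_control(DMA_SLAVE_CONFIG).
 */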

/*
 * mdc_terminate_all: Stop transfers and free lists
 * @chan: The MDC DMA channel
 */
static int mdc_terminate_all(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma_desc *desc, *safe;
	unsigned long flags;

	/* Remove all descriptors */
	spin_lock_irqsave(&mchan->lock, flags);

	MDC_CANCEL((unsigned long)mchan->mdma->base_addr,
			   mchan->a_chan_nr);
	wmb();

	/* Safe removal of list items */
	list_for_each_entry_safe(desc, safe, &mchan->free_desc, node) {
		list_del(&desc->node);
		kfree(desc);
	}
	/* Safe removal of list items */
	list_for_each_entry_safe(desc, safe, &mchan->active_desc, node) {
		list_del(&desc->node);
		kfree(desc);
	}

	/* Safe removal of list items */
	list_for_each_entry_safe(desc, safe, &mchan->ready_desc, node) {
		list_del(&desc->node);
		kfree(desc);
	}

	/* Reset cookie for this channel */
	dma_cookie_init(chan);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

/*
 * mdc_control: Control cmds for the DMA channel
 * @chan: DMA channel
 * @cmd: Command (as passed by the dmaengine infrastructure)
 * @arg: Opaque data. Can be anything depending on the cmd argument
 */
static int mdc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	int ret = 0;
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct dma_slave_config *config = NULL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return mdc_terminate_all(chan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = mdc_slave_config(mchan, config);
		break;
	default:
	case DMA_PAUSE:
	case DMA_RESUME:
		ret = -ENOSYS;
	}

	return ret;
}

/*
 * mdc_issue_pending: Start the first pending transfer on the channel
 * @chan: The MDC DMA channel
 */
static void mdc_issue_pending(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&mchan->lock, flags);

	/* Make sure the xfer list is not empty */
	if (list_empty(&mchan->active_desc)) {
		spin_unlock_irqrestore(&mchan->lock, flags);
		return;
	}

	/* Fetch first descriptor */
	desc = list_first_entry(&mchan->active_desc,
				typeof(*desc), node);

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Make the transfer */
	if (mchan->is_list) {
		MDC_RSET_LIST_NODE_ADDR((unsigned long)mchan->mdma->base_addr,
					mchan->a_chan_nr, desc->start_list);
		wmb();
		MDC_LIST_ENABLE((unsigned long)mchan->mdma->base_addr,
				mchan->a_chan_nr);
		dev_dbg(mchan->mdma->dma_slave.dev,
			"Starting list transfer for channel %d\n",
			mchan->a_chan_nr);
	} else { /* Single shot */
		MDC_REG_ENABLE((unsigned long)mchan->mdma->base_addr,
		       mchan->a_chan_nr);
		dev_dbg(mchan->mdma->dma_slave.dev,
			"Starting single transfer for channel %d\n",
			mchan->a_chan_nr);

	}
}

/*
 * mdc_dma_init: Initialize the embedded dma_device structure.
 * @mdma: The MDC DMA device whose dma_slave structure is initialized.
 * @dev: Device that the dma_slave structure belongs to.
 */
static void mdc_dma_init(struct mdc_dmadev *mdma, struct device *dev)
{
	mdma->dma_slave.chancnt = MAX_MDC_DMA_CHANNELS;
	mdma->dma_slave.device_prep_slave_sg = mdc_prep_slave_sg;
	mdma->dma_slave.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
	mdma->dma_slave.device_prep_dma_memcpy = mdc_prep_memcpy;
	mdma->dma_slave.device_alloc_chan_resources = mdc_alloc_chan_resources;
	mdma->dma_slave.device_free_chan_resources = mdc_free_chan_resources;
	mdma->dma_slave.device_tx_status = mdc_tx_status;
	mdma->dma_slave.device_issue_pending = mdc_issue_pending;
	mdma->dma_slave.device_control = mdc_control;
	mdma->dma_slave.dev = dev;

	INIT_LIST_HEAD(&mdma->dma_slave.channels);
}

/*
 * mdc_get_current_config: Get current DMA configuration.
 * @mdma: MDC DMA device
 */
static int __init mdc_get_current_config(struct mdc_dmadev *mdma)
{
	unsigned long mdc_base_address = (unsigned long)mdma->base_addr;
	mdma->config.dma_channels = _MDC_READ_GLOBAL_REG_FIELD(mdc_base_address,
							MDC_NUM_CONTEXTS);
	mdma->config.dma_threads = 1 << _MDC_READ_GLOBAL_REG_FIELD(
							mdc_base_address,
							MDC_THREADID_WIDTH);
	mdma->config.bus_width = _MDC_READ_GLOBAL_REG_FIELD(mdc_base_address,
							MDC_SYS_DATA_WIDTH);

	if (!mdma->config.bus_width || !mdma->config.dma_channels)
		return -1;

	mdma->last_fthread = mdma->config.dma_threads / 2;
	mdma->last_sthread = mdma->config.dma_threads % 2;

	return 0;
}

/*
 * mdc_chan_init: Initialize all DMA channels
 * @mdma: MDC DMA device
 * @mchan: Array of DMA channels for this device
 */
static void __init mdc_chan_init(struct mdc_dmadev *mdma,
				 struct mdc_chan *mchan)
{
	int i;
	for (i = 0; i < MAX_MDC_DMA_CHANNELS; i++) {
		struct mdc_chan *mdc_chan = &mchan[i];
		mdc_chan->mdma = mdma;
		mdc_chan->dchan.device = &mdma->dma_slave;
		mdc_chan->a_chan_nr = i;
		mdc_chan->periph = 0;
		spin_lock_init(&mdc_chan->lock);
		if ((i < mdma->config.dma_channels) &&
		    (mdma->callbacks->available(i)))
			mdma->slave_channel[i].alloc_status =
				IMG_DMA_CHANNEL_AVAILABLE;
		else
			mdma->slave_channel[i].alloc_status =
				IMG_DMA_CHANNEL_RESERVED;
		snprintf(mdc_chan->name, sizeof(mdc_chan->name), "mdc-chan-%d",
			mdc_chan->a_chan_nr);
		/* init tasklet for this channel */
		tasklet_init(&mdc_chan->tasklet, mdc_dma_tasklet,
			     (unsigned long)mdc_chan);

		/* init the list of descriptors for this channel */
		INIT_LIST_HEAD(&mdc_chan->active_desc);
		INIT_LIST_HEAD(&mdc_chan->free_desc);
		INIT_LIST_HEAD(&mdc_chan->ready_desc);

		/* Add channel to the DMA channel linked-list */
		list_add_tail(&mdc_chan->dchan.device_node,
			      &mdma->dma_slave.channels);
	}
}

int mdc_dma_probe(struct platform_device *pdev,
		  const struct img_mdc_soc_callbacks *callbacks)
{
	struct device *dev = &pdev->dev;
	struct mdc_dmadev *mdma;
	struct resource *mem_resource;
	int status = 0;

	/* Are we using a wrapper to initialize this driver? */
	spin_lock(&mdc_dma_lock);
	if (pdev->dev.driver != &img_mdc_dma_driver.driver)
		wrapper_driver = pdev->dev.driver;
	spin_unlock(&mdc_dma_lock);

	if (!pdev->dev.of_node)
		return -ENOENT;

	mdma = devm_kzalloc(dev, sizeof(*mdma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	spin_lock_init(&mdma->lock);

	mem_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mdma->base_addr = devm_request_and_ioremap(dev, mem_resource);
	if (!mdma->base_addr) {
		dev_err(dev, "unable to ioremap registers\n");
		status = -ENOMEM;
		goto out;
	}

	/*
	 * Set DMA controller capabilities.
	 * The controller can do DEV <-> MEM and MEM <-> MEM transfers.
	 */
	dma_cap_zero(mdma->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, mdma->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, mdma->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, mdma->dma_slave.cap_mask);

	/* Set callbacks */
	mdc_dma_init(mdma, dev);

	/*
	 * Set SoC callbacks. It's very unlikely
	 * for the driver to work without SoC specific
	 * alloc/free callbacks
	 */
	if (wrapper_driver)
		BUG_ON(!callbacks);

	mdma->callbacks = callbacks;

	/* Get configuration */
	if (mdc_get_current_config(mdma)) {
		status = -EINVAL;
		goto out;
	}

	/* Initialize channels */
	mdc_chan_init(mdma, mdma->slave_channel);

	/* Register the device */
	status = dma_async_device_register(&mdma->dma_slave);

	if (status)
		goto out;

	platform_set_drvdata(pdev, mdma);

	dev_dbg(dev, "MDC DMA hardware supports %d channels and %d threads\n",
		mdma->config.dma_channels,
		mdma->config.dma_threads);

	status = of_dma_controller_register(pdev->dev.of_node,
					    of_dma_mdc_xlate, mdma);
	if (status)
		dev_warn(dev, "Failed to register OF DMA controller\n");

	return 0;

out:
	return status;
}
EXPORT_SYMBOL_GPL(mdc_dma_probe);

static int mdc_probe(struct platform_device *pdev)
{
	return mdc_dma_probe(pdev, NULL);
}

/* stop hardware and remove the driver */
static int mdc_remove(struct platform_device *pdev)
{
	struct mdc_dmadev *mdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdma->dma_slave);

	return 0;
}

static const struct of_device_id mdc_dma_id[] = {
		{ .compatible = "img,mdc-dma" },
		{},
};
MODULE_DEVICE_TABLE(of, mdc_dma_id);

#ifdef CONFIG_PM_SLEEP

static int img_mdc_dma_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdc_dmadev *mdma = platform_get_drvdata(pdev);

	if (mdma->callbacks->suspend)
		mdma->pm_data = mdma->callbacks->suspend();

	return 0;
}

static int img_mdc_dma_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdc_dmadev *mdma = platform_get_drvdata(pdev);

	if (mdma->callbacks->resume)
		mdma->callbacks->resume(mdma->pm_data);
	return 0;
}
#else
#define img_mdc_dma_suspend NULL
#define img_mdc_dma_resume NULL
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_mdc_dma_pm_ops = {
	.suspend_noirq = img_mdc_dma_suspend,
	.resume_noirq = img_mdc_dma_resume,
};

static struct platform_driver img_mdc_dma_driver = {
	.driver	= {
		.name	= "img-mdc-dma",
		.owner	= THIS_MODULE,
		.pm	= &img_mdc_dma_pm_ops,
		.of_match_table = mdc_dma_id,
	},
	.remove		= mdc_remove,
};

static int __init mdc_init(void)
{
	return platform_driver_probe(&img_mdc_dma_driver, mdc_probe);
}
subsys_initcall(mdc_init);


static void mdc_exit(void)
{
	platform_driver_unregister(&img_mdc_dma_driver);
}
module_exit(mdc_exit);

MODULE_ALIAS("img-mdc-dma");	/* for platform bus hotplug */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Imagination Technologies LTD.");
MODULE_DESCRIPTION("IMG - MDC DMA Controller");