/*
 *  linux/include/asm-arm/io.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Modifications:
 *  16-Sep-1996	RMK	Inlined the inx/outx functions & optimised for both
 *			constant addresses and variable addresses.
 *  04-Dec-1997	RMK	Moved a lot of this stuff to the new architecture
 *			specific IO header files.
 *  27-Mar-1999	PJB	Second parameter of memcpy_toio is const..
 *  04-Apr-1999	PJB	Added check_signature.
 *  12-Dec-1999	RMK	More cleanups
 *  18-Jun-2000	RMK	Removed virt_to_* and friends definitions
 */
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm/barriers.h>
#if 0	/* XXX###XXX */
#include <asm/arch/hardware.h>
#endif	/* XXX###XXX */

static inline void sync(void)
{
}

/*
 * Generic virtual read/write.  The byte, half-word, word and double-word
 * variants are all defined here as plain volatile accesses.
 */
#define __arch_getb(a)			(*(volatile unsigned char *)(a))
#define __arch_getw(a)			(*(volatile unsigned short *)(a))
#define __arch_getl(a)			(*(volatile unsigned int *)(a))
#define __arch_getq(a)			(*(volatile unsigned long long *)(a))

#define __arch_putb(v,a)		(*(volatile unsigned char *)(a) = (v))
#define __arch_putw(v,a)		(*(volatile unsigned short *)(a) = (v))
#define __arch_putl(v,a)		(*(volatile unsigned int *)(a) = (v))
#define __arch_putq(v,a)		(*(volatile unsigned long long *)(a) = (v))
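
/*
 * Illustrative sketch only (kept under #if 0, so it is never compiled):
 * how a driver might touch a memory-mapped register through the raw
 * accessors above.  The base address and register offset are hypothetical
 * placeholders, not real hardware.
 */
#if 0	/* example only */
#define EXAMPLE_UART_BASE	0x10000000UL	/* hypothetical base */
#define EXAMPLE_UART_STAT	0x04		/* hypothetical offset */

static inline unsigned int example_uart_status(void)
{
	/* a volatile 32-bit load from the given address */
	return __arch_getl(EXAMPLE_UART_BASE + EXAMPLE_UART_STAT);
}
#endif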

static inline void __raw_writesb(unsigned long addr, const void *data,
				 int bytelen)
{
	const uint8_t *buf = (const uint8_t *)data;

	while (bytelen--)
		__arch_putb(*buf++, addr);
}

static inline void __raw_writesw(unsigned long addr, const void *data,
				 int wordlen)
{
	const uint16_t *buf = (const uint16_t *)data;

	while (wordlen--)
		__arch_putw(*buf++, addr);
}

static inline void __raw_writesl(unsigned long addr, const void *data,
				 int longlen)
{
	const uint32_t *buf = (const uint32_t *)data;

	while (longlen--)
		__arch_putl(*buf++, addr);
}

static inline void __raw_readsb(unsigned long addr, void *data, int bytelen)
{
	uint8_t *buf = (uint8_t *)data;

	while (bytelen--)
		*buf++ = __arch_getb(addr);
}

static inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
{
	uint16_t *buf = (uint16_t *)data;

	while (wordlen--)
		*buf++ = __arch_getw(addr);
}

static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
{
	uint32_t *buf = (uint32_t *)data;

	while (longlen--)
		*buf++ = __arch_getl(addr);
}

#define __raw_writeb(v,a)	__arch_putb(v,a)
#define __raw_writew(v,a)	__arch_putw(v,a)
#define __raw_writel(v,a)	__arch_putl(v,a)
#define __raw_writeq(v,a)	__arch_putq(v,a)

#define __raw_readb(a)		__arch_getb(a)
#define __raw_readw(a)		__arch_getw(a)
#define __raw_readl(a)		__arch_getl(a)
#define __raw_readq(a)		__arch_getq(a)

/*
 * TODO: The kernel offers more advanced barrier primitives; using them
 * instead of the simple ones here might be advantageous.
 */
#define mb()		dsb()
#define rmb()		dsb()
#define wmb()		dsb()
#define __iormb()	dmb()
#define __iowmb()	dmb()

#define smp_processor_id()	0

#define writeb(v,c)	({ u8  __v = v; __iowmb(); __arch_putb(__v,c); __v; })
#define writew(v,c)	({ u16 __v = v; __iowmb(); __arch_putw(__v,c); __v; })
#define writel(v,c)	({ u32 __v = v; __iowmb(); __arch_putl(__v,c); __v; })
#define writeq(v,c)	({ u64 __v = v; __iowmb(); __arch_putq(__v,c); __v; })

#define readb(c)	({ u8  __v = __arch_getb(c); __iormb(); __v; })
#define readw(c)	({ u16 __v = __arch_getw(c); __iormb(); __v; })
#define readl(c)	({ u32 __v = __arch_getl(c); __iormb(); __v; })
#define readq(c)	({ u64 __v = __arch_getq(c); __iormb(); __v; })
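
/*
 * Illustrative sketch only (kept under #if 0, so it is never compiled):
 * writel() issues a DMB before the MMIO store, so updates to a descriptor
 * in normal memory are observable by the device before the doorbell write
 * below; the __raw_/relaxed variants give no such guarantee.  The structure
 * layout and register offset are hypothetical placeholders.
 */
#if 0	/* example only */
struct example_desc {
	u32 addr;
	u32 len;
	u32 flags;
};

static inline void example_start_dma(void *regs, struct example_desc *desc,
				     u32 desc_phys)
{
	desc->len = 512;		/* plain memory writes ... */
	desc->flags = 1;		/* ... filling in the descriptor */

	/* barrier, then MMIO store: device sees the descriptor first */
	writel(desc_phys, regs + 0x10);	/* hypothetical doorbell register */
}
#endif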

/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.
 */
#define readb_relaxed(c)	({ u8  __r = __raw_readb(c); __r; })
#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16) \
						__raw_readw(c)); __r; })
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32) \
						__raw_readl(c)); __r; })
#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64) \
						__raw_readq(c)); __r; })

#define writeb_relaxed(v, c)	((void)__raw_writeb((v), (c)))
#define writew_relaxed(v, c)	((void)__raw_writew((__force u16) \
						    cpu_to_le16(v), (c)))
#define writel_relaxed(v, c)	((void)__raw_writel((__force u32) \
						    cpu_to_le32(v), (c)))
#define writeq_relaxed(v, c)	((void)__raw_writeq((__force u64) \
						    cpu_to_le64(v), (c)))
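
/*
 * Illustrative sketch only (kept under #if 0, so it is never compiled):
 * a relaxed read suits tight polling loops where no ordering against
 * normal memory is needed; the final data read uses readl() so it is
 * ordered before whatever consumes the data.  The offsets and ready bit
 * are hypothetical placeholders.
 */
#if 0	/* example only */
static inline u32 example_wait_and_read(void *regs)
{
	while (!(readl_relaxed(regs + 0x08) & 0x1))	/* hypothetical status */
		;

	return readl(regs + 0x0c);			/* hypothetical data */
}
#endif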

/*
 * The compiler seems to be incapable of optimising constants
 * properly.  Spell it out to the compiler in some cases.
 * These are only valid for small values of "off" (< 1<<12)
 */
#define __raw_base_writeb(val,base,off)	__arch_base_putb(val,base,off)
#define __raw_base_writew(val,base,off)	__arch_base_putw(val,base,off)
#define __raw_base_writel(val,base,off)	__arch_base_putl(val,base,off)

#define __raw_base_readb(base,off)	__arch_base_getb(base,off)
#define __raw_base_readw(base,off)	__arch_base_getw(base,off)
#define __raw_base_readl(base,off)	__arch_base_getl(base,off)

/*
 * Clear and set bits in one shot.  These macros can be used to clear and
 * set multiple bits in a register using a single call.  They can also be
 * used to set a multiple-bit bit pattern using a mask, by specifying the
 * mask in the 'clear' parameter and the new bit pattern in the 'set'
 * parameter.  An illustrative usage sketch follows the definitions below.
 */

#define out_arch(type,endian,a,v)	__raw_write##type(cpu_to_##endian(v),a)
#define in_arch(type,endian,a)		endian##_to_cpu(__raw_read##type(a))

#define out_le64(a,v)	out_arch(q,le64,a,v)
#define out_le32(a,v)	out_arch(l,le32,a,v)
#define out_le16(a,v)	out_arch(w,le16,a,v)

#define in_le64(a)	in_arch(q,le64,a)
#define in_le32(a)	in_arch(l,le32,a)
#define in_le16(a)	in_arch(w,le16,a)

#define out_be64(a,v)	out_arch(q,be64,a,v)
#define out_be32(a,v)	out_arch(l,be32,a,v)
#define out_be16(a,v)	out_arch(w,be16,a,v)

#define in_be64(a)	in_arch(q,be64,a)
#define in_be32(a)	in_arch(l,be32,a)
#define in_be16(a)	in_arch(w,be16,a)

#define out_64(a,v)	__raw_writeq(v,a)
#define out_32(a,v)	__raw_writel(v,a)
#define out_16(a,v)	__raw_writew(v,a)
#define out_8(a,v)	__raw_writeb(v,a)

#define in_64(a)	__raw_readq(a)
#define in_32(a)	__raw_readl(a)
#define in_16(a)	__raw_readw(a)
#define in_8(a)		__raw_readb(a)

#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_32(addr, clear) clrbits(32, addr, clear)
#define setbits_32(addr, set) setbits(32, addr, set)
#define clrsetbits_32(addr, clear, set) clrsetbits(32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_16(addr, clear) clrbits(16, addr, clear)
#define setbits_16(addr, set) setbits(16, addr, set)
#define clrsetbits_16(addr, clear, set) clrsetbits(16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)

#define clrbits_be64(addr, clear) clrbits(be64, addr, clear)
#define setbits_be64(addr, set) setbits(be64, addr, set)
#define clrsetbits_be64(addr, clear, set) clrsetbits(be64, addr, clear, set)

#define clrbits_le64(addr, clear) clrbits(le64, addr, clear)
#define setbits_le64(addr, set) setbits(le64, addr, set)
#define clrsetbits_le64(addr, clear, set) clrsetbits(le64, addr, clear, set)

#define clrbits_64(addr, clear) clrbits(64, addr, clear)
#define setbits_64(addr, set) setbits(64, addr, set)
#define clrsetbits_64(addr, clear, set) clrsetbits(64, addr, clear, set)
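
/*
 * Illustrative sketch only (kept under #if 0, so it is never compiled):
 * read-modify-write of a little-endian 32-bit register with the accessors
 * above.  The register layout (a 4-bit divider field in bits 7:4 and an
 * enable bit 0) is a hypothetical placeholder.
 */
#if 0	/* example only */
static inline void example_set_clock(void *reg, u32 div)
{
	/* clear the divider field and program the new value in one access */
	clrsetbits_le32(reg, 0xf << 4, (div & 0xf) << 4);

	/* set the enable bit without disturbing the other bits */
	setbits_le32(reg, 1 << 0);
}
#endif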

/*
 * Now, pick up the machine-defined IO definitions
 */
#if 0	/* XXX###XXX */
#include <asm/arch/io.h>
#endif	/* XXX###XXX */

/*
 *  IO port access primitives
 *  -------------------------
 *
 * The ARM doesn't have special IO access instructions; all IO is memory
 * mapped.  Note that these are defined to perform little endian accesses
 * only.  Their primary purpose is to access PCI and ISA peripherals.
 *
 * Note that for a big endian machine, this implies that the following
 * big endian mode connectivity is in place, as described by numerous
 * ARM documents:
 *
 *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
 *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
 *
 * The machine specific io.h include defines __io to translate an "IO"
 * address to a memory address.
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.  Note that
 * __raw_* do not guarantee this behaviour.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
#ifdef __io
#define outb(v,p)			__raw_writeb(v,__io(p))
#define outw(v,p)			__raw_writew(cpu_to_le16(v),__io(p))
#define outl(v,p)			__raw_writel(cpu_to_le32(v),__io(p))

#define inb(p)	({ unsigned int __v = __raw_readb(__io(p)); __v; })
#define inw(p)	({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; })
#define inl(p)	({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; })

#define outsb(p,d,l)			__raw_writesb(__io(p),d,l)
#define outsw(p,d,l)			__raw_writesw(__io(p),d,l)
#define outsl(p,d,l)			__raw_writesl(__io(p),d,l)

#define insb(p,d,l)			__raw_readsb(__io(p),d,l)
#define insw(p,d,l)			__raw_readsw(__io(p),d,l)
#define insl(p,d,l)			__raw_readsl(__io(p),d,l)
#endif
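
/*
 * Illustrative sketch only (kept under #if 0, so it is never compiled):
 * with __io() provided by the machine io.h, a 16550-style UART in the
 * emulated port space could be driven like this.  The LSR offset (5) and
 * THR-empty bit (0x20) follow the 16550 convention; the port base itself
 * is whatever the board assigns.
 */
#if 0	/* example only */
static inline void example_serial_putc(unsigned int port, char c)
{
	while (!(inb(port + 5) & 0x20))	/* wait for the transmitter */
		;

	outb(c, port);			/* transmit holding register */
}
#endif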

#define outb_p(val,port)		outb((val),(port))
#define outw_p(val,port)		outw((val),(port))
#define outl_p(val,port)		outl((val),(port))
#define inb_p(port)			inb((port))
#define inw_p(port)			inw((port))
#define inl_p(port)			inl((port))

#define outsb_p(port,from,len)		outsb(port,from,len)
#define outsw_p(port,from,len)		outsw(port,from,len)
#define outsl_p(port,from,len)		outsl(port,from,len)
#define insb_p(port,to,len)		insb(port,to,len)
#define insw_p(port,to,len)		insw(port,to,len)
#define insl_p(port,to,len)		insl(port,to,len)

#define writesl(a, d, s)	__raw_writesl((unsigned long)a, d, s)
#define readsl(a, d, s)		__raw_readsl((unsigned long)a, d, s)
#define writesw(a, d, s)	__raw_writesw((unsigned long)a, d, s)
#define readsw(a, d, s)		__raw_readsw((unsigned long)a, d, s)
#define writesb(a, d, s)	__raw_writesb((unsigned long)a, d, s)
#define readsb(a, d, s)		__raw_readsb((unsigned long)a, d, s)

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, non-write-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version; the PCI specific version
 * is in pci.h.
 */
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
extern void consistent_sync(void *vaddr, size_t size, int rw);
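
/*
 * Illustrative sketch only (kept under #if 0, so it is never compiled):
 * how a driver might use the declarations above, assuming the platform
 * actually provides implementations of them.  The flag value (0) and
 * buffer size are placeholders.
 */
#if 0	/* example only */
static inline void *example_alloc_ring(dma_addr_t *dma_handle)
{
	/* returns a CPU pointer; *dma_handle receives the bus address */
	return consistent_alloc(0, 4096, dma_handle);
}
#endif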

/*
 * String version of IO memory access ops:
 */
extern void _memcpy_fromio(void *, unsigned long, size_t);
extern void _memcpy_toio(unsigned long, const void *, size_t);
extern void _memset_io(unsigned long, int, size_t);

extern void __readwrite_bug(const char *fn);

/* Optimized copy functions to read from/write to IO space */
#ifdef CONFIG_ARM64
/*
 * Copy data from IO memory space to "real" memory space.
 */
static inline
void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
	while (count && !IS_ALIGNED((unsigned long)from, 8)) {
		*(u8 *)to = __raw_readb(from);
		from++;
		to++;
		count--;
	}

	while (count >= 8) {
		*(u64 *)to = __raw_readq(from);
		from += 8;
		to += 8;
		count -= 8;
	}

	while (count) {
		*(u8 *)to = __raw_readb(from);
		from++;
		to++;
		count--;
	}
}

/*
 * Copy data from "real" memory space to IO memory space.
 */
static inline
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
	while (count && !IS_ALIGNED((unsigned long)to, 8)) {
		__raw_writeb(*(u8 *)from, to);
		from++;
		to++;
		count--;
	}

	while (count >= 8) {
		__raw_writeq(*(u64 *)from, to);
		from += 8;
		to += 8;
		count -= 8;
	}

	while (count) {
		__raw_writeb(*(u8 *)from, to);
		from++;
		to++;
		count--;
	}
}

/*
 * "memset" on IO memory space.
 */
static inline
void __memset_io(volatile void __iomem *dst, int c, size_t count)
{
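	/* replicate the fill byte into each byte of a 64-bit word */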
	u64 qc = (u8)c;

	qc |= qc << 8;
	qc |= qc << 16;
	qc |= qc << 32;

	while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
		__raw_writeb(c, dst);
		dst++;
		count--;
	}

	while (count >= 8) {
		__raw_writeq(qc, dst);
		dst += 8;
		count -= 8;
	}

	while (count) {
		__raw_writeb(c, dst);
		dst++;
		count--;
	}
}
#endif /* CONFIG_ARM64 */

#ifdef CONFIG_ARM64
#define memset_io(a, b, c)		__memset_io((a), (b), (c))
#define memcpy_fromio(a, b, c)		__memcpy_fromio((a), (b), (c))
#define memcpy_toio(a, b, c)		__memcpy_toio((a), (b), (c))
#else
#define memset_io(a, b, c)		memset((void *)(a), (b), (c))
#define memcpy_fromio(a, b, c)		memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c)		memcpy((void *)(a), (b), (c))
#endif
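
/*
 * Illustrative sketch only (kept under #if 0, so it is never compiled):
 * copying a block out of a device's memory-mapped buffer into normal
 * memory with the helper above.  The source pointer and length are
 * hypothetical placeholders.
 */
#if 0	/* example only */
static inline void example_read_rx_frame(void *dst, const void *mmio_buf)
{
	memcpy_fromio(dst, mmio_buf, 256);
}
#endif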

/*
 * If this architecture has ISA IO, then define the isa_read/isa_write
 * macros.
 */
#ifdef __mem_isa

#define isa_readb(addr)			__raw_readb(__mem_isa(addr))
#define isa_readw(addr)			__raw_readw(__mem_isa(addr))
#define isa_readl(addr)			__raw_readl(__mem_isa(addr))
#define isa_writeb(val,addr)		__raw_writeb(val,__mem_isa(addr))
#define isa_writew(val,addr)		__raw_writew(val,__mem_isa(addr))
#define isa_writel(val,addr)		__raw_writel(val,__mem_isa(addr))
#define isa_memset_io(a,b,c)		_memset_io(__mem_isa(a),(b),(c))
#define isa_memcpy_fromio(a,b,c)	_memcpy_fromio((a),__mem_isa(b),(c))
#define isa_memcpy_toio(a,b,c)		_memcpy_toio(__mem_isa((a)),(b),(c))

#define isa_eth_io_copy_and_sum(a,b,c,d) \
				eth_copy_and_sum((a),__mem_isa(b),(c),(d))

static inline int
isa_check_signature(unsigned long io_addr, const unsigned char *signature,
		    int length)
{
	int retval = 0;
	do {
		if (isa_readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
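
/*
 * Illustrative sketch only (kept under #if 0, so it is never compiled):
 * probing ISA memory space for an option-ROM style 0x55 0xAA signature.
 * The probe address is a hypothetical placeholder.
 */
#if 0	/* example only */
static inline int example_probe_card(void)
{
	static const unsigned char sig[] = { 0x55, 0xaa };

	return isa_check_signature(0xc8000, sig, sizeof(sig));
}
#endif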

#else	/* __mem_isa */

#define isa_readb(addr)			(__readwrite_bug("isa_readb"),0)
#define isa_readw(addr)			(__readwrite_bug("isa_readw"),0)
#define isa_readl(addr)			(__readwrite_bug("isa_readl"),0)
#define isa_writeb(val,addr)		__readwrite_bug("isa_writeb")
#define isa_writew(val,addr)		__readwrite_bug("isa_writew")
#define isa_writel(val,addr)		__readwrite_bug("isa_writel")
#define isa_memset_io(a,b,c)		__readwrite_bug("isa_memset_io")
#define isa_memcpy_fromio(a,b,c)	__readwrite_bug("isa_memcpy_fromio")
#define isa_memcpy_toio(a,b,c)		__readwrite_bug("isa_memcpy_toio")

#define isa_eth_io_copy_and_sum(a,b,c,d) \
				__readwrite_bug("isa_eth_io_copy_and_sum")

#define isa_check_signature(io,sig,len)	(0)

#endif	/* __mem_isa */
#endif	/* __KERNEL__ */

#include <asm-generic/io.h>
#include <iotrace.h>

#endif	/* __ASM_ARM_IO_H */