#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern struct percpu_counter vm_committed_as;

static inline void vm_acct_memory(long pages)
{
        percpu_counter_add(&vm_committed_as, pages);
}

static inline void vm_unacct_memory(long pages)
{
        vm_acct_memory(-pages);
}
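
/*
 * Usage sketch (illustrative only, not part of this header): callers
 * charge pages against the overcommit total up front and undo the
 * charge if the operation fails or is torn down later; "setup_failed"
 * and "npages" are hypothetical names:
 *
 *      vm_acct_memory(npages);
 *      if (setup_failed)
 *              vm_unacct_memory(npages);
 */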

/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline int arch_validate_prot(unsigned long prot)
{
        return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
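
/*
 * For example, with the generic definition above, a request carrying
 * only the four permitted bits passes, while any other bit (0x40 is an
 * arbitrary unassigned value here) fails:
 *
 *      arch_validate_prot(PROT_READ | PROT_WRITE)      == 1
 *      arch_validate_prot(PROT_READ | 0x40)            == 0
 */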

/*
 * Optimisation macro.  It is equivalent to:
 *      (x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
   : ((x) & (bit1)) / ((bit1) / (bit2)))
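
/*
 * Worked example with hypothetical single-bit values: for bit1 == 0x4
 * and bit2 == 0x10, bit1 <= bit2 holds, so the macro expands to
 * ((x) & 0x4) * (0x10 / 0x4), i.e. ((x) & 0x4) * 4, which is 0x10 when
 * the 0x4 bit is set in x and 0 otherwise.  The multiplier (or divisor,
 * in the bit1 > bit2 case) is a compile-time constant, so no
 * conditional branch is needed.
 */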

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
               _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
               arch_calc_vm_prot_bits(prot);
}
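
/*
 * E.g. with the generic (zero) arch_calc_vm_prot_bits above,
 * calc_vm_prot_bits(PROT_READ | PROT_WRITE) == VM_READ | VM_WRITE.
 */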

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
        return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
               _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
               _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
}
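
/*
 * Likewise, calc_vm_flag_bits(MAP_LOCKED | MAP_GROWSDOWN) yields
 * VM_LOCKED | VM_GROWSDOWN; mmap flag bits not listed above are
 * simply dropped from the result.
 */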

#endif /* _LINUX_MMAN_H */