const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
-const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
-const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
-
static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
finalize);
-const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
- sst.count);
-const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
- finalize);
-
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
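Both hunks above drop an adjacent duplicated copy of the offsetof() constants that the SHA glue code exports so the ARMv8 Crypto Extensions assembly can address sst.count and finalize directly. As a reference point, here is a minimal userspace sketch of how such offsets are derived; the struct layouts below are abridged stand-ins, not the kernel's sha1_ce_state definition.

#include <stddef.h>
#include <stdio.h>

struct sha1_state_demo {
	unsigned int state[5];
	unsigned long long count;	/* bytes hashed so far */
	unsigned char buffer[64];
};

struct sha1_ce_state_demo {
	struct sha1_state_demo sst;	/* generic software hash state */
	unsigned int finalize;		/* flag consumed by the asm core */
};

int main(void)
{
	/* Mirrors sha1_ce_offsetof_count / sha1_ce_offsetof_finalize. */
	printf("sst.count at %zu, finalize at %zu\n",
	       offsetof(struct sha1_ce_state_demo, sst.count),
	       offsetof(struct sha1_ce_state_demo, finalize));
	return 0;
}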
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <linux/psci.h>
+#include <linux/arm-smccc.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
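The new <linux/psci.h> and <linux/arm-smccc.h> includes bring in the firmware-call interface used by the SSBD (Spectre-v4) handling in this file. For orientation only, a hedged sketch of the C-level equivalent of the ARCH_WORKAROUND_2 call, assuming an SMCCC v1.1 conduit via HVC; the helper name is illustrative, not from the patch.

#include <linux/arm-smccc.h>

/* Illustrative helper: ask firmware to turn the Spectre-v4
 * workaround on or off for the current CPU. */
static void ssbd_set_state_demo(bool enable)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, enable, NULL);
}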
alternative_cb_end
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
cbz \tmp2, \targ
- ldr \tmp2, [tsk, #TI_FLAGS]
+ ldr \tmp2, [tsk, #TSK_TI_FLAGS]
tbnz \tmp2, #TIF_SSBD, \targ
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
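This hunk switches the flag load in the SSBD macro from TI_FLAGS to TSK_TI_FLAGS, i.e. the offset of thread_info.flags inside task_struct once thread_info is embedded in the task (CONFIG_THREAD_INFO_IN_TASK), so that tsk points at the task rather than a separate thread_info. A sketch of how that constant is generated, in the asm-offsets.c style and assuming THREAD_INFO_IN_TASK:

/* asm-offsets.c style sketch: export the thread_info.flags offset so
 * assembly like the hunk above can do "ldr \tmp2, [tsk, #TSK_TI_FLAGS]". */
#include <linux/kbuild.h>
#include <linux/sched.h>

int main(void)
{
	DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
	return 0;
}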
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), inf->name, sig,
addr, esr);
- show_pte(tsk->mm, addr);
+ show_pte(addr);
#ifdef CONFIG_AMLOGIC_USER_FAULT
show_all_pfn(tsk, regs);
#endif /* CONFIG_AMLOGIC_USER_FAULT */
}
static ssize_t show_hotplug_max_cpus(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
unsigned int max = 0;
unsigned int c = 0;
}
static ssize_t store_hotplug_max_cpus(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
+ struct kobj_attribute *attr, const char *buf, size_t count)
{
unsigned int input;
unsigned int max;
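Both prototypes above are corrected to take struct kobj_attribute *, which is what the kobject sysfs show/store callbacks are actually invoked with; declaring them with struct attribute * mismatches the expected signatures. A minimal sketch of a matching pair, using a hypothetical "example" attribute rather than anything from this driver:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned int example_val;

static ssize_t example_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", example_val);
}

static ssize_t example_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	if (kstrtouint(buf, 10, &example_val))
		return -EINVAL;
	return count;
}

/* Registered elsewhere via sysfs_create_file(kobj, &example_attr.attr). */
static struct kobj_attribute example_attr =
	__ATTR(example, 0644, example_show, example_store);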
#define __inline __inline __attribute__((unused)) notrace
#endif
-#define __inline__ inline
-#define __inline inline
+//#define __inline__ inline
+//#define __inline inline
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))
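The surviving defines keep __always_inline as a forced inline while the plain remaps of __inline__/__inline to inline are commented out. A plain C illustration (userspace, not kernel code) of the difference between the bare inline hint and the always_inline attribute:

/* "inline" is only a hint; __attribute__((always_inline)) makes
 * GCC/Clang inline the call even when inlining would otherwise be
 * skipped, e.g. at -O0. */
#include <stdio.h>

static inline int hint_only(int x)
{
	return x + 1;
}

static inline __attribute__((always_inline)) int forced(int x)
{
	return x + 2;
}

int main(void)
{
	printf("%d %d\n", hint_only(1), forced(1));
	return 0;
}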
}
extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({ \
- BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
__get_task_comm(buf, sizeof(buf), tsk); \
})
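The hunk above drops the BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN) compile-time check from get_task_comm(), leaving only the length-aware __get_task_comm() call. sizeof(buf) still bounds the copy, but only when the caller passes a real array; a plain userspace C sketch of that distinction:

#include <stdio.h>

int main(void)
{
	char comm[16];	/* stand-in for a TASK_COMM_LEN-sized buffer */
	char *p = comm;

	/* sizeof reports the whole buffer only for the array itself... */
	printf("array:   %zu\n", sizeof(comm));	/* 16 */
	/* ...for a pointer it is just the pointer size, so a macro using
	 * sizeof(buf) silently under-reports the real buffer length. */
	printf("pointer: %zu\n", sizeof(p));	/* 8 on LP64 */
	return 0;
}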
if (pending && !ksoftirqd_running(pending))
do_softirq_own_stack();
- }
local_irq_restore(flags);
}
{
if (ksoftirqd_running(local_softirq_pending()))
return;
-#endif
if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
if (rule->l3mdev && rule->table)
goto errout_free;
- if (tb[FRA_UID_RANGE]) {
- if (current_user_ns() != net->user_ns) {
- err = -EPERM;
- goto errout_free;
- }
-
- rule->uid_range = nla_get_kuid_range(tb);
-
- if (!uid_range_set(&rule->uid_range) ||
- !uid_lte(rule->uid_range.start, rule->uid_range.end))
- goto errout_free;
- } else {
- rule->uid_range = fib_kuid_range_unset;
- }
-
- if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
- rule_exists(ops, frh, tb, rule)) {
- err = -EEXIST;
if (rule_exists(ops, frh, tb, rule)) {
if (nlh->nlmsg_flags & NLM_F_EXCL)
err = -EEXIST;
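The block removed above is the FRA_UID_RANGE handling: it requires the caller to be in the owning user namespace of the netns, reads the range with nla_get_kuid_range(), and rejects it unless both ends are valid kuids and start <= end. A hedged sketch of that validity test using the uidgid helpers; the struct and function names below are demo-only, not the fib_rules.c internals.

#include <linux/types.h>
#include <linux/uidgid.h>

/* Demo-only struct mirroring the start/end pair carried in the rule. */
struct kuid_range_demo {
	kuid_t start;
	kuid_t end;
};

static bool kuid_range_ok_demo(const struct kuid_range_demo *r)
{
	/* Both ends must map to valid kernel uids and be ordered. */
	return uid_valid(r->start) && uid_valid(r->end) &&
	       uid_lte(r->start, r->end);
}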
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
- int oif = sk->sk_bound_dev_if;
struct dst_entry *dst;
- if (!oif && skb->dev)
- oif = l3mdev_master_ifindex(skb->dev);
-
- ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark);
+ ip6_update_pmtu(skb, sock_net(sk), mtu,
+ sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
dst = __sk_dst_get(sk);
if (!dst || !dst->obsolete ||