struct sve_context {
struct _aarch64_ctx head;
__u16 vl;
- __u16 __reserved[3];
+ __u16 flags;
+ __u16 __reserved[2];
};
+#define SVE_SIG_FLAG_SM 0x1 /* Context describes streaming mode */
+
#endif /* !__ASSEMBLY__ */
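For illustration, a minimal userspace sketch that walks the signal frame and
reports whether the SVE record describes streaming mode. It assumes
<asm/sigcontext.h> from a kernel carrying this change (which provides
sve_context, SVE_MAGIC and the new SVE_SIG_FLAG_SM); error handling is elided.

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
        ucontext_t *uc = ucontext;
        struct _aarch64_ctx *head =
                (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

        /* Records are laid out back to back, terminated by magic == 0 */
        while (head->magic) {
                if (head->magic == SVE_MAGIC) {
                        struct sve_context *sve = (struct sve_context *)head;

                        /* printf() is not async-signal-safe; demo only */
                        printf("SVE record: vl=%u%s\n", sve->vl,
                               (sve->flags & SVE_SIG_FLAG_SM) ?
                               " (streaming mode)" : "");
                        break;
                }
                head = (struct _aarch64_ctx *)((char *)head + head->size);
        }
}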
#include <asm/sve_context.h>
* sve_context.vl must equal the thread's current vector length when
* doing a sigreturn.
*
+ * On systems with support for SME the SVE register state may reflect either
+ * streaming or non-streaming mode. In streaming mode the streaming mode
+ * vector length is used and the flag SVE_SIG_FLAG_SM is set in the flags
+ * field. It is permitted to enter or leave streaming mode in a signal
+ * return; applications should take care to ensure that any difference in
+ * vector length between the two modes is handled, including any resizing
+ * and movement of context blocks.
*
- * Note: for all these macros, the "vq" argument denotes the SVE
- * vector length in quadwords (i.e., units of 128 bits).
+ * Note: for all these macros, the "vq" argument denotes the vector length
+ * in quadwords (i.e., units of 128 bits).
*
* The correct way to obtain vq is to use sve_vq_from_vl(vl). The
* result is valid if and only if sve_vl_valid(vl) is true. This is
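As a quick worked example of that conversion (sve_vq_from_vl() and
sve_vl_valid() come from the same uapi headers; the vl value here is an
arbitrary example):

#include <stdio.h>
#include <asm/sigcontext.h>

int main(void)
{
        unsigned int vl = 64;   /* example: 512-bit (64-byte) vectors */

        if (!sve_vl_valid(vl))
                return 1;

        /* 64 bytes / 16 bytes per quadword == 4 quadwords */
        printf("vl=%u -> vq=%u\n", vl, sve_vq_from_vl(vl));
        return 0;
}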
{
int err = 0;
u16 reserved[ARRAY_SIZE(ctx->__reserved)];
+ u16 flags = 0;
unsigned int vl = task_get_sve_vl(current);
unsigned int vq = 0;
- if (test_thread_flag(TIF_SVE))
+ if (thread_sm_enabled(&current->thread)) {
+ vl = task_get_sme_vl(current);
vq = sve_vq_from_vl(vl);
+ flags |= SVE_SIG_FLAG_SM;
+ } else if (test_thread_flag(TIF_SVE)) {
+ vq = sve_vq_from_vl(vl);
+ }
memset(reserved, 0, sizeof(reserved));
__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
&ctx->head.size, err);
__put_user_error(vl, &ctx->vl, err);
+ __put_user_error(flags, &ctx->flags, err);
BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
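To make the sizing concrete, here is a small sketch using the uapi
SVE_SIG_CONTEXT_SIZE() macro: with vq == 0 (the FPSIMD-only path above) only
the bare sve_context header is reserved, otherwise the full register payload.
The ROUND_UP() helper stands in for the kernel's round_up(); the vq values
are arbitrary examples.

#include <stdio.h>
#include <asm/sigcontext.h>

#define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* vq == 0: header only, as in the FPSIMD-only case */
        printf("header only: %zu bytes\n",
               (size_t)ROUND_UP(SVE_SIG_CONTEXT_SIZE(0), 16));

        /* vq == 4 (vl == 64): header plus Z, P and FFR registers */
        printf("vq=4: %zu bytes\n",
               (size_t)ROUND_UP(SVE_SIG_CONTEXT_SIZE(4), 16));
        return 0;
}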
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
int err;
- unsigned int vq;
+ unsigned int vl, vq;
struct user_fpsimd_state fpsimd;
struct sve_context sve;
if (__copy_from_user(&sve, user->sve, sizeof(sve)))
return -EFAULT;
- if (sve.vl != task_get_sve_vl(current))
+ if (sve.flags & SVE_SIG_FLAG_SM) {
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ vl = task_get_sme_vl(current);
+ } else {
+ vl = task_get_sve_vl(current);
+ }
+
+ if (sve.vl != vl)
return -EINVAL;
if (sve.head.size <= sizeof(*user->sve)) {
clear_thread_flag(TIF_SVE);
+ current->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
goto fpsimd_only;
}
if (err)
return -EFAULT;
- set_thread_flag(TIF_SVE);
+ if (sve.flags & SVE_SIG_FLAG_SM)
+ current->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
+ else
+ set_thread_flag(TIF_SVE);
fpsimd_only:
/* copy the FP and status/control registers */
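The mode and vector length checks above can be condensed into a standalone
helper for clarity. This is a restatement of the rule, not kernel code: the
sve_vl and sme_vl parameters stand in for task_get_sve_vl() and
task_get_sme_vl(), and have_sme for system_supports_sme().

#include <errno.h>
#include <stdbool.h>
#include <asm/sigcontext.h>

static int check_sve_record(const struct sve_context *sve, bool have_sme,
                            unsigned int sve_vl, unsigned int sme_vl)
{
        unsigned int expected;

        if (sve->flags & SVE_SIG_FLAG_SM) {
                if (!have_sme)
                        return -EINVAL; /* streaming state needs SME */
                expected = sme_vl;      /* streaming mode: SME vl applies */
        } else {
                expected = sve_vl;      /* non-streaming: SVE vl applies */
        }

        return sve->vl == expected ? 0 : -EINVAL;
}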
break;
case SVE_MAGIC:
- if (!system_supports_sve())
+ if (!system_supports_sve() && !system_supports_sme())
goto invalid;
if (user->sve)
if (system_supports_sve()) {
unsigned int vq = 0;
- if (add_all || test_thread_flag(TIF_SVE)) {
- int vl = sve_max_vl();
+ if (add_all || test_thread_flag(TIF_SVE) ||
+ thread_sm_enabled(&current->thread)) {
+ int vl = max(sve_max_vl(), sme_max_vl());
if (!add_all)
- vl = task_get_sve_vl(current);
+ vl = thread_get_cur_vl(&current->thread);
vq = sve_vq_from_vl(vl);
}
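The sizing decision in this hunk amounts to the following sketch: when
reserving space up front (add_all), the frame must fit whichever of the SVE
and SME maximum vector lengths is larger, since a signal may be delivered in
either mode. sve_or_sm_live, cur_vl, max_sve_vl and max_sme_vl are
hypothetical stand-ins for the TIF_SVE/thread_sm_enabled() tests,
thread_get_cur_vl(), sve_max_vl() and sme_max_vl().

#include <stdbool.h>
#include <asm/sigcontext.h>

static unsigned int sve_record_size(bool add_all, bool sve_or_sm_live,
                                    unsigned int cur_vl,
                                    unsigned int max_sve_vl,
                                    unsigned int max_sme_vl)
{
        unsigned int vq = 0;

        if (add_all || sve_or_sm_live) {
                unsigned int vl = max_sve_vl > max_sme_vl ?
                                  max_sve_vl : max_sme_vl;

                if (!add_all)
                        vl = cur_vl;    /* only the live state is needed */

                vq = sve_vq_from_vl(vl);
        }

        return SVE_SIG_CONTEXT_SIZE(vq);
}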
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
}
- /* Scalable Vector Extension state, if present */
- if (system_supports_sve() && err == 0 && user->sve_offset) {
+ /* Scalable Vector Extension state (including streaming), if present */
+ if ((system_supports_sve() || system_supports_sme()) &&
+ err == 0 && user->sve_offset) {
struct sve_context __user *sve_ctx =
apply_user_offset(user, user->sve_offset);
err |= preserve_sve_context(sve_ctx);
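Putting the first sketch to use: install the handler with SA_SIGINFO and
raise a signal, so the SVE record and its new flags field can be inspected.
Illustrative only; reuses handler() from the sketch near the top.

#include <signal.h>
#include <string.h>

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;      /* handler from the first sketch */
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        return 0;
}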