summarylogtreecommitdiffstats
diff options
context:
space:
mode:
authorJeka2023-10-16 12:45:58 +0300
committerJeka2023-10-16 12:45:58 +0300
commit2aeff2597533494445640addf822767a0e1ad62b (patch)
tree4527b1273762095e746a95b348ff49b40e31611e
parentd6c4b9298dbe30331764702bd49f6ed1c057c341 (diff)
downloadaur-2aeff2597533494445640addf822767a0e1ad62b.tar.gz
kernel release 6.5.7
-rw-r--r--0001-HID.patch1653
-rw-r--r--0001-ROG-ALLY-bmi323-device.patch2672
-rw-r--r--0002-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch89
-rw-r--r--0003-sphinx-kfigure.py-Convert-outdir-to-str-before-using.patch33
-rw-r--r--0004-ASoC-Intel-soc-acpi-fix-Dell-SKU-0B34.patch51
-rw-r--r--0005-Revert-ASoC-Intel-soc-acpi-add-tables-for-Dell-SKU-0.patch79
-rw-r--r--0005-btrfs-wait-on-uncached-block-groups-on-every-allocat.patch255
-rw-r--r--0006-btrfs-set-last-dir-index-to-the-current-last-index-w.patch86
-rw-r--r--0007-btrfs-refresh-dir-last-index-during-a-rewinddir-3-ca.patch102
-rw-r--r--0008-btrfs-fix-race-between-reading-a-directory-and-addin.patch143
-rw-r--r--0102-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch89
-rw-r--r--0103-ASoC-Intel-soc-acpi-fix-Dell-SKU-0B34.patch51
-rw-r--r--0104-btrfs-wait-on-uncached-block-groups-on-every-allocat.patch255
-rw-r--r--0105-net_wwan_t7xx_add-AP-CLDMA.patch504
-rw-r--r--PKGBUILD35
-rw-r--r--ROG-ALLY-LED-fix.patch32
-rw-r--r--ROG-ALLY-NCT6775-PLATFORM.patch12
-rw-r--r--ROG_ALLY_OLDER_BIOS_AUDIO.patch18
-rw-r--r--config10
19 files changed, 6154 insertions, 15 deletions
diff --git a/0001-HID.patch b/0001-HID.patch
new file mode 100644
index 000000000000..eab0dd5e61b8
--- /dev/null
+++ b/0001-HID.patch
@@ -0,0 +1,1653 @@
+From b899859fe49cccda9e8739d29d883dbd6dd057f3 Mon Sep 17 00:00:00 2001
+From: Vicki Pfau <vi@endrift.com>
+Date: Thu, 30 Jun 2022 18:42:10 -0700
+Subject: [PATCH 01/10] USB: gadget: f_hid: Add Get-Feature report
+
+While the HID gadget implementation has been sufficient for devices that only
+use INTERRUPT transfers, the USB HID standard includes provisions for Set- and
+Get-Feature report CONTROL transfers that go over endpoint 0. These were
+previously impossible with the existing implementation, and would either send
+an empty reply, or stall out.
+
+As the feature is a standard part of USB HID, it stands to reason that devices
+would use it, and that the HID gadget should support it. This patch adds
+support for (polled) device-to-host Get-Feature reports through a new ioctl
+interface to the hidg class dev nodes.
+
+Signed-off-by: Vicki Pfau <vi@endrift.com>
+(cherry picked from commit 8437fa3861c7198a3e286f393c8637c4fc08d2bc)
+Signed-off-by: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+---
+ drivers/usb/gadget/function/f_hid.c | 121 ++++++++++++++++++++++++++--
+ include/uapi/linux/usb/g_hid.h | 38 +++++++++
+ include/uapi/linux/usb/gadgetfs.h | 2 +-
+ 3 files changed, 154 insertions(+), 7 deletions(-)
+ create mode 100644 include/uapi/linux/usb/g_hid.h
+
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index ea85e2c701a15..6fec92b5a0bd9 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -16,6 +16,7 @@
+ #include <linux/wait.h>
+ #include <linux/sched.h>
+ #include <linux/usb/g_hid.h>
++#include <uapi/linux/usb/g_hid.h>
+
+ #include "u_f.h"
+ #include "u_hid.h"
+@@ -75,6 +76,13 @@ struct f_hidg {
+ wait_queue_head_t write_queue;
+ struct usb_request *req;
+
++ /* get report */
++ struct usb_request *get_req;
++ struct usb_hidg_report get_report;
++ spinlock_t get_spinlock;
++ bool get_pending;
++ wait_queue_head_t get_queue;
++
+ struct device dev;
+ struct cdev cdev;
+ struct usb_function func;
+@@ -523,6 +531,64 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
+ return status;
+ }
+
++
++static int f_hidg_get_report(struct file *file, struct usb_hidg_report __user *buffer)
++{
++ struct f_hidg *hidg = file->private_data;
++ struct usb_composite_dev *cdev = hidg->func.config->cdev;
++
++ int status = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&hidg->get_spinlock, flags);
++
++#define GET_REPORT_COND (!hidg->get_pending)
++
++ while (!GET_REPORT_COND) {
++ spin_unlock_irqrestore(&hidg->get_spinlock, flags);
++
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ if (wait_event_interruptible_exclusive(hidg->get_queue,
++ GET_REPORT_COND))
++ return -ERESTARTSYS;
++
++ spin_lock_irqsave(&hidg->get_spinlock, flags);
++ if (!hidg->get_pending) {
++ spin_unlock_irqrestore(&hidg->get_spinlock, flags);
++ return -EINVAL;
++ }
++ }
++
++ hidg->get_pending = true;
++ spin_unlock_irqrestore(&hidg->get_spinlock, flags);
++
++ status = copy_from_user(&hidg->get_report, buffer,
++ sizeof(struct usb_hidg_report));
++ if (status != 0) {
++ ERROR(cdev, "copy_from_user error\n");
++ status = -EINVAL;
++ }
++
++ spin_lock_irqsave(&hidg->get_spinlock, flags);
++ hidg->get_pending = false;
++ spin_unlock_irqrestore(&hidg->get_spinlock, flags);
++
++ wake_up(&hidg->get_queue);
++ return status;
++}
++
++static long f_hidg_ioctl(struct file *file, unsigned int code, unsigned long arg)
++{
++ switch (code) {
++ case GADGET_HID_WRITE_GET_REPORT:
++ return f_hidg_get_report(file, (struct usb_hidg_report __user *)arg);
++ default:
++ return -ENOTTY;
++ }
++}
++
+ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
+ {
+ struct f_hidg *hidg = file->private_data;
+@@ -548,6 +614,7 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
+ #undef WRITE_COND
+ #undef READ_COND_SSREPORT
+ #undef READ_COND_INTOUT
++#undef GET_REPORT_COND
+
+ static int f_hidg_release(struct inode *inode, struct file *fd)
+ {
+@@ -640,6 +707,10 @@ static void hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req)
+ wake_up(&hidg->read_queue);
+ }
+
++static void hidg_get_report_complete(struct usb_ep *ep, struct usb_request *req)
++{
++}
++
+ static int hidg_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+ {
+@@ -647,6 +718,8 @@ static int hidg_setup(struct usb_function *f,
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int status = 0;
++ unsigned long flags;
++ bool do_wake = false;
+ __u16 value, length;
+
+ value = __le16_to_cpu(ctrl->wValue);
+@@ -659,14 +732,29 @@ static int hidg_setup(struct usb_function *f,
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_GET_REPORT):
+- VDBG(cdev, "get_report\n");
++ VDBG(cdev, "get_report | wLength=%d\n", ctrl->wLength);
+
+- /* send an empty report */
+- length = min_t(unsigned, length, hidg->report_length);
+- memset(req->buf, 0x0, length);
++ req = hidg->get_req;
++ req->zero = 0;
++ req->length = min_t(unsigned, length, hidg->report_length);
++ status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
++ if (status < 0) {
++ ERROR(cdev, "usb_ep_queue error on get_report %d\n",
++ status);
+
+- goto respond;
+- break;
++ spin_lock_irqsave(&hidg->get_spinlock, flags);
++ if (hidg->get_pending) {
++ hidg->get_pending = false;
++ do_wake = true;
++ }
++ spin_unlock_irqrestore(&hidg->get_spinlock, flags);
++
++ if (do_wake) {
++ wake_up(&hidg->get_queue);
++ }
++ }
++
++ return status;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_GET_PROTOCOL):
+@@ -800,6 +888,14 @@ static void hidg_disable(struct usb_function *f)
+
+ hidg->req = NULL;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
++
++ spin_lock_irqsave(&hidg->get_spinlock, flags);
++ if (!hidg->get_pending) {
++ usb_ep_free_request(f->config->cdev->gadget->ep0, hidg->get_req);
++ hidg->get_pending = true;
++ }
++ hidg->get_req = NULL;
++ spin_unlock_irqrestore(&hidg->get_spinlock, flags);
+ }
+
+ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+@@ -908,6 +1004,7 @@ static const struct file_operations f_hidg_fops = {
+ .write = f_hidg_write,
+ .read = f_hidg_read,
+ .poll = f_hidg_poll,
++ .unlocked_ioctl = f_hidg_ioctl,
+ .llseek = noop_llseek,
+ };
+
+@@ -918,6 +1015,14 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ struct usb_string *us;
+ int status;
+
++ hidg->get_req = usb_ep_alloc_request(c->cdev->gadget->ep0, GFP_ATOMIC);
++ if (!hidg->get_req)
++ return -ENOMEM;
++ hidg->get_req->buf = hidg->get_report.data;
++ hidg->get_req->zero = 0;
++ hidg->get_req->complete = hidg_get_report_complete;
++ hidg->get_req->context = hidg;
++
+ /* maybe allocate device-global string IDs, and patch descriptors */
+ us = usb_gstrings_attach(c->cdev, ct_func_strings,
+ ARRAY_SIZE(ct_func_string_defs));
+@@ -1003,8 +1108,10 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ hidg->write_pending = 1;
+ hidg->req = NULL;
+ spin_lock_init(&hidg->read_spinlock);
++ spin_lock_init(&hidg->get_spinlock);
+ init_waitqueue_head(&hidg->write_queue);
+ init_waitqueue_head(&hidg->read_queue);
++ init_waitqueue_head(&hidg->get_queue);
+ INIT_LIST_HEAD(&hidg->completed_out_req);
+
+ /* create char device */
+@@ -1021,6 +1128,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ if (hidg->req != NULL)
+ free_ep_req(hidg->in_ep, hidg->req);
+
++ usb_ep_free_request(c->cdev->gadget->ep0, hidg->get_req);
++
+ return status;
+ }
+
+diff --git a/include/uapi/linux/usb/g_hid.h b/include/uapi/linux/usb/g_hid.h
+new file mode 100644
+index 0000000000000..c6068b4863543
+--- /dev/null
++++ b/include/uapi/linux/usb/g_hid.h
+@@ -0,0 +1,38 @@
++/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
++/*
++ * g_hid.h -- Header file for USB HID gadget driver
++ *
++ * Copyright (C) 2022 Valve Software
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef __UAPI_LINUX_USB_G_HID_H
++#define __UAPI_LINUX_USB_G_HID_H
++
++#include <linux/types.h>
++
++struct usb_hidg_report {
++ __u16 length;
++ __u8 data[512];
++};
++
++/* The 'g' code is also used by gadgetfs and hid gadget ioctl requests.
++ * Don't add any colliding codes to either driver, and keep
++ * them in unique ranges (size 0x20 for now).
++ */
++#define GADGET_HID_WRITE_GET_REPORT _IOW('g', 0x42, struct usb_hidg_report)
++
++#endif /* __UAPI_LINUX_USB_G_HID_H */
+diff --git a/include/uapi/linux/usb/gadgetfs.h b/include/uapi/linux/usb/gadgetfs.h
+index 835473910a498..9754822b2a409 100644
+--- a/include/uapi/linux/usb/gadgetfs.h
++++ b/include/uapi/linux/usb/gadgetfs.h
+@@ -62,7 +62,7 @@ struct usb_gadgetfs_event {
+ };
+
+
+-/* The 'g' code is also used by printer gadget ioctl requests.
++/* The 'g' code is also used by printer and hid gadget ioctl requests.
+ * Don't add any colliding codes to either driver, and keep
+ * them in unique ranges (size 0x20 for now).
+ */
+--
+2.41.0
+
+
+From 20ebaf7b44ff03078cf53e43306d6c5a3d0613e6 Mon Sep 17 00:00:00 2001
+From: Vicki Pfau <vi@endrift.com>
+Date: Thu, 30 Jun 2022 18:43:10 -0700
+Subject: [PATCH 02/10] USB: gadget: f_hid: Add Set-Feature report
+
+While the HID gadget implementation has been sufficient for devices that only
+use INTERRUPT transfers, the USB HID standard includes provisions for Set- and
+Get-Feature report CONTROL transfers that go over endpoint 0. These were
+previously impossible with the existing implementation, and would either send
+an empty reply, or stall out.
+
+As the feature is a standard part of USB HID, it stands to reason that devices
+would use it, and that the HID gadget should support it. This patch adds
+support for host-to-device Set-Feature reports through a new ioctl
+interface to the hidg class dev nodes.
+
+Signed-off-by: Vicki Pfau <vi@endrift.com>
+(cherry picked from commit 3d82be0ec3aa3b947d9c927d7b06c433de15be8b)
+Signed-off-by: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+---
+ drivers/usb/gadget/function/f_hid.c | 110 ++++++++++++++++++++++++++--
+ include/uapi/linux/usb/g_hid.h | 24 +-----
+ 2 files changed, 106 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index 6fec92b5a0bd9..172cba91aded1 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -76,6 +76,11 @@ struct f_hidg {
+ wait_queue_head_t write_queue;
+ struct usb_request *req;
+
++ /* set report */
++ struct list_head completed_set_req;
++ spinlock_t set_spinlock;
++ wait_queue_head_t set_queue;
++
+ /* get report */
+ struct usb_request *get_req;
+ struct usb_hidg_report get_report;
+@@ -531,6 +536,54 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
+ return status;
+ }
+
++static int f_hidg_set_report(struct file *file, struct usb_hidg_report __user *buffer)
++{
++ struct f_hidg *hidg = file->private_data;
++ struct f_hidg_req_list *list;
++ struct usb_request *req;
++ unsigned long flags;
++ unsigned short length;
++ int status;
++
++ spin_lock_irqsave(&hidg->set_spinlock, flags);
++
++#define SET_REPORT_COND (!list_empty(&hidg->completed_set_req))
++
++ /* wait for at least one buffer to complete */
++ while (!SET_REPORT_COND) {
++ spin_unlock_irqrestore(&hidg->set_spinlock, flags);
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ if (wait_event_interruptible(hidg->set_queue, SET_REPORT_COND))
++ return -ERESTARTSYS;
++
++ spin_lock_irqsave(&hidg->set_spinlock, flags);
++ }
++
++ /* pick the first one */
++ list = list_first_entry(&hidg->completed_set_req,
++ struct f_hidg_req_list, list);
++
++ /*
++ * Remove this from list to protect it from being free()
++ * while host disables our function
++ */
++ list_del(&list->list);
++
++ req = list->req;
++ spin_unlock_irqrestore(&hidg->set_spinlock, flags);
++
++ /* copy to user outside spinlock */
++ length = min_t(unsigned short, sizeof(buffer->data), req->actual);
++ status = copy_to_user(&buffer->length, &length, sizeof(buffer->length));
++ if (!status) {
++ status = copy_to_user(&buffer->data, req->buf, length);
++ }
++ kfree(list);
++ free_ep_req(hidg->func.config->cdev->gadget->ep0, req);
++ return status;
++}
+
+ static int f_hidg_get_report(struct file *file, struct usb_hidg_report __user *buffer)
+ {
+@@ -582,6 +635,8 @@ static int f_hidg_get_report(struct file *file, struct usb_hidg_report __user *b
+ static long f_hidg_ioctl(struct file *file, unsigned int code, unsigned long arg)
+ {
+ switch (code) {
++ case GADGET_HID_READ_SET_REPORT:
++ return f_hidg_set_report(file, (struct usb_hidg_report __user *)arg);
+ case GADGET_HID_WRITE_GET_REPORT:
+ return f_hidg_get_report(file, (struct usb_hidg_report __user *)arg);
+ default:
+@@ -596,6 +651,7 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
+
+ poll_wait(file, &hidg->read_queue, wait);
+ poll_wait(file, &hidg->write_queue, wait);
++ poll_wait(file, &hidg->set_queue, wait);
+
+ if (WRITE_COND)
+ ret |= EPOLLOUT | EPOLLWRNORM;
+@@ -608,12 +664,16 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
+ ret |= EPOLLIN | EPOLLRDNORM;
+ }
+
++ if (SET_REPORT_COND)
++ ret |= EPOLLPRI;
++
+ return ret;
+ }
+
+ #undef WRITE_COND
+ #undef READ_COND_SSREPORT
+ #undef READ_COND_INTOUT
++#undef SET_REPORT_COND
+ #undef GET_REPORT_COND
+
+ static int f_hidg_release(struct inode *inode, struct file *fd)
+@@ -658,11 +718,19 @@ static void hidg_intout_complete(struct usb_ep *ep, struct usb_request *req)
+
+ req_list->req = req;
+
+- spin_lock_irqsave(&hidg->read_spinlock, flags);
+- list_add_tail(&req_list->list, &hidg->completed_out_req);
+- spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++ if (ep == cdev->gadget->ep0) {
++ spin_lock_irqsave(&hidg->set_spinlock, flags);
++ list_add_tail(&req_list->list, &hidg->completed_set_req);
++ spin_unlock_irqrestore(&hidg->set_spinlock, flags);
+
+- wake_up(&hidg->read_queue);
++ wake_up(&hidg->set_queue);
++ } else {
++ spin_lock_irqsave(&hidg->read_spinlock, flags);
++ list_add_tail(&req_list->list, &hidg->completed_out_req);
++ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++
++ wake_up(&hidg->read_queue);
++ }
+ break;
+ default:
+ ERROR(cdev, "Set report failed %d\n", req->status);
+@@ -775,12 +843,27 @@ static int hidg_setup(struct usb_function *f,
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_SET_REPORT):
+ VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
+- if (hidg->use_out_ep)
++ if (!hidg->use_out_ep) {
++ req->complete = hidg_ssreport_complete;
++ req->context = hidg;
++ goto respond;
++ }
++ if (!length)
+ goto stall;
+- req->complete = hidg_ssreport_complete;
++ req = alloc_ep_req(cdev->gadget->ep0, GFP_ATOMIC);
++ if (!req)
++ return -ENOMEM;
++ req->complete = hidg_intout_complete;
+ req->context = hidg;
+- goto respond;
+- break;
++ req->zero = 0;
++ req->length = length;
++ status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
++ if (status < 0) {
++ ERROR(cdev, "usb_ep_queue error on set_report %d\n", status);
++ free_ep_req(cdev->gadget->ep0, req);
++ }
++
++ return status;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_SET_PROTOCOL):
+@@ -880,6 +963,14 @@ static void hidg_disable(struct usb_function *f)
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+ }
+
++ spin_lock_irqsave(&hidg->set_spinlock, flags);
++ list_for_each_entry_safe(list, next, &hidg->completed_set_req, list) {
++ free_ep_req(f->config->cdev->gadget->ep0, list->req);
++ list_del(&list->list);
++ kfree(list);
++ }
++ spin_unlock_irqrestore(&hidg->set_spinlock, flags);
++
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ if (!hidg->write_pending) {
+ free_ep_req(hidg->in_ep, hidg->req);
+@@ -1108,11 +1199,14 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ hidg->write_pending = 1;
+ hidg->req = NULL;
+ spin_lock_init(&hidg->read_spinlock);
++ spin_lock_init(&hidg->set_spinlock);
+ spin_lock_init(&hidg->get_spinlock);
+ init_waitqueue_head(&hidg->write_queue);
+ init_waitqueue_head(&hidg->read_queue);
++ init_waitqueue_head(&hidg->set_queue);
+ init_waitqueue_head(&hidg->get_queue);
+ INIT_LIST_HEAD(&hidg->completed_out_req);
++ INIT_LIST_HEAD(&hidg->completed_set_req);
+
+ /* create char device */
+ cdev_init(&hidg->cdev, &f_hidg_fops);
+diff --git a/include/uapi/linux/usb/g_hid.h b/include/uapi/linux/usb/g_hid.h
+index c6068b4863543..54814c2c68d60 100644
+--- a/include/uapi/linux/usb/g_hid.h
++++ b/include/uapi/linux/usb/g_hid.h
+@@ -1,38 +1,22 @@
+ /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+-/*
+- * g_hid.h -- Header file for USB HID gadget driver
+- *
+- * Copyright (C) 2022 Valve Software
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+
+ #ifndef __UAPI_LINUX_USB_G_HID_H
+ #define __UAPI_LINUX_USB_G_HID_H
+
+ #include <linux/types.h>
+
++#define HIDG_REPORT_SIZE_MAX 64
++
+ struct usb_hidg_report {
+ __u16 length;
+- __u8 data[512];
++ __u8 data[HIDG_REPORT_SIZE_MAX];
+ };
+
+ /* The 'g' code is also used by gadgetfs and hid gadget ioctl requests.
+ * Don't add any colliding codes to either driver, and keep
+ * them in unique ranges (size 0x20 for now).
+ */
++#define GADGET_HID_READ_SET_REPORT _IOR('g', 0x41, struct usb_hidg_report)
+ #define GADGET_HID_WRITE_GET_REPORT _IOW('g', 0x42, struct usb_hidg_report)
+
+ #endif /* __UAPI_LINUX_USB_G_HID_H */
+--
+2.41.0
+
+
+From 146e98d6f595e3a4e6c09a00ee9ec2d48a6703cb Mon Sep 17 00:00:00 2001
+From: Vicki Pfau <vi@endrift.com>
+Date: Tue, 29 Nov 2022 18:32:58 -0800
+Subject: [PATCH 03/10] HID: hid-steam: Update list of identifiers from SDL
+
+SDL includes a list of settings (registers), reports (cmds), and various other
+identifiers that were provided by Valve. This commit imports a significant
+chunk of that list as well as updating the guessed names and replacing a
+handful of magic constants. It also replaces bitmask definitions that used hex
+with the BIT macro.
+
+Signed-off-by: Vicki Pfau <vi@endrift.com>
+---
+ drivers/hid/hid-steam.c | 156 +++++++++++++++++++++++++++++++---------
+ 1 file changed, 121 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index b110818fc9458..39a9bf3b7f77d 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -71,7 +71,7 @@ static LIST_HEAD(steam_devices);
+
+ /*
+ * Commands that can be sent in a feature report.
+- * Thanks to Valve for some valuable hints.
++ * Thanks to Valve and SDL for some valuable hints.
+ */
+ #define STEAM_CMD_SET_MAPPINGS 0x80
+ #define STEAM_CMD_CLEAR_MAPPINGS 0x81
+@@ -80,27 +80,98 @@ static LIST_HEAD(steam_devices);
+ #define STEAM_CMD_GET_ATTRIB_LABEL 0x84
+ #define STEAM_CMD_DEFAULT_MAPPINGS 0x85
+ #define STEAM_CMD_FACTORY_RESET 0x86
+-#define STEAM_CMD_WRITE_REGISTER 0x87
++#define STEAM_CMD_SET_REGISTER 0x87
+ #define STEAM_CMD_CLEAR_REGISTER 0x88
+-#define STEAM_CMD_READ_REGISTER 0x89
++#define STEAM_CMD_GET_REGISTER 0x89
+ #define STEAM_CMD_GET_REGISTER_LABEL 0x8a
+ #define STEAM_CMD_GET_REGISTER_MAX 0x8b
+ #define STEAM_CMD_GET_REGISTER_DEFAULT 0x8c
+ #define STEAM_CMD_SET_MODE 0x8d
+-#define STEAM_CMD_DEFAULT_MOUSE 0x8e
+-#define STEAM_CMD_FORCEFEEDBAK 0x8f
+-#define STEAM_CMD_REQUEST_COMM_STATUS 0xb4
+-#define STEAM_CMD_GET_SERIAL 0xae
++#define STEAM_CMD_DEFAULT_REGISTER 0x8e
++#define STEAM_CMD_HAPTIC_PULSE 0x8f
++#define STEAM_CMD_TURN_OFF_CONTROLLER 0x9f
++#define STEAM_CMD_GET_DEVICE_IFNO 0xa1
++#define STEAM_CMD_CALIBRATE_TRACKPADS 0xa7
++#define STEAM_CMD_SET_SERIAL 0xa9
++#define STEAM_CMD_GET_TRACKPAD_CALIB 0xaa
++#define STEAM_CMD_GET_TRACKPAD_FACTORY_CALIB 0xab
++#define STEAM_CMD_GET_TRACKPAD_RAW_DATA 0xac
++#define STEAM_CMD_ENABLE_PAIRING 0xad
++#define STEAM_CMD_GET_STRING_ATTRIB 0xae
++#define STEAM_CMD_RADIO_ERASE_RECORDS 0xaf
++#define STEAM_CMD_RADIO_WRITE_RECORD 0xb0
++#define STEAM_CMD_SET_DONGLE_SETTING 0xb1
++#define STEAM_CMD_DONGLE_DISCONNECT_DEV 0xb2
++#define STEAM_CMD_DONGLE_COMMIT_DEV 0xb3
++#define STEAM_CMD_DONGLE_GET_STATE 0xb4
++#define STEAM_CMD_CALIBRATE_GYRO 0xb5
++#define STEAM_CMD_PLAY_AUDIO 0xb6
++#define STEAM_CMD_AUDIO_UPDATE_START 0xb7
++#define STEAM_CMD_AUDIO_UPDATE_DATA 0xb8
++#define STEAM_CMD_AUDIO_UPDATE_COMPLETE 0xb9
++#define STEAM_CMD_GET_CHIPID 0xba
++#define STEAM_CMD_CALIBRATE_JOYSTICK 0xbf
++#define STEAM_CMD_CALIBRATE_TRIGGERS 0xc0
++#define STEAM_CMD_SET_AUDIO_MAPPING 0xc1
++#define STEAM_CMD_CHECK_GYRO_FW_LOAD 0xc2
++#define STEAM_CMD_CALIBRATE_ANALOG 0xc3
++#define STEAM_CMD_DONGLE_GET_CONN_SLOTS 0xc4
++#define STEAM_CMD_HAPTIC_CMD 0xea
+ #define STEAM_CMD_HAPTIC_RUMBLE 0xeb
+
+ /* Some useful register ids */
+-#define STEAM_REG_LPAD_MODE 0x07
+-#define STEAM_REG_RPAD_MODE 0x08
+-#define STEAM_REG_RPAD_MARGIN 0x18
+-#define STEAM_REG_LED 0x2d
+-#define STEAM_REG_GYRO_MODE 0x30
+-#define STEAM_REG_LPAD_CLICK_PRESSURE 0x34
+-#define STEAM_REG_RPAD_CLICK_PRESSURE 0x35
++#define STEAM_REG_MOUSE_SENSITIVITY 0x00
++#define STEAM_REG_MOUSE_ACCELERATION 0x01
++#define STEAM_REG_TRACKBALL_ROTATION_ANGLE 0x02
++#define STEAM_REG_HAPTIC_INTENSITY 0x03
++#define STEAM_REG_LEFT_GAMEPAD_STICK_ENABLED 0x04
++#define STEAM_REG_RIGHT_GAMEPAD_STICK_ENABLED 0x05
++#define STEAM_REG_USB_DEBUG_MODE 0x06
++#define STEAM_REG_LEFT_TRACKPAD_MODE 0x07
++#define STEAM_REG_RIGHT_TRACKPAD_MODE 0x08
++#define STEAM_REG_MOUSE_POINTER_ENABLED 0x09
++#define STEAM_REG_DPAD_DEADZONE 0x0a
++#define STEAM_REG_MINIMUM_MOMENTUM_VEL 0x0b
++#define STEAM_REG_MOMENTUM_DECAY_AMOUNT 0x0c
++#define STEAM_REG_PAD_REL_MODE_TICKS_PER_PIXEL 0x0d
++#define STEAM_REG_HAPTIC_INCREMENT 0x0e
++#define STEAM_REG_DPAD_ANGLE_SIN 0x0f
++#define STEAM_REG_DPAD_ANGLE_COS 0x10
++#define STEAM_REG_MOMENTUM_VERTICAL_DIVISOR 0x11
++#define STEAM_REG_MOMENTUM_MAXIMUM_VELOCITY 0x12
++#define STEAM_REG_TRACKPAD_Z_ON 0x13
++#define STEAM_REG_TRACKPAD_Z_OFF 0x14
++#define STEAM_REG_SENSITIVY_SCALE_AMOUNT 0x15
++#define STEAM_REG_LEFT_TRACKPAD_SECONDARY_MODE 0x16
++#define STEAM_REG_RIGHT_TRACKPAD_SECONDARY_MODE 0x17
++#define STEAM_REG_SMOOTH_ABSOLUTE_MOUSE 0x18
++#define STEAM_REG_STEAMBUTTON_POWEROFF_TIME 0x19
++#define STEAM_REG_TRACKPAD_OUTER_RADIUS 0x1b
++#define STEAM_REG_TRACKPAD_Z_ON_LEFT 0x1c
++#define STEAM_REG_TRACKPAD_Z_OFF_LEFT 0x1d
++#define STEAM_REG_TRACKPAD_OUTER_SPIN_VEL 0x1e
++#define STEAM_REG_TRACKPAD_OUTER_SPIN_RADIUS 0x1f
++#define STEAM_REG_TRACKPAD_OUTER_SPIN_HORIZONTAL_ONLY 0x20
++#define STEAM_REG_TRACKPAD_RELATIVE_MODE_DEADZONE 0x21
++#define STEAM_REG_TRACKPAD_RELATIVE_MODE_MAX_VEL 0x22
++#define STEAM_REG_TRACKPAD_RELATIVE_MODE_INVERT_Y 0x23
++#define STEAM_REG_TRACKPAD_DOUBLE_TAP_BEEP_ENABLED 0x24
++#define STEAM_REG_TRACKPAD_DOUBLE_TAP_BEEP_PERIOD 0x25
++#define STEAM_REG_TRACKPAD_DOUBLE_TAP_BEEP_COUNT 0x26
++#define STEAM_REG_TRACKPAD_OUTER_RADIUS_RELEASE_ON_TRANSITION 0x27
++#define STEAM_REG_RADIAL_MODE_ANGLE 0x28
++#define STEAM_REG_HAPTIC_INTENSITY_MOUSE_MODE 0x29
++#define STEAM_REG_LEFT_DPAD_REQUIRES_CLICK 0x2a
++#define STEAM_REG_RIGHT_DPAD_REQUIRES_CLICK 0x2b
++#define STEAM_REG_LED_BASELINE_BRIGHTNESS 0x2c
++#define STEAM_REG_LED_USER_BRIGHTNESS 0x2d
++#define STEAM_REG_ENABLE_RAW_JOYSTICK 0x2e
++#define STEAM_REG_ENABLE_FAST_SCAN 0x2f
++#define STEAM_REG_GYRO_MODE 0x30
++#define STEAM_REG_WIRELESS_PACKET_VERSION 0x31
++#define STEAM_REG_SLEEP_INACTIVITY_TIMEOUT 0x32
++#define STEAM_REG_LEFT_TRACKPAD_CLICK_PRESSURE 0x34
++#define STEAM_REG_RIGHT_TRACKPAD_CLICK_PRESSURE 0x35
+
+ /* Raw event identifiers */
+ #define STEAM_EV_INPUT_DATA 0x01
+@@ -108,13 +179,28 @@ static LIST_HEAD(steam_devices);
+ #define STEAM_EV_BATTERY 0x04
+ #define STEAM_EV_DECK_INPUT_DATA 0x09
+
++/* String attribute idenitifiers */
++#define STEAM_ATTRIB_STR_BOARD_SERIAL 0x00
++#define STEAM_ATTRIB_STR_UNIT_SERIAL 0x01
++
+ /* Values for GYRO_MODE (bitmask) */
+-#define STEAM_GYRO_MODE_OFF 0x0000
+-#define STEAM_GYRO_MODE_STEERING 0x0001
+-#define STEAM_GYRO_MODE_TILT 0x0002
+-#define STEAM_GYRO_MODE_SEND_ORIENTATION 0x0004
+-#define STEAM_GYRO_MODE_SEND_RAW_ACCEL 0x0008
+-#define STEAM_GYRO_MODE_SEND_RAW_GYRO 0x0010
++#define STEAM_GYRO_MODE_OFF 0
++#define STEAM_GYRO_MODE_STEERING BIT(0)
++#define STEAM_GYRO_MODE_TILT BIT(1)
++#define STEAM_GYRO_MODE_SEND_ORIENTATION BIT(2)
++#define STEAM_GYRO_MODE_SEND_RAW_ACCEL BIT(3)
++#define STEAM_GYRO_MODE_SEND_RAW_GYRO BIT(4)
++
++/* Trackpad modes */
++#define STEAM_TRACKPAD_ABSOLUTE_MOUSE 0x00
++#define STEAM_TRACKPAD_RELATIVE_MOUSE 0x01
++#define STEAM_TRACKPAD_DPAD_FOUR_WAY_DISCRETE 0x02
++#define STEAM_TRACKPAD_DPAD_FOUR_WAY_OVERLAP 0x03
++#define STEAM_TRACKPAD_DPAD_EIGHT_WAY 0x04
++#define STEAM_TRACKPAD_RADIAL_MODE 0x05
++#define STEAM_TRACKPAD_ABSOLUTE_DPAD 0x06
++#define STEAM_TRACKPAD_NONE 0x07
++#define STEAM_TRACKPAD_GESTURE_KEYBOARD 0x08
+
+ /* Other random constants */
+ #define STEAM_SERIAL_LEN 10
+@@ -232,7 +318,7 @@ static int steam_write_registers(struct steam_device *steam,
+ /* Send: 0x87 len (reg valLo valHi)* */
+ u8 reg;
+ u16 val;
+- u8 cmd[64] = {STEAM_CMD_WRITE_REGISTER, 0x00};
++ u8 cmd[64] = {STEAM_CMD_SET_REGISTER, 0x00};
+ int ret;
+ va_list args;
+
+@@ -268,7 +354,7 @@ static int steam_get_serial(struct steam_device *steam)
+ * Recv: 0xae 0x15 0x01 serialnumber (10 chars)
+ */
+ int ret;
+- u8 cmd[] = {STEAM_CMD_GET_SERIAL, 0x15, 0x01};
++ u8 cmd[] = {STEAM_CMD_GET_STRING_ATTRIB, 0x15, STEAM_ATTRIB_STR_UNIT_SERIAL};
+ u8 reply[3 + STEAM_SERIAL_LEN + 1];
+
+ ret = steam_send_report(steam, cmd, sizeof(cmd));
+@@ -277,7 +363,7 @@ static int steam_get_serial(struct steam_device *steam)
+ ret = steam_recv_report(steam, reply, sizeof(reply));
+ if (ret < 0)
+ return ret;
+- if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != 0x01)
++ if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != STEAM_ATTRIB_STR_UNIT_SERIAL)
+ return -EIO;
+ reply[3 + STEAM_SERIAL_LEN] = 0;
+ strscpy(steam->serial_no, reply + 3, sizeof(steam->serial_no));
+@@ -291,7 +377,7 @@ static int steam_get_serial(struct steam_device *steam)
+ */
+ static inline int steam_request_conn_status(struct steam_device *steam)
+ {
+- return steam_send_report_byte(steam, STEAM_CMD_REQUEST_COMM_STATUS);
++ return steam_send_report_byte(steam, STEAM_CMD_DONGLE_GET_STATE);
+ }
+
+ static inline int steam_haptic_rumble(struct steam_device *steam,
+@@ -339,9 +425,9 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
+ /* enable esc, enter, cursors */
+ steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MAPPINGS);
+ /* enable mouse */
+- steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MOUSE);
++ steam_send_report_byte(steam, STEAM_CMD_DEFAULT_REGISTER);
+ steam_write_registers(steam,
+- STEAM_REG_RPAD_MARGIN, 0x01, /* enable margin */
++ STEAM_REG_SMOOTH_ABSOLUTE_MOUSE, 0x01, /* enable smooth */
+ 0);
+
+ cancel_delayed_work_sync(&steam->heartbeat);
+@@ -351,11 +437,11 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
+
+ if (steam->quirks & STEAM_QUIRK_DECK) {
+ steam_write_registers(steam,
+- STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
+- STEAM_REG_LPAD_MODE, 0x07, /* disable mouse */
+- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
+- STEAM_REG_LPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
+- STEAM_REG_RPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
++ STEAM_REG_SMOOTH_ABSOLUTE_MOUSE, 0x00, /* disable smooth */
++ STEAM_REG_LEFT_TRACKPAD_MODE, STEAM_TRACKPAD_NONE, /* disable mouse */
++ STEAM_REG_RIGHT_TRACKPAD_MODE, STEAM_TRACKPAD_NONE, /* disable mouse */
++ STEAM_REG_LEFT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
++ STEAM_REG_RIGHT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
+ 0);
+ /*
+ * The Steam Deck has a watchdog that automatically enables
+@@ -365,9 +451,9 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
+ schedule_delayed_work(&steam->heartbeat, 5 * HZ);
+ } else {
+ steam_write_registers(steam,
+- STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
+- STEAM_REG_LPAD_MODE, 0x07, /* disable mouse */
+- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
++ STEAM_REG_SMOOTH_ABSOLUTE_MOUSE, 0x00, /* disable smooth */
++ STEAM_REG_LEFT_TRACKPAD_MODE, STEAM_TRACKPAD_NONE, /* disable mouse */
++ STEAM_REG_RIGHT_TRACKPAD_MODE, STEAM_TRACKPAD_NONE, /* disable mouse */
+ 0);
+ }
+ }
+@@ -747,7 +833,7 @@ static void steam_lizard_mode_heartbeat(struct work_struct *work)
+ if (!steam->client_opened && steam->client_hdev) {
+ steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
+ steam_write_registers(steam,
+- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
++ STEAM_REG_RIGHT_TRACKPAD_MODE, STEAM_TRACKPAD_NONE, /* disable mouse */
+ 0);
+ schedule_delayed_work(&steam->heartbeat, 5 * HZ);
+ }
+--
+2.41.0
+
+
+From 4b1dd1ebfd2d3f123212e1296d304909e5b3a406 Mon Sep 17 00:00:00 2001
+From: Vicki Pfau <vi@endrift.com>
+Date: Wed, 16 Nov 2022 19:54:26 -0800
+Subject: [PATCH 04/10] HID: hid-steam: Add gamepad-only mode switched to by
+ holding options
+
+Signed-off-by: Vicki Pfau <vi@endrift.com>
+---
+ drivers/hid/hid-steam.c | 72 +++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 72 insertions(+)
+
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index 39a9bf3b7f77d..0620046b142ef 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -202,6 +202,11 @@ static LIST_HEAD(steam_devices);
+ #define STEAM_TRACKPAD_NONE 0x07
+ #define STEAM_TRACKPAD_GESTURE_KEYBOARD 0x08
+
++/* Pad identifiers for the deck */
++#define STEAM_PAD_LEFT 0
++#define STEAM_PAD_RIGHT 1
++#define STEAM_PAD_BOTH 2
++
+ /* Other random constants */
+ #define STEAM_SERIAL_LEN 10
+
+@@ -221,6 +226,9 @@ struct steam_device {
+ u8 battery_charge;
+ u16 voltage;
+ struct delayed_work heartbeat;
++ struct delayed_work mode_switch;
++ bool did_mode_switch;
++ bool gamepad_mode;
+ struct work_struct rumble_work;
+ u16 rumble_left;
+ u16 rumble_right;
+@@ -380,6 +388,33 @@ static inline int steam_request_conn_status(struct steam_device *steam)
+ return steam_send_report_byte(steam, STEAM_CMD_DONGLE_GET_STATE);
+ }
+
++/*
++ * Send a haptic pulse to the trackpads
++ * Duration and interval are measured in microseconds, count is the number
++ * of pulses to send for duration time with interval microseconds between them
++ * and gain is measured in decibels, ranging from -24 to +6
++ */
++static inline int steam_haptic_pulse(struct steam_device *steam, u8 pad,
++ u16 duration, u16 interval, u16 count, u8 gain)
++{
++ u8 report[10] = {STEAM_CMD_HAPTIC_PULSE, 8};
++
++ /* Left and right are swapped on this report for legacy reasons */
++ if (pad < STEAM_PAD_BOTH)
++ pad ^= 1;
++
++ report[2] = pad;
++ report[3] = duration & 0xFF;
++ report[4] = duration >> 8;
++ report[5] = interval & 0xFF;
++ report[6] = interval >> 8;
++ report[7] = count & 0xFF;
++ report[8] = count >> 8;
++ report[9] = gain;
++
++ return steam_send_report(steam, report, sizeof(report));
++}
++
+ static inline int steam_haptic_rumble(struct steam_device *steam,
+ u16 intensity, u16 left_speed, u16 right_speed,
+ u8 left_gain, u8 right_gain)
+@@ -421,6 +456,9 @@ static int steam_play_effect(struct input_dev *dev, void *data,
+
+ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
+ {
++ if (steam->gamepad_mode)
++ enable = false;
++
+ if (enable) {
+ /* enable esc, enter, cursors */
+ steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MAPPINGS);
+@@ -805,6 +843,29 @@ static void steam_work_connect_cb(struct work_struct *work)
+ }
+ }
+
++static void steam_mode_switch_cb(struct work_struct *work)
++{
++ struct steam_device *steam = container_of(to_delayed_work(work),
++ struct steam_device, mode_switch);
++ steam->gamepad_mode = !steam->gamepad_mode;
++ if (!lizard_mode)
++ return;
++
++ mutex_lock(&steam->mutex);
++ if (steam->gamepad_mode)
++ steam_set_lizard_mode(steam, false);
++ else if (!steam->client_opened)
++ steam_set_lizard_mode(steam, lizard_mode);
++ mutex_unlock(&steam->mutex);
++
++ steam_haptic_pulse(steam, STEAM_PAD_RIGHT, 0x190, 0, 1, 0);
++ if (steam->gamepad_mode) {
++ steam_haptic_pulse(steam, STEAM_PAD_LEFT, 0x14D, 0x14D, 0x2D, 0);
++ } else {
++ steam_haptic_pulse(steam, STEAM_PAD_LEFT, 0x1F4, 0x1F4, 0x1E, 0);
++ }
++}
++
+ static bool steam_is_valve_interface(struct hid_device *hdev)
+ {
+ struct hid_report_enum *rep_enum;
+@@ -977,6 +1038,7 @@ static int steam_probe(struct hid_device *hdev,
+ mutex_init(&steam->mutex);
+ steam->quirks = id->driver_data;
+ INIT_WORK(&steam->work_connect, steam_work_connect_cb);
++ INIT_DELAYED_WORK(&steam->mode_switch, steam_mode_switch_cb);
+ INIT_LIST_HEAD(&steam->list);
+ INIT_DEFERRABLE_WORK(&steam->heartbeat, steam_lizard_mode_heartbeat);
+ INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
+@@ -1036,6 +1098,7 @@ static int steam_probe(struct hid_device *hdev,
+ client_hdev_fail:
+ cancel_work_sync(&steam->work_connect);
+ cancel_delayed_work_sync(&steam->heartbeat);
++ cancel_delayed_work_sync(&steam->mode_switch);
+ cancel_work_sync(&steam->rumble_work);
+ steam_alloc_fail:
+ hid_err(hdev, "%s: failed with error %d\n",
+@@ -1059,6 +1122,7 @@ static void steam_remove(struct hid_device *hdev)
+ cancel_delayed_work_sync(&steam->heartbeat);
+ mutex_unlock(&steam->mutex);
+ cancel_work_sync(&steam->work_connect);
++ cancel_delayed_work_sync(&steam->mode_switch);
+ if (steam->quirks & STEAM_QUIRK_WIRELESS) {
+ hid_info(hdev, "Steam wireless receiver disconnected");
+ }
+@@ -1393,6 +1457,14 @@ static void steam_do_deck_input_event(struct steam_device *steam,
+ input_event(input, EV_KEY, BTN_BASE, !!(b14 & BIT(2)));
+
+ input_sync(input);
++
++ if (!(b9 & BIT(6)) && steam->did_mode_switch) {
++ steam->did_mode_switch = false;
++ cancel_delayed_work_sync(&steam->mode_switch);
++ } else if (!steam->client_opened && (b9 & BIT(6)) && !steam->did_mode_switch) {
++ steam->did_mode_switch = true;
++ schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100);
++ }
+ }
+
+ /*
+--
+2.41.0
+
+
+From 187582492c359d56865759f120214cfe6fa4ed50 Mon Sep 17 00:00:00 2001
+From: Vicki Pfau <vi@endrift.com>
+Date: Mon, 8 May 2023 20:24:56 -0700
+Subject: [PATCH 05/10] HID: hid-steam: Clean up locking
+
+This cleans up the locking logic so that the spinlock is consistently used for
+access to a small handful of struct variables, and the mutex is exclusively and
+consistently used for ensuring that mutliple threads aren't trying to
+send/receive reports at the same time. Previously, only some report
+transactions were guarded by this mutex, potentially breaking atomicity. The
+mutex has been renamed to reflect this usage.
+
+Signed-off-by: Vicki Pfau <vi@endrift.com>
+---
+ drivers/hid/hid-steam.c | 148 ++++++++++++++++++++++++----------------
+ 1 file changed, 90 insertions(+), 58 deletions(-)
+
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index 0620046b142ef..845ca71b8bd3a 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -214,7 +214,7 @@ struct steam_device {
+ struct list_head list;
+ spinlock_t lock;
+ struct hid_device *hdev, *client_hdev;
+- struct mutex mutex;
++ struct mutex report_mutex;
+ bool client_opened;
+ struct input_dev __rcu *input;
+ unsigned long quirks;
+@@ -361,21 +361,26 @@ static int steam_get_serial(struct steam_device *steam)
+ * Send: 0xae 0x15 0x01
+ * Recv: 0xae 0x15 0x01 serialnumber (10 chars)
+ */
+- int ret;
++ int ret = 0;
+ u8 cmd[] = {STEAM_CMD_GET_STRING_ATTRIB, 0x15, STEAM_ATTRIB_STR_UNIT_SERIAL};
+ u8 reply[3 + STEAM_SERIAL_LEN + 1];
+
++ mutex_lock(&steam->report_mutex);
+ ret = steam_send_report(steam, cmd, sizeof(cmd));
+ if (ret < 0)
+- return ret;
++ goto out;
+ ret = steam_recv_report(steam, reply, sizeof(reply));
+ if (ret < 0)
+- return ret;
+- if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != STEAM_ATTRIB_STR_UNIT_SERIAL)
+- return -EIO;
++ goto out;
++ if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != STEAM_ATTRIB_STR_UNIT_SERIAL) {
++ ret = -EIO;
++ goto out;
++ }
+ reply[3 + STEAM_SERIAL_LEN] = 0;
+ strscpy(steam->serial_no, reply + 3, sizeof(steam->serial_no));
+- return 0;
++out:
++ mutex_unlock(&steam->report_mutex);
++ return ret;
+ }
+
+ /*
+@@ -385,7 +390,11 @@ static int steam_get_serial(struct steam_device *steam)
+ */
+ static inline int steam_request_conn_status(struct steam_device *steam)
+ {
+- return steam_send_report_byte(steam, STEAM_CMD_DONGLE_GET_STATE);
++ int ret;
++ mutex_lock(&steam->report_mutex);
++ ret = steam_send_report_byte(steam, STEAM_CMD_DONGLE_GET_STATE);
++ mutex_unlock(&steam->report_mutex);
++ return ret;
+ }
+
+ /*
+@@ -397,6 +406,7 @@ static inline int steam_request_conn_status(struct steam_device *steam)
+ static inline int steam_haptic_pulse(struct steam_device *steam, u8 pad,
+ u16 duration, u16 interval, u16 count, u8 gain)
+ {
++ int ret;
+ u8 report[10] = {STEAM_CMD_HAPTIC_PULSE, 8};
+
+ /* Left and right are swapped on this report for legacy reasons */
+@@ -412,13 +422,17 @@ static inline int steam_haptic_pulse(struct steam_device *steam, u8 pad,
+ report[8] = count >> 8;
+ report[9] = gain;
+
+- return steam_send_report(steam, report, sizeof(report));
++ mutex_lock(&steam->report_mutex);
++ ret = steam_send_report(steam, report, sizeof(report));
++ mutex_unlock(&steam->report_mutex);
++ return ret;
+ }
+
+ static inline int steam_haptic_rumble(struct steam_device *steam,
+ u16 intensity, u16 left_speed, u16 right_speed,
+ u8 left_gain, u8 right_gain)
+ {
++ int ret;
+ u8 report[11] = {STEAM_CMD_HAPTIC_RUMBLE, 9};
+
+ report[3] = intensity & 0xFF;
+@@ -430,7 +444,10 @@ static inline int steam_haptic_rumble(struct steam_device *steam,
+ report[9] = left_gain;
+ report[10] = right_gain;
+
+- return steam_send_report(steam, report, sizeof(report));
++ mutex_lock(&steam->report_mutex);
++ ret = steam_send_report(steam, report, sizeof(report));
++ mutex_unlock(&steam->report_mutex);
++ return ret;
+ }
+
+ static void steam_haptic_rumble_cb(struct work_struct *work)
+@@ -460,6 +477,7 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
+ enable = false;
+
+ if (enable) {
++ mutex_lock(&steam->report_mutex);
+ /* enable esc, enter, cursors */
+ steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MAPPINGS);
+ /* enable mouse */
+@@ -467,9 +485,11 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
+ steam_write_registers(steam,
+ STEAM_REG_SMOOTH_ABSOLUTE_MOUSE, 0x01, /* enable smooth */
+ 0);
++ mutex_unlock(&steam->report_mutex);
+
+ cancel_delayed_work_sync(&steam->heartbeat);
+ } else {
++ mutex_lock(&steam->report_mutex);
+ /* disable esc, enter, cursor */
+ steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
+
+@@ -481,18 +501,19 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
+ STEAM_REG_LEFT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
+ STEAM_REG_RIGHT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
+ 0);
++ mutex_unlock(&steam->report_mutex);
+ /*
+ * The Steam Deck has a watchdog that automatically enables
+ * lizard mode if it doesn't see any traffic for too long
+ */
+- if (!work_busy(&steam->heartbeat.work))
+- schedule_delayed_work(&steam->heartbeat, 5 * HZ);
++ schedule_delayed_work(&steam->heartbeat, 5 * HZ);
+ } else {
+ steam_write_registers(steam,
+ STEAM_REG_SMOOTH_ABSOLUTE_MOUSE, 0x00, /* disable smooth */
+ STEAM_REG_LEFT_TRACKPAD_MODE, STEAM_TRACKPAD_NONE, /* disable mouse */
+ STEAM_REG_RIGHT_TRACKPAD_MODE, STEAM_TRACKPAD_NONE, /* disable mouse */
+ 0);
++ mutex_unlock(&steam->report_mutex);
+ }
+ }
+ }
+@@ -500,22 +521,29 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
+ static int steam_input_open(struct input_dev *dev)
+ {
+ struct steam_device *steam = input_get_drvdata(dev);
++ unsigned long flags;
++ bool set_lizard_mode;
+
+- mutex_lock(&steam->mutex);
+- if (!steam->client_opened && lizard_mode)
++ spin_lock_irqsave(&steam->lock, flags);
++ set_lizard_mode = !steam->client_opened && lizard_mode;
++ spin_unlock_irqrestore(&steam->lock, flags);
++ if (set_lizard_mode)
+ steam_set_lizard_mode(steam, false);
+- mutex_unlock(&steam->mutex);
++
+ return 0;
+ }
+
+ static void steam_input_close(struct input_dev *dev)
+ {
+ struct steam_device *steam = input_get_drvdata(dev);
++ unsigned long flags;
++ bool set_lizard_mode;
+
+- mutex_lock(&steam->mutex);
+- if (!steam->client_opened && lizard_mode)
++ spin_lock_irqsave(&steam->lock, flags);
++ set_lizard_mode = !steam->client_opened && lizard_mode;
++ spin_unlock_irqrestore(&steam->lock, flags);
++ if (set_lizard_mode)
+ steam_set_lizard_mode(steam, true);
+- mutex_unlock(&steam->mutex);
+ }
+
+ static enum power_supply_property steam_battery_props[] = {
+@@ -760,6 +788,7 @@ static int steam_register(struct steam_device *steam)
+ {
+ int ret;
+ bool client_opened;
++ unsigned long flags;
+
+ /*
+ * This function can be called several times in a row with the
+@@ -772,11 +801,9 @@ static int steam_register(struct steam_device *steam)
+ * Unlikely, but getting the serial could fail, and it is not so
+ * important, so make up a serial number and go on.
+ */
+- mutex_lock(&steam->mutex);
+ if (steam_get_serial(steam) < 0)
+ strscpy(steam->serial_no, "XXXXXXXXXX",
+ sizeof(steam->serial_no));
+- mutex_unlock(&steam->mutex);
+
+ hid_info(steam->hdev, "Steam Controller '%s' connected",
+ steam->serial_no);
+@@ -791,11 +818,11 @@ static int steam_register(struct steam_device *steam)
+ mutex_unlock(&steam_devices_lock);
+ }
+
+- mutex_lock(&steam->mutex);
++ spin_lock_irqsave(&steam->lock, flags);
+ client_opened = steam->client_opened;
++ spin_unlock_irqrestore(&steam->lock, flags);
+ if (!client_opened)
+ steam_set_lizard_mode(steam, lizard_mode);
+- mutex_unlock(&steam->mutex);
+
+ if (!client_opened)
+ ret = steam_input_register(steam);
+@@ -847,16 +874,21 @@ static void steam_mode_switch_cb(struct work_struct *work)
+ {
+ struct steam_device *steam = container_of(to_delayed_work(work),
+ struct steam_device, mode_switch);
++ unsigned long flags;
++ bool client_opened;
+ steam->gamepad_mode = !steam->gamepad_mode;
+ if (!lizard_mode)
+ return;
+
+- mutex_lock(&steam->mutex);
+ if (steam->gamepad_mode)
+ steam_set_lizard_mode(steam, false);
+- else if (!steam->client_opened)
+- steam_set_lizard_mode(steam, lizard_mode);
+- mutex_unlock(&steam->mutex);
++ else {
++ spin_lock_irqsave(&steam->lock, flags);
++ client_opened = steam->client_opened;
++ spin_unlock_irqrestore(&steam->lock, flags);
++ if (!client_opened)
++ steam_set_lizard_mode(steam, lizard_mode);
++ }
+
+ steam_haptic_pulse(steam, STEAM_PAD_RIGHT, 0x190, 0, 1, 0);
+ if (steam->gamepad_mode) {
+@@ -889,16 +921,21 @@ static void steam_lizard_mode_heartbeat(struct work_struct *work)
+ {
+ struct steam_device *steam = container_of(work, struct steam_device,
+ heartbeat.work);
++ bool client_opened;
++ unsigned long flags;
+
+- mutex_lock(&steam->mutex);
+- if (!steam->client_opened && steam->client_hdev) {
++ spin_lock_irqsave(&steam->lock, flags);
++ client_opened = steam->client_opened;
++ spin_unlock_irqrestore(&steam->lock, flags);
++ if (!client_opened) {
++ mutex_lock(&steam->report_mutex);
+ steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
+ steam_write_registers(steam,
+ STEAM_REG_RIGHT_TRACKPAD_MODE, STEAM_TRACKPAD_NONE, /* disable mouse */
+ 0);
++ mutex_unlock(&steam->report_mutex);
+ schedule_delayed_work(&steam->heartbeat, 5 * HZ);
+ }
+- mutex_unlock(&steam->mutex);
+ }
+
+ static int steam_client_ll_parse(struct hid_device *hdev)
+@@ -921,10 +958,11 @@ static void steam_client_ll_stop(struct hid_device *hdev)
+ static int steam_client_ll_open(struct hid_device *hdev)
+ {
+ struct steam_device *steam = hdev->driver_data;
++ unsigned long flags;
+
+- mutex_lock(&steam->mutex);
++ spin_lock_irqsave(&steam->lock, flags);
+ steam->client_opened = true;
+- mutex_unlock(&steam->mutex);
++ spin_unlock_irqrestore(&steam->lock, flags);
+
+ steam_input_unregister(steam);
+
+@@ -939,14 +977,12 @@ static void steam_client_ll_close(struct hid_device *hdev)
+ bool connected;
+
+ spin_lock_irqsave(&steam->lock, flags);
+- connected = steam->connected;
++ steam->client_opened = false;
++ connected = steam->connected && !steam->client_opened;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+- mutex_lock(&steam->mutex);
+- steam->client_opened = false;
+ if (connected)
+ steam_set_lizard_mode(steam, lizard_mode);
+- mutex_unlock(&steam->mutex);
+
+ if (connected)
+ steam_input_register(steam);
+@@ -1035,7 +1071,7 @@ static int steam_probe(struct hid_device *hdev,
+ steam->hdev = hdev;
+ hid_set_drvdata(hdev, steam);
+ spin_lock_init(&steam->lock);
+- mutex_init(&steam->mutex);
++ mutex_init(&steam->report_mutex);
+ steam->quirks = id->driver_data;
+ INIT_WORK(&steam->work_connect, steam_work_connect_cb);
+ INIT_DELAYED_WORK(&steam->mode_switch, steam_mode_switch_cb);
+@@ -1043,13 +1079,6 @@ static int steam_probe(struct hid_device *hdev,
+ INIT_DEFERRABLE_WORK(&steam->heartbeat, steam_lizard_mode_heartbeat);
+ INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
+
+- steam->client_hdev = steam_create_client_hid(hdev);
+- if (IS_ERR(steam->client_hdev)) {
+- ret = PTR_ERR(steam->client_hdev);
+- goto client_hdev_fail;
+- }
+- steam->client_hdev->driver_data = steam;
+-
+ /*
+ * With the real steam controller interface, do not connect hidraw.
+ * Instead, create the client_hid and connect that.
+@@ -1058,10 +1087,6 @@ static int steam_probe(struct hid_device *hdev,
+ if (ret)
+ goto hid_hw_start_fail;
+
+- ret = hid_add_device(steam->client_hdev);
+- if (ret)
+- goto client_hdev_add_fail;
+-
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev,
+@@ -1087,15 +1112,26 @@ static int steam_probe(struct hid_device *hdev,
+ }
+ }
+
++ steam->client_hdev = steam_create_client_hid(hdev);
++ if (IS_ERR(steam->client_hdev)) {
++ ret = PTR_ERR(steam->client_hdev);
++ goto client_hdev_fail;
++ }
++ steam->client_hdev->driver_data = steam;
++
++ ret = hid_add_device(steam->client_hdev);
++ if (ret)
++ goto client_hdev_add_fail;
++
+ return 0;
+
+-input_register_fail:
+-hid_hw_open_fail:
+ client_hdev_add_fail:
+ hid_hw_stop(hdev);
+-hid_hw_start_fail:
+- hid_destroy_device(steam->client_hdev);
+ client_hdev_fail:
++ hid_destroy_device(steam->client_hdev);
++input_register_fail:
++hid_hw_open_fail:
++hid_hw_start_fail:
+ cancel_work_sync(&steam->work_connect);
+ cancel_delayed_work_sync(&steam->heartbeat);
+ cancel_delayed_work_sync(&steam->mode_switch);
+@@ -1115,14 +1151,12 @@ static void steam_remove(struct hid_device *hdev)
+ return;
+ }
+
++ cancel_delayed_work_sync(&steam->heartbeat);
++ cancel_delayed_work_sync(&steam->mode_switch);
++ cancel_work_sync(&steam->work_connect);
+ hid_destroy_device(steam->client_hdev);
+- mutex_lock(&steam->mutex);
+ steam->client_hdev = NULL;
+ steam->client_opened = false;
+- cancel_delayed_work_sync(&steam->heartbeat);
+- mutex_unlock(&steam->mutex);
+- cancel_work_sync(&steam->work_connect);
+- cancel_delayed_work_sync(&steam->mode_switch);
+ if (steam->quirks & STEAM_QUIRK_WIRELESS) {
+ hid_info(hdev, "Steam wireless receiver disconnected");
+ }
+@@ -1597,10 +1631,8 @@ static int steam_param_set_lizard_mode(const char *val,
+
+ mutex_lock(&steam_devices_lock);
+ list_for_each_entry(steam, &steam_devices, list) {
+- mutex_lock(&steam->mutex);
+ if (!steam->client_opened)
+ steam_set_lizard_mode(steam, lizard_mode);
+- mutex_unlock(&steam->mutex);
+ }
+ mutex_unlock(&steam_devices_lock);
+ return 0;
+--
+2.41.0
+
+
+From d4490c88bed06b4c18af4a6029d67374df5218e1 Mon Sep 17 00:00:00 2001
+From: Vicki Pfau <vi@endrift.com>
+Date: Wed, 10 May 2023 17:27:12 -0700
+Subject: [PATCH 06/10] HID: hid-steam: Make client_opened a counter
+
+The client_opened variable was used to track if the hidraw was opened by any
+clients to silence keyboard/mouse events while opened. However, there was no
+counting of how many clients were opened, so opening two at the same time and
+then closing one would fool the driver into thinking it had no remaining opened
+clients.
+
+Signed-off-by: Vicki Pfau <vi@endrift.com>
+---
+ drivers/hid/hid-steam.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index 845ca71b8bd3a..0c2fe51b29bc1 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -215,7 +215,7 @@ struct steam_device {
+ spinlock_t lock;
+ struct hid_device *hdev, *client_hdev;
+ struct mutex report_mutex;
+- bool client_opened;
++ unsigned long client_opened;
+ struct input_dev __rcu *input;
+ unsigned long quirks;
+ struct work_struct work_connect;
+@@ -787,7 +787,7 @@ static void steam_battery_unregister(struct steam_device *steam)
+ static int steam_register(struct steam_device *steam)
+ {
+ int ret;
+- bool client_opened;
++ unsigned long client_opened;
+ unsigned long flags;
+
+ /*
+@@ -961,7 +961,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
+ unsigned long flags;
+
+ spin_lock_irqsave(&steam->lock, flags);
+- steam->client_opened = true;
++ steam->client_opened++;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+ steam_input_unregister(steam);
+@@ -977,7 +977,7 @@ static void steam_client_ll_close(struct hid_device *hdev)
+ bool connected;
+
+ spin_lock_irqsave(&steam->lock, flags);
+- steam->client_opened = false;
++ steam->client_opened--;
+ connected = steam->connected && !steam->client_opened;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+@@ -1156,7 +1156,7 @@ static void steam_remove(struct hid_device *hdev)
+ cancel_work_sync(&steam->work_connect);
+ hid_destroy_device(steam->client_hdev);
+ steam->client_hdev = NULL;
+- steam->client_opened = false;
++ steam->client_opened = 0;
+ if (steam->quirks & STEAM_QUIRK_WIRELESS) {
+ hid_info(hdev, "Steam wireless receiver disconnected");
+ }
+--
+2.41.0
+
+
+From 58a8667b251984ecc85a503c5dec3fc8f98028ff Mon Sep 17 00:00:00 2001
+From: Vicki Pfau <vi@endrift.com>
+Date: Thu, 18 May 2023 18:00:35 -0700
+Subject: [PATCH 07/10] HID: hid-steam: Better handling of serial number length
+
+The second byte of the GET_STRING_ATTRIB report is a length, so we should set
+the size of the buffer to be the size we're actually requesting, and only
+reject the reply if the length out is nonsensical.
+
+Signed-off-by: Vicki Pfau <vi@endrift.com>
+---
+ drivers/hid/hid-steam.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index 0c2fe51b29bc1..92e3e1052fa42 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -208,7 +208,7 @@ static LIST_HEAD(steam_devices);
+ #define STEAM_PAD_BOTH 2
+
+ /* Other random constants */
+-#define STEAM_SERIAL_LEN 10
++#define STEAM_SERIAL_LEN 0x15
+
+ struct steam_device {
+ struct list_head list;
+@@ -359,10 +359,10 @@ static int steam_get_serial(struct steam_device *steam)
+ {
+ /*
+ * Send: 0xae 0x15 0x01
+- * Recv: 0xae 0x15 0x01 serialnumber (10 chars)
++ * Recv: 0xae 0x15 0x01 serialnumber
+ */
+ int ret = 0;
+- u8 cmd[] = {STEAM_CMD_GET_STRING_ATTRIB, 0x15, STEAM_ATTRIB_STR_UNIT_SERIAL};
++ u8 cmd[] = {STEAM_CMD_GET_STRING_ATTRIB, sizeof(steam->serial_no), STEAM_ATTRIB_STR_UNIT_SERIAL};
+ u8 reply[3 + STEAM_SERIAL_LEN + 1];
+
+ mutex_lock(&steam->report_mutex);
+@@ -372,12 +372,12 @@ static int steam_get_serial(struct steam_device *steam)
+ ret = steam_recv_report(steam, reply, sizeof(reply));
+ if (ret < 0)
+ goto out;
+- if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != STEAM_ATTRIB_STR_UNIT_SERIAL) {
++ if (reply[0] != 0xae || reply[1] < 1 || reply[1] > sizeof(steam->serial_no) || reply[2] != STEAM_ATTRIB_STR_UNIT_SERIAL) {
+ ret = -EIO;
+ goto out;
+ }
+ reply[3 + STEAM_SERIAL_LEN] = 0;
+- strscpy(steam->serial_no, reply + 3, sizeof(steam->serial_no));
++ strscpy(steam->serial_no, reply + 3, reply[1]);
+ out:
+ mutex_unlock(&steam->report_mutex);
+ return ret;
+--
+2.41.0
+
+
+From 7460867bd78651a6187ac44c73d1be653c09973b Mon Sep 17 00:00:00 2001
+From: Vicki Pfau <vi@endrift.com>
+Date: Fri, 24 Mar 2023 10:42:27 -0700
+Subject: [PATCH 08/10] Input: xpad - fix support for some third-party
+ controllers
+
+Some third-party controllers, such as the HORPIAD FPS for Nintendo Switch and
+Gamesir-G3w, require a specific packet that the first-party XInput driver sends
+before it will start sending reports. It's not currently known what this packet
+does, but since the first-party driver always sends it's unlikely that this
+could cause issues with existing controllers.
+
+Co-authored-by: Andrey Smirnov <andrew.smirnov@gmail.com>
+Signed-off-by: Vicki Pfau <vi@endrift.com>
+Link: https://lore.kernel.org/r/20230324040446.3487725-3-vi@endrift.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+---
+ drivers/input/joystick/xpad.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index cdb193317c3b6..fc680b45f936e 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -264,6 +264,7 @@ static const struct xpad_device {
+ { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
+ { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
++ { 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
+ { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
+ { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
+@@ -1720,6 +1721,27 @@ static int xpad_start_input(struct usb_xpad *xpad)
+ return error;
+ }
+ }
++ if (xpad->xtype == XTYPE_XBOX360) {
++ /*
++ * Some third-party controllers Xbox 360-style controllers
++ * require this message to finish initialization.
++ */
++ u8 dummy[20];
++
++ error = usb_control_msg_recv(xpad->udev, 0,
++ /* bRequest */ 0x01,
++ /* bmRequestType */
++ USB_TYPE_VENDOR | USB_DIR_IN |
++ USB_RECIP_INTERFACE,
++ /* wValue */ 0x100,
++ /* wIndex */ 0x00,
++ dummy, sizeof(dummy),
++ 25, GFP_KERNEL);
++ if (error)
++ dev_warn(&xpad->dev->dev,
++ "unable to receive magic message: %d\n",
++ error);
++ }
+
+ return 0;
+ }
+--
+2.41.0
+
+
+From 469ab7efd0383f60e83c086347526273ed1d1a33 Mon Sep 17 00:00:00 2001
+From: Timothee Besset <ttimo@valvesoftware.com>
+Date: Mon, 22 May 2023 20:25:57 -0500
+Subject: [PATCH 09/10] Input: xpad - Add GameSir VID for Xbox One controllers
+
+Co-authored-by: Sam Lantinga <saml@valvesoftware.com>
+Signed-off-by: Sam Lantinga <slouken@libsdl.org>
+Signed-off-by: Vicki Pfau <vi@endrift.com>
+---
+ drivers/input/joystick/xpad.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index fc680b45f936e..bb2f69faa2a81 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -500,6 +500,7 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
+ XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
+ XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
++ XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ { }
+ };
+
+--
+2.41.0
+
+
+From 4fd74c574f8554056facabd4e36e5e397f2e6b98 Mon Sep 17 00:00:00 2001
+From: Jonathan Frederick <doublej472@gmail.com>
+Date: Fri, 7 Jul 2023 15:11:33 -0700
+Subject: [PATCH 10/10] Input: xpad - add GameSir T4 Kaleid Controller support
+
+Add VID and PID to the xpad_device table to allow driver
+to use the GameSir T4 Kaleid Controller, which is
+XTYPE_XBOX360 compatible in xinput mode.
+
+Signed-off-by: Jonathan Frederick <doublej472@gmail.com>
+Link: https://lore.kernel.org/r/ZKeKSbP3faIPv5jB@dbj-hp-flip
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+---
+ drivers/input/joystick/xpad.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index bb2f69faa2a81..ede380551e55c 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -366,6 +366,7 @@ static const struct xpad_device {
+ { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
++ { 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
+ { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
+ { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
+@@ -500,6 +501,7 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
+ XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
+ XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
++ XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ { }
+ };
+--
+2.41.0
+
diff --git a/0001-ROG-ALLY-bmi323-device.patch b/0001-ROG-ALLY-bmi323-device.patch
new file mode 100644
index 000000000000..e83d6e3e1830
--- /dev/null
+++ b/0001-ROG-ALLY-bmi323-device.patch
@@ -0,0 +1,2672 @@
+From 622ea77bfccd751247b1c08c3126d7ab716f0423 Mon Sep 17 00:00:00 2001
+From: Denis <benato.denis96@gmail.com>
+Date: Mon, 25 Sep 2023 03:38:49 +0200
+Subject: [PATCH] This commit adds support to the bmi323 device on top of the
+ pre-existing bmc150 kernel module.
+
+Some new devices, for example the ROG Ally and the Air Plus, identify this chip in the ACPI table as a bmc150, so previously the original module was loaded,
+but was erroring out as it cannot handle such a device.
+
+The device I own does not allow me to use the interrupt part of the device as the interrupt pin is not connected (or not advertised to be connected) hence
+I avoided including on this commit anything related to IRQ.
+
+This driver has already been proved to work well enough to be used in the switch emulator "yuzu".
+
+While designing this module my main focus was not to alter the original driver and not to limit the original author in regard to future modifications,
+and I was mostly able to achieve this, except:
+1) I added a new structure on top of the original one and added a field that is responsible for holding information
+on what type of chip the module is currently managing
+2) the previous point required the init function of the original driver to write that field in order to be sure no bmi323 code
+was executed when the old part of the module is managing the device
+3) as the original driver issued an i2c write on some register not really meant to be written in the bmi323 device I have made sure an i2c read to discover
+the bmi323 is performed prior to that code: such read SHOULD fail in the older bmc150 IC for two reasons:
+ - the i2c address is not reported in the memory map of the bmc150 in its datasheet
+ - the i2c read attempts to get 4 bytes out of a 8-bit device
+ - the fourth bit (the one that cannot be read from a bmc150 device) is initialized to 0 and bmi323 presence is signaled with a 1 in the LSB
+ that is the fourth coming out of the device in temporal order
+---
+ drivers/iio/accel/bmc150-accel-core.c | 2307 ++++++++++++++++++++++++-
+ drivers/iio/accel/bmc150-accel-i2c.c | 100 +-
+ drivers/iio/accel/bmc150-accel.h | 94 +-
+ 3 files changed, 2495 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
+index 110591804b4c..9a2c1732c9ef 100644
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -130,6 +130,73 @@
+ #define BMC150_ACCEL_REG_FIFO_DATA 0x3F
+ #define BMC150_ACCEL_FIFO_LENGTH 32
+
++#define BMC150_BMI323_TEMPER_CENTER_VAL 23
++#define BMC150_BMI323_TEMPER_LSB_PER_KELVIN_VAL 512
++
++#define BMC150_BMI323_AUTO_SUSPEND_DELAY_MS 2000
++
++#define BMC150_BMI323_CHIP_ID_REG 0x00
++#define BMC150_BMI323_SOFT_RESET_REG 0x7E
++#define BMC150_BMI323_SOFT_RESET_VAL 0xDEAFU
++#define BMC150_BMI323_DATA_BASE_REG 0x03
++#define BMC150_BMI323_TEMPERATURE_DATA_REG 0x09
++#define BMC150_BMI323_FIFO_FILL_LEVEL_REG 0x15
++#define BMC150_BMI323_FIFO_DATA_REG 0x16
++#define BMC150_BMI323_ACC_CONF_REG 0x20
++#define BMC150_BMI323_GYR_CONF_REG 0x21
++#define BMC150_BMI323_FIFO_CONF_REG 0x36
++
++// these are bits [0:3] of ACC_CONF.acc_odr, sample rate in Hz for the accel part of the chip
++#define BMC150_BMI323_ACCEL_ODR_0_78123_VAL 0x0001
++#define BMC150_BMI323_ACCEL_ODR_1_5625_VAL 0x0002
++#define BMC150_BMI323_ACCEL_ODR_3_125_VAL 0x0003
++#define BMC150_BMI323_ACCEL_ODR_6_25_VAL 0x0004
++#define BMC150_BMI323_ACCEL_ODR_12_5_VAL 0x0005
++#define BMC150_BMI323_ACCEL_ODR_25_VAL 0x0006
++#define BMC150_BMI323_ACCEL_ODR_50_VAL 0x0007
++#define BMC150_BMI323_ACCEL_ODR_100_VAL 0x0008
++#define BMC150_BMI323_ACCEL_ODR_200_VAL 0x0009
++#define BMC150_BMI323_ACCEL_ODR_400_VAL 0x000A
++#define BMC150_BMI323_ACCEL_ODR_800_VAL 0x000B
++#define BMC150_BMI323_ACCEL_ODR_1600_VAL 0x000C
++#define BMC150_BMI323_ACCEL_ODR_3200_VAL 0x000D
++#define BMC150_BMI323_ACCEL_ODR_6400_VAL 0x000E
++
++#define BMC150_BMI323_ACCEL_BW_ODR_2_VAL 0x0000
++#define BMC150_BMI323_ACCEL_BW_ODR_4_VAL 0x0001
++
++// these are bits [4:6] of ACC_CONF.acc_range, full scale resolution
++#define BMC150_BMI323_ACCEL_RANGE_2_VAL 0x0000 // +/-2g, 16.38 LSB/mg
++#define BMC150_BMI323_ACCEL_RANGE_4_VAL 0x0001 // +/-4g, 8.19 LSB/mg
++#define BMC150_BMI323_ACCEL_RANGE_8_VAL 0x0002 // +/-8g, 4.10 LSB/mg
++#define BMC150_BMI323_ACCEL_RANGE_16_VAL 0x0003 // +/-4g, 2.05 LSB/mg
++
++// these are bits [0:3] of GYR_CONF.gyr_odr, sample rate in Hz for the gyro part of the chip
++#define BMC150_BMI323_GYRO_ODR_0_78123_VAL 0x0001
++#define BMC150_BMI323_GYRO_ODR_1_5625_VAL 0x0002
++#define BMC150_BMI323_GYRO_ODR_3_125_VAL 0x0003
++#define BMC150_BMI323_GYRO_ODR_6_25_VAL 0x0004
++#define BMC150_BMI323_GYRO_ODR_12_5_VAL 0x0005
++#define BMC150_BMI323_GYRO_ODR_25_VAL 0x0006
++#define BMC150_BMI323_GYRO_ODR_50_VAL 0x0007
++#define BMC150_BMI323_GYRO_ODR_100_VAL 0x0008
++#define BMC150_BMI323_GYRO_ODR_200_VAL 0x0009
++#define BMC150_BMI323_GYRO_ODR_400_VAL 0x000A
++#define BMC150_BMI323_GYRO_ODR_800_VAL 0x000B
++#define BMC150_BMI323_GYRO_ODR_1600_VAL 0x000C
++#define BMC150_BMI323_GYRO_ODR_3200_VAL 0x000D
++#define BMC150_BMI323_GYRO_ODR_6400_VAL 0x000E
++
++#define BMC150_BMI323_GYRO_BW_ODR_2_VAL 0x0000
++#define BMC150_BMI323_GYRO_BW_ODR_4_VAL 0x0001
++
++// these are bits [4:6] of GYR_CONF.gyr_range, full scale resolution
++#define BMC150_BMI323_GYRO_RANGE_125_VAL 0x0000 // +/-125°/s, 262.144 LSB/°/s
++#define BMC150_BMI323_GYRO_RANGE_250_VAL 0x0001 // +/-250°/s, 131.2 LSB/°/s
++#define BMC150_BMI323_GYRO_RANGE_500_VAL 0x0002 // +/-500°/s, 65.6 LSB/°/s
++#define BMC150_BMI323_GYRO_RANGE_1000_VAL 0x0003 // +/-1000°/s, 32.8 LSB/°/s
++#define BMC150_BMI323_GYRO_RANGE_2000_VAL 0x0004 // +/-2000°/s, 16.4 LSB/°/s
++
+ enum bmc150_accel_axis {
+ AXIS_X,
+ AXIS_Y,
+@@ -149,6 +216,654 @@ struct bmc150_scale_info {
+ u8 reg_range;
+ };
+
++/*
++ * This enum MUST not be altered as there are parts in the code that
++ * uses an int conversion to get the correct device register to read.
++ */
++enum bmi323_axis {
++ BMI323_ACCEL_AXIS_X = 0,
++ BMI323_ACCEL_AXIS_Y,
++ BMI323_ACCEL_AXIS_Z,
++ BMI323_GYRO_AXIS_X,
++ BMI323_GYRO_AXIS_Y,
++ BMI323_GYRO_AXIS_Z,
++ BMI323_TEMP,
++ BMI323_AXIS_MAX,
++};
++
++static const struct bmi323_scale_accel_info {
++ u8 hw_val;
++ int val;
++ int val2;
++ int ret_type;
++} bmi323_accel_scale_map[] = {
++ {
++ .hw_val = (u16)BMC150_BMI323_ACCEL_RANGE_2_VAL << (u16)4,
++ .val = 0,
++ .val2 = 598,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_ACCEL_RANGE_4_VAL << (u16)4,
++ .val = 0,
++ .val2 = 1196,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_ACCEL_RANGE_8_VAL << (u16)4,
++ .val = 0,
++ .val2 = 2392,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_ACCEL_RANGE_16_VAL << (u16)4,
++ .val = 0,
++ .val2 = 4785,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++};
++
++static const struct bmi323_scale_gyro_info {
++ u8 hw_val;
++ int val;
++ int val2;
++ int ret_type;
++} bmi323_gyro_scale_map[] = {
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_125_VAL << (u16)4,
++ .val = 0,
++ .val2 = 66545,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_125_VAL << (u16)4,
++ .val = 0,
++ .val2 = 66,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_250_VAL << (u16)4,
++ .val = 0,
++ .val2 = 133090,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_250_VAL << (u16)4,
++ .val = 0,
++ .val2 = 133,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_500_VAL << (u16)4,
++ .val = 0,
++ .val2 = 266181,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_500_VAL << (u16)4,
++ .val = 0,
++ .val2 = 266,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_1000_VAL << (u16)4,
++ .val = 0,
++ .val2 = 532362,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_1000_VAL << (u16)4,
++ .val = 0,
++ .val2 = 532,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_2000_VAL << (u16)4,
++ .val = 0,
++ .val2 = 1064724,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ // this shouldn't be necessary, but iio seems to have a wrong rounding of this value...
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_2000_VAL << (u16)4,
++ .val = 0,
++ .val2 = 1064,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++ {
++ .hw_val = (u16)BMC150_BMI323_GYRO_RANGE_2000_VAL << (u16)4,
++ .val = 0,
++ .val2 = 1065,
++ .ret_type = IIO_VAL_INT_PLUS_NANO,
++ },
++};
++
++/*
++ * this reflects the frequency map that is following.
++ * For each index i of that map index i*2 and i*2+1 of of this
++ * holds ODR/2 and ODR/4
++ */
++static const struct bmi323_3db_freq_cutoff_accel_info {
++ int val;
++ int val2;
++ int ret_type;
++} bmi323_accel_3db_freq_cutoff[] = {
++ {
++ .val = 0,
++ .val2 = 390615,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 0,
++ .val2 = 195308,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 0,
++ .val2 = 781300,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 0,
++ .val2 = 390650,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1,
++ .val2 = 562500,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 0,
++ .val2 = 78125,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 3,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1,
++ .val2 = 500000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 6,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 3,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 12,
++ .val2 = 500000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 6,
++ .val2 = 250000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 25,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 12,
++ .val2 = 500000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 50,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 25,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 100,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 50,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 200,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 100,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 400,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 200,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 800,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 400,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1600,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 800,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1600,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 800,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 3200,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1600,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++};
++
++static const struct bmi323_freq_accel_info {
++ u8 hw_val;
++ int val;
++ int val2;
++ s64 time_ns;
++} bmi323_accel_odr_map[] = {
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_0_78123_VAL,
++ .val = 0,
++ .val2 = 781230,
++ .time_ns = 1280032769,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_1_5625_VAL,
++ .val = 1,
++ .val2 = 562600,
++ .time_ns = 886522247,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_3_125_VAL,
++ .val = 3,
++ .val2 = 125000,
++ .time_ns = 320000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_6_25_VAL,
++ .val = 6,
++ .val2 = 250000,
++ .time_ns = 160000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_12_5_VAL,
++ .val = 12,
++ .val2 = 500000,
++ .time_ns = 80000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_25_VAL,
++ .val = 25,
++ .val2 = 0,
++ .time_ns = 40000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_50_VAL,
++ .val = 50,
++ .val2 = 0,
++ .time_ns = 20000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_100_VAL,
++ .val = 100,
++ .val2 = 0,
++ .time_ns = 10000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_200_VAL,
++ .val = 200,
++ .val2 = 0,
++ .time_ns = 5000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_400_VAL,
++ .val = 400,
++ .val2 = 0,
++ .time_ns = 2500000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_800_VAL,
++ .val = 800,
++ .val2 = 0,
++ .time_ns = 1250000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_1600_VAL,
++ .val = 1600,
++ .val2 = 0,
++ .time_ns = 625000,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_3200_VAL,
++ .val = 3200,
++ .val2 = 0,
++ .time_ns = 312500,
++ },
++ {
++ .hw_val = BMC150_BMI323_ACCEL_ODR_6400_VAL,
++ .val = 6400,
++ .val2 = 0,
++ .time_ns = 156250,
++ },
++};
++
++static const struct bmi323_freq_gyro_info {
++ u8 hw_val;
++ int val;
++ int val2;
++ s64 time_ns;
++} bmi323_gyro_odr_map[] = {
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_0_78123_VAL,
++ .val = 0,
++ .val2 = 781230,
++ .time_ns = 1280032769,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_1_5625_VAL,
++ .val = 1,
++ .val2 = 562600,
++ .time_ns = 886522247,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_3_125_VAL,
++ .val = 3,
++ .val2 = 125000,
++ .time_ns = 320000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_6_25_VAL,
++ .val = 6,
++ .val2 = 250000,
++ .time_ns = 160000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_12_5_VAL,
++ .val = 12,
++ .val2 = 500000,
++ .time_ns = 80000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_25_VAL,
++ .val = 25,
++ .val2 = 0,
++ .time_ns = 40000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_50_VAL,
++ .val = 50,
++ .val2 = 0,
++ .time_ns = 20000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_100_VAL,
++ .val = 100,
++ .val2 = 0,
++ .time_ns = 10000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_200_VAL,
++ .val = 200,
++ .val2 = 0,
++ .time_ns = 5000000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_400_VAL,
++ .val = 400,
++ .val2 = 0,
++ .time_ns = 2500000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_800_VAL,
++ .val = 800,
++ .val2 = 0,
++ .time_ns = 1250000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_1600_VAL,
++ .val = 1600,
++ .val2 = 0,
++ .time_ns = 625000,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_3200_VAL,
++ .val = 3200,
++ .val2 = 0,
++ .time_ns = 312500,
++ },
++ {
++ .hw_val = BMC150_BMI323_GYRO_ODR_6400_VAL,
++ .val = 6400,
++ .val2 = 0,
++ .time_ns = 156250,
++ },
++};
++
++static const struct bmi323_3db_freq_cutoff_gyro_info {
++ int val;
++ int val2;
++ int ret_type;
++} bmi323_gyro_3db_freq_cutoff[] = {
++ {
++ .val = 0,
++ .val2 = 390615,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 0,
++ .val2 = 1953075, // TODO: check if this gets reported correctly...
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 0,
++ .val2 = 781300,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 0,
++ .val2 = 390650,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1,
++ .val2 = 562500,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 0,
++ .val2 = 78125,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 3,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1,
++ .val2 = 500000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 6,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 3,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 12,
++ .val2 = 500000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 6,
++ .val2 = 250000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 25,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 12,
++ .val2 = 500000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 50,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 25,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 100,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 50,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 200,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 100,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 400,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 200,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 800,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 400,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1600,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 800,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1600,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 800,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 3200,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++ {
++ .val = 1600,
++ .val2 = 000000,
++ .ret_type = IIO_VAL_INT_PLUS_MICRO,
++ },
++};
++
++static const int bmi323_accel_scales[] = {
++ 0, 598, 0, 1196, 0, 2392, 0, 4785,
++};
++
++static const int bmi323_gyro_scales[] = {
++ 0, 66545, 0, 133090, 0, 266181, 0, 532362, 0, 1064724,
++};
++
++static const int bmi323_sample_freqs[] = {
++ 0, 781230, 1, 562600, 3, 125000, 6, 250000, 12, 500000,
++ 25, 0, 50, 0, 100, 0, 200, 0, 400, 0,
++ 800, 0, 1600, 0, 3200, 0, 6400, 0,
++};
++
++static const struct {
++ int val;
++ int val2; // IIO_VAL_INT_PLUS_MICRO
++ u8 bw_bits;
++} bmi323_samp_freq_table[] = { { 15, 620000, 0x08 }, { 31, 260000, 0x09 },
++ { 62, 500000, 0x0A }, { 125, 0, 0x0B },
++ { 250, 0, 0x0C }, { 500, 0, 0x0D },
++ { 1000, 0, 0x0E }, { 2000, 0, 0x0F } };
++
+ struct bmc150_accel_chip_info {
+ const char *name;
+ u8 chip_id;
+@@ -1113,6 +1828,52 @@ static const struct iio_event_spec bmc150_accel_event = {
+ .num_event_specs = 1 \
+ }
+
++#define BMI323_ACCEL_CHANNEL(_axis, bits) \
++ { \
++ .type = IIO_ACCEL, .modified = 1, .channel2 = IIO_MOD_##_axis, \
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
++ .info_mask_shared_by_type = \
++ BIT(IIO_CHAN_INFO_SCALE) | \
++ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
++ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
++ .info_mask_shared_by_type_available = \
++ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
++ BIT(IIO_CHAN_INFO_SCALE), \
++ .scan_index = BMI323_ACCEL_AXIS_##_axis, \
++ .scan_type = { \
++ .sign = 's', \
++ .realbits = (bits), \
++ .storagebits = 16, \
++ .shift = 16 - (bits), \
++ .endianness = IIO_LE, \
++ }, \
++ }
++
++#define BMI323_GYRO_CHANNEL(_axis, bits) \
++ { \
++ .type = IIO_ANGL_VEL, .modified = 1, \
++ .channel2 = IIO_MOD_##_axis, \
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
++ .info_mask_shared_by_type = \
++ BIT(IIO_CHAN_INFO_SCALE) | \
++ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
++ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
++ .info_mask_shared_by_type_available = \
++ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
++ BIT(IIO_CHAN_INFO_SCALE), \
++ .scan_index = BMI323_GYRO_AXIS_##_axis, \
++ .scan_type = { \
++ .sign = 's', \
++ .realbits = (bits), \
++ .storagebits = 16, \
++ .shift = 16 - (bits), \
++ .endianness = IIO_LE, \
++ }, \
++ /*.ext_info = bmi323_accel_ext_info,*/ \
++ /*.event_spec = &bmi323_accel_event,*/ \
++ /*.num_event_specs = 1*/ \
++ }
++
+ #define BMC150_ACCEL_CHANNELS(bits) { \
+ { \
+ .type = IIO_TEMP, \
+@@ -1595,7 +2356,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret, i;
+ unsigned int val;
+-
++
+ /*
+ * Reset chip to get it in a known good state. A delay of 1.8ms after
+ * reset is required according to the data sheets of supported chips.
+@@ -1677,6 +2438,11 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+
++ /*
++ * Setting the dev_type here is necessary to avoid having it left uninitialized
++ * and therefore potentially executing bmi323 functions for the original bmc150 model.
++ */
++ data->dev_type = BMC150;
+ data->regmap = regmap;
+ data->type = type;
+
+@@ -1826,12 +2592,1407 @@ void bmc150_accel_core_remove(struct device *dev)
+ }
+ EXPORT_SYMBOL_NS_GPL(bmc150_accel_core_remove, IIO_BMC150);
+
+-#ifdef CONFIG_PM_SLEEP
+-static int bmc150_accel_suspend(struct device *dev)
++struct device *bmi323_get_managed_device(struct bmi323_private_data *bmi323)
++{
++ if (bmi323->i2c_client != NULL)
++ return &bmi323->i2c_client->dev;
++
++ return &bmi323->spi_client->dev;
++}
++
++static int bmi323_set_power_state(struct bmi323_private_data *bmi323, bool on)
++{
++#ifdef CONFIG_PM
++ struct device *dev = bmi323_get_managed_device(bmi323);
++ int ret;
++
++ if (on)
++ ret = pm_runtime_get_sync(dev);
++ else {
++ pm_runtime_mark_last_busy(dev);
++ ret = pm_runtime_put_autosuspend(dev);
++ }
++
++ if (ret < 0) {
++ dev_err(dev, "bmi323_set_power_state failed with %d\n", on);
++
++ if (on)
++ pm_runtime_put_noidle(dev);
++
++ return ret;
++ }
++#endif
++
++ return 0;
++}
++
++int bmi323_write_u16(struct bmi323_private_data *bmi323, u8 in_reg,
++ u16 in_value)
++{
++ s32 ret;
++
++ if (bmi323->i2c_client != NULL) {
++ ret = i2c_smbus_write_i2c_block_data(bmi323->i2c_client, in_reg,
++ sizeof(in_value),
++ (u8 *)(&in_value));
++ if (ret != 0) {
++ return -2;
++ }
++
++ return 0;
++ } else if (bmi323->spi_client != NULL) {
++ /*
++ * To whoever may need this: implementing this should be straightforward:
++ * it's specular to the i2c part.
++ */
++
++ return -EINVAL; // TODO: change with 0 once implemented
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL_NS_GPL(bmi323_write_u16, IIO_BMC150);
++
++int bmi323_read_u16(struct bmi323_private_data *bmi323, u8 in_reg,
++ u16 *out_value)
++{
++ s32 ret;
++ u8 read_bytes[4];
++
++ if (bmi323->i2c_client != NULL) {
++ ret = i2c_smbus_read_i2c_block_data(bmi323->i2c_client, in_reg,
++ sizeof(read_bytes),
++ &read_bytes[0]);
++ if (ret != 4) {
++ return ret;
++ }
++
++ // DUMMY = read_bytes[0]
++ // DUMMY = read_bytes[1]
++ // LSB = read_bytes[2]
++ // MSB = read_bytes[3]
++ u8 *o = (u8 *)out_value;
++ o[0] = read_bytes[2];
++ o[1] = read_bytes[3];
++
++ return 0;
++ } else if (bmi323->spi_client != NULL) {
++ printk(KERN_CRIT
++ "bmi323: SPI interface is not yet implemented.\n");
++
++ /*
++ * To whoever may need this: implementing this should be straightforward:
++ * it's specular to the i2c part except that the dummy data is just 1 byte.
++ */
++
++ return -EINVAL; // TODO: change with 0 once implemented
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL_NS_GPL(bmi323_read_u16, IIO_BMC150);
++
++int bmi323_chip_check(struct bmi323_private_data *bmi323)
++{
++ u16 chip_id;
++ int ret;
++
++ ret = bmi323_read_u16(bmi323, BMC150_BMI323_CHIP_ID_REG, &chip_id);
++ if (ret != 0) {
++ return ret;
++ }
++
++ if (((chip_id)&0x00FF) != cpu_to_le16((u16)0x0043U)) {
++ dev_err(bmi323->dev,
++ "bmi323_chip_check failed with: %d; chip_id = 0x%04x",
++ ret, chip_id);
++
++ return -EINVAL;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_NS_GPL(bmi323_chip_check, IIO_BMC150);
++
++static int bmi323_buffer_preenable(struct iio_dev *indio_dev)
++{
++ struct bmc150_accel_data *data = iio_priv(indio_dev);
++
++ const int ret = bmi323_set_power_state(&data->bmi323, true);
++
++ if (ret == 0) {
++ mutex_lock(&data->bmi323.mutex);
++ data->bmi323.fifo_frame_time_diff_ns =
++ (data->bmi323.acc_odr_time_ns >=
++ data->bmi323.gyr_odr_time_ns) ?
++ data->bmi323.acc_odr_time_ns :
++ data->bmi323.gyr_odr_time_ns;
++ mutex_unlock(&data->bmi323.mutex);
++ }
++
++ return ret;
++}
++
++static int bmi323_buffer_postenable(struct iio_dev *indio_dev)
++{
++ //struct bmc150_accel_data *data = iio_priv(indio_dev);
++
++ /*
++ * This code is a placeholder until I can get a way to test it
++ */
++
++ return 0;
++}
++
++static int bmi323_buffer_predisable(struct iio_dev *indio_dev)
++{
++ //struct bmc150_accel_data *data = iio_priv(indio_dev);
++
++ /*
++ * This code is a placeholder until I can get a way to test it
++ */
++
++ return 0;
++}
++
++static int bmi323_buffer_postdisable(struct iio_dev *indio_dev)
++{
++ struct bmc150_accel_data *data = iio_priv(indio_dev);
++
++ return bmi323_set_power_state(&data->bmi323, true);
++}
++
++static const struct iio_buffer_setup_ops bmi323_buffer_ops = {
++ .preenable = bmi323_buffer_preenable,
++ .postenable = bmi323_buffer_postenable,
++ .predisable = bmi323_buffer_predisable,
++ .postdisable = bmi323_buffer_postdisable,
++};
++
++int bmi323_chip_rst(struct bmi323_private_data *bmi323)
++{
++ u16 sensor_status = 0x0000, device_status = 0x0000;
++ int ret;
++
++ ret = bmi323_write_u16(bmi323, BMC150_BMI323_SOFT_RESET_REG,
++ cpu_to_le16((u16)BMC150_BMI323_SOFT_RESET_VAL));
++ if (ret != 0) {
++ dev_err(bmi323->dev,
++ "bmi323: error while issuing the soft-reset command: %d",
++ ret);
++ return ret;
++ }
++
++ /* wait the specified amount of time... I agree with the bmc150 module: better safe than sorry. */
++ msleep(5);
++
++ // if the device is connected over SPI a dummy read is to be performed once after each reset
++ if (bmi323->spi_client != NULL) {
++ dev_info(bmi323->dev,
++ "issuing the dummy read to switch mode to SPI");
++
++ // do not even check the result of that... it's just a dummy read
++ bmi323_chip_check(bmi323);
++ }
++
++ ret = bmi323_chip_check(bmi323);
++ if (ret != 0) {
++ return ret;
++ }
++
++ /* now check the correct initialization status as per datasheet */
++ ret = bmi323_read_u16(bmi323, 0x01, &device_status);
++ if (ret != 0) {
++ return -EINVAL;
++ }
++
++ if ((device_status & cpu_to_le16((u16)0x00FFU)) !=
++ cpu_to_le16((u16)0x0000U)) {
++ dev_err(bmi323->dev,
++ "bmi323: device_status incorrect: %d; device_status = 0x%04x",
++ ret, device_status);
++
++ /* from the datasheet: power error */
++ return -EINVAL;
++ }
++
++ /* from the datasheet: power ok */
++ ret = bmi323_read_u16(bmi323, 0x02, &sensor_status);
++ if (ret != 0) {
++ return -EINVAL;
++ }
++
++ if ((sensor_status & cpu_to_le16((u16)0x00FFU)) !=
++ cpu_to_le16((u16)0x0001U)) {
++ dev_err(bmi323->dev,
++ "bmi323: sensor_status incorrect: %d; sensor_status = 0x%04x",
++ ret, sensor_status);
++
++ /* from the datasheet: initialization error */
++ return -EINVAL;
++ }
++
++ /* from the datasheet: initialization ok */
++ return 0;
++}
++EXPORT_SYMBOL_NS_GPL(bmi323_chip_rst, IIO_BMC150);
++
++static const struct iio_chan_spec bmi323_channels[] = {
++ BMI323_ACCEL_CHANNEL(X, 16),
++ BMI323_ACCEL_CHANNEL(Y, 16),
++ BMI323_ACCEL_CHANNEL(Z, 16),
++ BMI323_GYRO_CHANNEL(X, 16),
++ BMI323_GYRO_CHANNEL(Y, 16),
++ BMI323_GYRO_CHANNEL(Z, 16),
++ {
++ .type = IIO_TEMP,
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
++ BIT(IIO_CHAN_INFO_SCALE) |
++ BIT(IIO_CHAN_INFO_OFFSET),
++ .scan_index = BMI323_TEMP,
++ },
++ IIO_CHAN_SOFT_TIMESTAMP(BMI323_AXIS_MAX),
++};
++
++static int bmi323_read_raw(struct iio_dev *indio_dev,
++ struct iio_chan_spec const *chan, int *val,
++ int *val2, long mask)
++{
++ struct bmc150_accel_data *data = iio_priv(indio_dev);
++ int ret = -EINVAL, was_sleep_modified = -1;
++ u16 raw_read = 0x8000;
++
++ mutex_lock(&data->bmi323.mutex);
++
++ if ((data->bmi323.flags & BMI323_FLAGS_RESET_FAILED) != 0x00U) {
++ dev_err(data->bmi323.dev,
++ "bmi323 error: device has not being woken up correctly.");
++ mutex_unlock(&data->bmi323.mutex);
++ return -EBUSY;
++ }
++
++ switch (mask) {
++ case IIO_CHAN_INFO_RAW: {
++ switch (chan->type) {
++ case IIO_TEMP:
++ if (iio_buffer_enabled(indio_dev)) {
++ ret = -EBUSY;
++ goto bmi323_read_raw_error;
++ }
++
++ was_sleep_modified =
++ bmi323_set_power_state(&data->bmi323, true);
++ if (was_sleep_modified != 0) {
++ ret = was_sleep_modified;
++ goto bmi323_read_raw_error_power;
++ }
++
++ ret = iio_device_claim_direct_mode(indio_dev);
++ if (ret != 0) {
++ printk(KERN_CRIT
++ "bmc150 bmi323_read_raw IIO_TEMP iio_device_claim_direct_mode returned %d\n",
++ ret);
++ goto bmi323_read_raw_error;
++ }
++
++ ret = bmi323_read_u16(
++ &data->bmi323,
++ BMC150_BMI323_TEMPERATURE_DATA_REG, &raw_read);
++ iio_device_release_direct_mode(indio_dev);
++ if (ret != 0) {
++ printk(KERN_CRIT
++ "bmc150 bmi323_read_raw IIO_TEMP bmi323_read_u16 returned %d\n",
++ ret);
++ goto bmi323_read_raw_error;
++ }
++
++ *val = sign_extend32(le16_to_cpu(raw_read), 15);
++ bmi323_set_power_state(&data->bmi323, false);
++ mutex_unlock(&data->bmi323.mutex);
++ return IIO_VAL_INT;
++
++ case IIO_ACCEL:
++ if (iio_buffer_enabled(indio_dev)) {
++ ret = -EBUSY;
++ goto bmi323_read_raw_error;
++ }
++
++ was_sleep_modified =
++ bmi323_set_power_state(&data->bmi323, true);
++ if (was_sleep_modified != 0) {
++ ret = was_sleep_modified;
++ goto bmi323_read_raw_error_power;
++ }
++
++ ret = iio_device_claim_direct_mode(indio_dev);
++ if (ret != 0) {
++ printk(KERN_CRIT
++ "bmc150 bmi323_read_raw IIO_ACCEL iio_device_claim_direct_mode returned %d\n",
++ ret);
++ goto bmi323_read_raw_error;
++ }
++
++ ret = bmi323_read_u16(&data->bmi323,
++ BMC150_BMI323_DATA_BASE_REG +
++ (u8)(chan->scan_index),
++ &raw_read);
++ iio_device_release_direct_mode(indio_dev);
++ if (ret != 0) {
++ printk(KERN_CRIT
++ "bmc150 bmi323_read_raw IIO_ACCEL bmi323_read_u16 returned %d\n",
++ ret);
++ goto bmi323_read_raw_error;
++ }
++ *val = sign_extend32(le16_to_cpu(raw_read), 15);
++ bmi323_set_power_state(&data->bmi323, false);
++ mutex_unlock(&data->bmi323.mutex);
++ return IIO_VAL_INT;
++
++ case IIO_ANGL_VEL:
++ if (iio_buffer_enabled(indio_dev)) {
++ ret = -EBUSY;
++ goto bmi323_read_raw_error;
++ }
++
++ was_sleep_modified =
++ bmi323_set_power_state(&data->bmi323, true);
++ if (was_sleep_modified != 0) {
++ ret = was_sleep_modified;
++ goto bmi323_read_raw_error_power;
++ }
++
++ ret = iio_device_claim_direct_mode(indio_dev);
++ if (ret != 0) {
++ printk(KERN_CRIT
++ "bmc150 bmi323_read_raw IIO_ANGL_VEL iio_device_claim_direct_mode returned %d\n",
++ ret);
++ goto bmi323_read_raw_error;
++ }
++
++ ret = bmi323_read_u16(&data->bmi323,
++ BMC150_BMI323_DATA_BASE_REG +
++ (u8)(chan->scan_index),
++ &raw_read);
++ iio_device_release_direct_mode(indio_dev);
++ if (ret != 0) {
++ printk(KERN_CRIT
++ "bmc150 bmi323_read_raw IIO_ANGL_VEL bmi323_read_u16 returned %d\n",
++ ret);
++ goto bmi323_read_raw_error;
++ }
++
++ *val = sign_extend32(le16_to_cpu(raw_read), 15);
++ bmi323_set_power_state(&data->bmi323, false);
++ mutex_unlock(&data->bmi323.mutex);
++ return IIO_VAL_INT;
++
++ default:
++ goto bmi323_read_raw_error;
++ }
++ }
++ case IIO_CHAN_INFO_OFFSET: {
++ switch (chan->type) {
++ case IIO_TEMP:
++ *val = BMC150_BMI323_TEMPER_CENTER_VAL;
++ *val2 = 0;
++ mutex_unlock(&data->bmi323.mutex);
++ return IIO_VAL_INT;
++
++ default:
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ }
++ case IIO_CHAN_INFO_SCALE:
++ switch (chan->type) {
++ case IIO_TEMP: {
++ *val = 0;
++ *val2 = BMC150_BMI323_TEMPER_LSB_PER_KELVIN_VAL;
++ mutex_unlock(&data->bmi323.mutex);
++ return IIO_VAL_FRACTIONAL;
++ }
++ case IIO_ACCEL: {
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323.acc_conf_reg_value;
++ for (int s = 0; s < ARRAY_SIZE(bmi323_accel_scale_map);
++ ++s) {
++ if (((le_raw_read[0]) & ((u16)0b01110000U)) ==
++ (bmi323_accel_scale_map[s].hw_val)) {
++ *val = bmi323_accel_scale_map[s].val;
++ *val2 = bmi323_accel_scale_map[s].val2;
++
++ mutex_unlock(&data->bmi323.mutex);
++ return bmi323_accel_scale_map[s]
++ .ret_type;
++ }
++ }
++
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ case IIO_ANGL_VEL: {
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323.gyr_conf_reg_value;
++ for (int s = 0; s < ARRAY_SIZE(bmi323_gyro_scale_map);
++ ++s) {
++ if (((le_raw_read[0]) & ((u16)0b01110000U)) ==
++ (bmi323_gyro_scale_map[s].hw_val)) {
++ *val = bmi323_gyro_scale_map[s].val;
++ *val2 = bmi323_gyro_scale_map[s].val2;
++
++ mutex_unlock(&data->bmi323.mutex);
++ return bmi323_gyro_scale_map[s].ret_type;
++ }
++ }
++
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ default:
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
++ switch (chan->type) {
++ case IIO_ACCEL: {
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323.acc_conf_reg_value;
++ for (int s = 0; s < ARRAY_SIZE(bmi323_accel_odr_map);
++ ++s) {
++ if (((le_raw_read[0]) & ((u16)0x0FU)) ==
++ (bmi323_accel_odr_map[s].hw_val)) {
++ /*
++ * from tha datasheed: -3dB cut-off frequency can be configured with the bit 7 of GYR_confm,
++ * also called acc_bw that can either be 0 or 1, where 1 means odr/4 and 0 means odr/2
++ */
++ int freq_adj_idx =
++ (((le_raw_read[0]) &
++ ((u8)0x80U)) == (u8)0x00U) ?
++ (s * 2) + 0 :
++ (s * 2) + 1;
++ *val = bmi323_accel_3db_freq_cutoff
++ [freq_adj_idx]
++ .val;
++ *val2 = bmi323_accel_3db_freq_cutoff
++ [freq_adj_idx]
++ .val2;
++
++ mutex_unlock(&data->bmi323.mutex);
++ return IIO_VAL_INT_PLUS_MICRO;
++ }
++ }
++
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ case IIO_ANGL_VEL: {
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323.gyr_conf_reg_value;
++ for (int s = 0; s < ARRAY_SIZE(bmi323_gyro_odr_map);
++ ++s) {
++ if (((le_raw_read[0]) & ((u16)0x0FU)) ==
++ (bmi323_gyro_odr_map[s].hw_val)) {
++ /*
++ * from tha datasheed: -3dB cut-off frequency can be configured with the bit 7 of GYR_confm,
++ * also called acc_bw that can either be 0 or 1, where 1 means odr/4 and 0 means odr/2
++ */
++ int freq_adj_idx =
++ (((le_raw_read[0]) &
++ ((u8)0x80U)) == (u8)0x0000U) ?
++ (s * 2) + 0 :
++ (s * 2) + 1;
++ *val = bmi323_gyro_3db_freq_cutoff
++ [freq_adj_idx]
++ .val;
++ *val2 = bmi323_gyro_3db_freq_cutoff
++ [freq_adj_idx]
++ .val2;
++
++ mutex_unlock(&data->bmi323.mutex);
++ return bmi323_gyro_3db_freq_cutoff
++ [freq_adj_idx]
++ .ret_type;
++ }
++ }
++
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ default: {
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ }
++ case IIO_CHAN_INFO_SAMP_FREQ:
++ switch (chan->type) {
++ case IIO_TEMP: {
++
++ // while in normal or power mode the temperature sensur has a 50Hz sampling frequency
++ *val = 50;
++ *val2 = 0;
++
++ mutex_unlock(&data->bmi323.mutex);
++ return IIO_VAL_INT_PLUS_MICRO;
++ }
++ case IIO_ACCEL: {
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323.acc_conf_reg_value;
++ for (int s = 0; s < ARRAY_SIZE(bmi323_accel_odr_map);
++ ++s) {
++ if (((le_raw_read[0]) & ((u16)0x0FU)) ==
++ (bmi323_accel_odr_map[s].hw_val)) {
++ *val = bmi323_accel_odr_map[s].val;
++ *val2 = bmi323_accel_odr_map[s].val2;
++
++ mutex_unlock(&data->bmi323.mutex);
++ return IIO_VAL_INT_PLUS_MICRO;
++ }
++ }
++
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ case IIO_ANGL_VEL: {
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323.gyr_conf_reg_value;
++ for (int s = 0; s < ARRAY_SIZE(bmi323_gyro_odr_map);
++ ++s) {
++ if (((le_raw_read[0]) & ((u16)0x0FU)) ==
++ (bmi323_gyro_odr_map[s].hw_val)) {
++ *val = bmi323_gyro_odr_map[s].val;
++ *val2 = bmi323_gyro_odr_map[s].val2;
++
++ mutex_unlock(&data->bmi323.mutex);
++ return IIO_VAL_INT_PLUS_MICRO;
++ }
++ }
++
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ default:
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++ default:
++ ret = -EINVAL;
++ goto bmi323_read_raw_error;
++ }
++
++bmi323_read_raw_error:
++ if (was_sleep_modified == 0) {
++ bmi323_set_power_state(&data->bmi323, false);
++ }
++
++bmi323_read_raw_error_power:
++ mutex_unlock(&data->bmi323.mutex);
++ return ret;
++}
++
++static int bmi323_write_raw(struct iio_dev *indio_dev,
++ struct iio_chan_spec const *chan, int val, int val2,
++ long mask)
++{
++ struct bmc150_accel_data *data = iio_priv(indio_dev);
++ int ret = -EINVAL, was_sleep_modified = -1;
++
++ mutex_lock(&data->bmi323.mutex);
++
++ if ((data->bmi323.flags & BMI323_FLAGS_RESET_FAILED) != 0x00U) {
++ dev_err(data->bmi323.dev,
++ "bmi323 error: device has not being woken up correctly.");
++ mutex_unlock(&data->bmi323.mutex);
++ return -EBUSY;
++ }
++
++ switch (mask) {
++ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
++ switch (chan->type) {
++ default: {
++ ret = -EINVAL;
++ goto bmi323_write_raw_error;
++ }
++ }
++ case IIO_CHAN_INFO_SAMP_FREQ:
++ switch (chan->type) {
++ case IIO_ACCEL:
++ if (iio_buffer_enabled(indio_dev)) {
++ ret = -EBUSY;
++ goto bmi323_write_raw_error;
++ }
++
++ for (int s = 0; s < ARRAY_SIZE(bmi323_accel_odr_map);
++ ++s) {
++ if ((bmi323_accel_odr_map[s].val == val) &&
++ (bmi323_accel_odr_map[s].val2 == val2)) {
++ const u16 conf_backup =
++ data->bmi323.acc_conf_reg_value;
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323
++ .acc_conf_reg_value;
++				le_raw_read[0] &= (u8)0b11110000U;
++				le_raw_read[0] |=
++					((u8)bmi323_accel_odr_map[s]
++					 .hw_val); /* fix: accel ODR table; was bmi323_gyro_odr_map (copy-paste) */
++
++ was_sleep_modified =
++ bmi323_set_power_state(
++ &data->bmi323, true);
++ if (was_sleep_modified != 0) {
++ ret = was_sleep_modified;
++ data->bmi323.acc_conf_reg_value =
++ conf_backup;
++ goto bmi323_write_raw_error_power;
++ }
++
++ ret = bmi323_write_u16(
++ &data->bmi323,
++ BMC150_BMI323_ACC_CONF_REG,
++ data->bmi323.acc_conf_reg_value);
++ if (ret != 0) {
++ data->bmi323.acc_conf_reg_value =
++ conf_backup;
++ goto bmi323_write_raw_error;
++ }
++
++ data->bmi323.acc_odr_time_ns =
++ bmi323_accel_odr_map[s].time_ns;
++ bmi323_set_power_state(&data->bmi323,
++ false);
++ mutex_unlock(&data->bmi323.mutex);
++ return 0;
++ }
++ }
++
++ ret = -EINVAL;
++ goto bmi323_write_raw_error;
++ case IIO_ANGL_VEL:
++ if (iio_buffer_enabled(indio_dev)) {
++ ret = -EBUSY;
++ goto bmi323_write_raw_error;
++ }
++
++ for (int s = 0; s < ARRAY_SIZE(bmi323_gyro_odr_map);
++ ++s) {
++ if ((bmi323_gyro_odr_map[s].val == val) &&
++ (bmi323_gyro_odr_map[s].val2 == val2)) {
++ const u16 conf_backup =
++ data->bmi323.gyr_conf_reg_value;
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323
++ .gyr_conf_reg_value;
++ le_raw_read[0] &= (u8)0b11110000U;
++ le_raw_read[0] |=
++ ((u8)bmi323_gyro_odr_map[s]
++ .hw_val);
++
++ was_sleep_modified =
++ bmi323_set_power_state(
++ &data->bmi323, true);
++ if (was_sleep_modified != 0) {
++ ret = was_sleep_modified;
++ data->bmi323.gyr_conf_reg_value =
++ conf_backup;
++ goto bmi323_write_raw_error_power;
++ }
++
++ ret = bmi323_write_u16(
++ &data->bmi323,
++ BMC150_BMI323_GYR_CONF_REG,
++ data->bmi323.gyr_conf_reg_value);
++ if (ret != 0) {
++ data->bmi323.gyr_conf_reg_value =
++ conf_backup;
++ goto bmi323_write_raw_error;
++ }
++
++ data->bmi323.gyr_odr_time_ns =
++ bmi323_gyro_odr_map[s].time_ns;
++ bmi323_set_power_state(&data->bmi323,
++ false);
++ mutex_unlock(&data->bmi323.mutex);
++ return 0;
++ }
++ }
++
++ ret = -EINVAL;
++ goto bmi323_write_raw_error;
++
++ /* Termometer also ends up here: its sampling frequency depends on the chip configuration and cannot be changed */
++ default:
++ ret = -EINVAL;
++ goto bmi323_write_raw_error;
++ }
++
++ break;
++ case IIO_CHAN_INFO_SCALE:
++ switch (chan->type) {
++ case IIO_ACCEL:
++ if (iio_buffer_enabled(indio_dev)) {
++ ret = -EBUSY;
++ goto bmi323_write_raw_error;
++ }
++
++ for (int s = 0; s < ARRAY_SIZE(bmi323_accel_scale_map);
++ ++s) {
++ if ((bmi323_accel_scale_map[s].val == val) &&
++ (bmi323_accel_scale_map[s].val2 == val2)) {
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323
++ .acc_conf_reg_value;
++ le_raw_read[0] &= (u8)0b10001111U;
++ le_raw_read[0] |=
++ ((u8)bmi323_accel_scale_map[s]
++ .hw_val);
++
++ was_sleep_modified =
++ bmi323_set_power_state(
++ &data->bmi323, true);
++ if (was_sleep_modified != 0) {
++ ret = was_sleep_modified;
++ goto bmi323_write_raw_error_power;
++ }
++
++ ret = bmi323_write_u16(
++ &data->bmi323,
++ BMC150_BMI323_ACC_CONF_REG,
++ data->bmi323.acc_conf_reg_value);
++ if (ret != 0) {
++ goto bmi323_write_raw_error;
++ }
++
++ bmi323_set_power_state(&data->bmi323,
++ false);
++ mutex_unlock(&data->bmi323.mutex);
++ return 0;
++ }
++ }
++
++ dev_warn(
++ data->bmi323.dev,
++ "bmi323 error: accel scale val=%d,val2=%d unavailable: ignoring.",
++ val, val2);
++
++ ret = -EINVAL;
++ goto bmi323_write_raw_error;
++ case IIO_ANGL_VEL:
++ if (iio_buffer_enabled(indio_dev)) {
++ ret = -EBUSY;
++ goto bmi323_write_raw_error;
++ }
++
++ for (int s = 0; s < ARRAY_SIZE(bmi323_gyro_scale_map);
++ ++s) {
++ if ((bmi323_gyro_scale_map[s].val == val) &&
++ (bmi323_gyro_scale_map[s].val2 == val2)) {
++ u8 *le_raw_read =
++ (u8 *)&data->bmi323
++ .gyr_conf_reg_value;
++ le_raw_read[0] &= (u8)0b10001111U;
++ le_raw_read[0] |=
++ ((u8)bmi323_gyro_scale_map[s]
++ .hw_val);
++
++ was_sleep_modified =
++ bmi323_set_power_state(
++ &data->bmi323, true);
++ if (was_sleep_modified != 0) {
++ ret = was_sleep_modified;
++ goto bmi323_write_raw_error_power;
++ }
++
++				ret = bmi323_write_u16(
++					&data->bmi323,
++					BMC150_BMI323_GYR_CONF_REG,
++					data->bmi323.gyr_conf_reg_value); /* fix: was acc_conf_reg_value written to GYR_CONF */
++ if (ret != 0) {
++ goto bmi323_write_raw_error;
++ }
++
++ bmi323_set_power_state(&data->bmi323,
++ false);
++ mutex_unlock(&data->bmi323.mutex);
++ return 0;
++ }
++ }
++
++ dev_warn(
++ data->bmi323.dev,
++ "bmi323 error: gyro scale val=%d,val2=%d unavailable: ignoring.",
++ val, val2);
++
++ ret = -EINVAL;
++ goto bmi323_write_raw_error;
++
++ default:
++ ret = -EINVAL;
++ goto bmi323_write_raw_error;
++ }
++
++ default:
++ ret = -EINVAL;
++ goto bmi323_write_raw_error;
++ }
++
++bmi323_write_raw_error:
++ if (was_sleep_modified == 0) {
++ bmi323_set_power_state(&data->bmi323, false);
++ }
++
++bmi323_write_raw_error_power:
++ mutex_unlock(&data->bmi323.mutex);
++ return ret;
++}
++
++static int bmi323_read_avail(struct iio_dev *indio_dev,
++ struct iio_chan_spec const *chan, const int **vals,
++ int *type, int *length, long mask)
++{
++ switch (mask) {
++ case IIO_CHAN_INFO_SCALE:
++ switch (chan->type) {
++ case IIO_ACCEL:
++ *type = IIO_VAL_INT_PLUS_MICRO;
++ *vals = bmi323_accel_scales;
++ *length = ARRAY_SIZE(bmi323_accel_scales);
++ return IIO_AVAIL_LIST;
++ case IIO_ANGL_VEL:
++ *type = IIO_VAL_INT_PLUS_NANO;
++ *vals = bmi323_gyro_scales;
++ *length = ARRAY_SIZE(bmi323_gyro_scales);
++ return IIO_AVAIL_LIST;
++ default:
++ return -EINVAL;
++ }
++ case IIO_CHAN_INFO_SAMP_FREQ:
++ *type = IIO_VAL_INT_PLUS_MICRO;
++ *vals = bmi323_sample_freqs;
++ *length = ARRAY_SIZE(bmi323_sample_freqs);
++ return IIO_AVAIL_LIST;
++ default:
++ return -EINVAL;
++ }
++}
++
++static const struct iio_info bmi323_accel_info = {
++ .read_raw = bmi323_read_raw,
++ .write_raw = bmi323_write_raw,
++ .read_avail = bmi323_read_avail,
++ //.hwfifo_flush_to_buffer = bmi323_fifo_flush,
++};
++
++static int bmi323_fifo_flush(struct iio_dev *indio_dev)
++{
++ struct bmc150_accel_data *data = iio_priv(indio_dev);
++ int ret;
++
++ ret = bmi323_write_u16(&data->bmi323, 0x37, cpu_to_le16(0x01));
++
++ return ret;
++}
++
++static const u16 stub_value = 0x8000;
++
++#define ADVANCE_AT_REQ_OR_AVAIL(req, avail, dst, dst_offset, src, src_offset) \
++	if (req) {                                                             \
++		if (avail) { /* fix: use the avail parameter; was hard-coded gyr_avail */ \
++			memcpy((void *)(dst + dst_offset),                     \
++			       (const void *)(src + src_offset), 2);           \
++			src_offset += 2;                                       \
++		} else {                                                       \
++			memcpy((void *)(dst + dst_offset),                     \
++			       (const void *)((const u8 *)(&stub_value)), 2);  \
++		}                                                              \
++		dst_offset += 2;                                               \
++	} else {                                                               \
++		if (avail) {                                                   \
++			src_offset += 2;                                       \
++		}                                                              \
++	}
++
++static irqreturn_t iio_bmi323_trigger_h(int irq, void *p)
++{
++ printk(KERN_WARNING "bmi323 executed iio_bmi323_trigger_h");
++
++ struct iio_poll_func *pf = p;
++ struct iio_dev *indio_dev = pf->indio_dev;
++ struct bmc150_accel_data *indio_data = iio_priv(indio_dev);
++
++ mutex_lock(&indio_data->bmi323.mutex);
++
++ const bool temp_avail = ((indio_data->bmi323.fifo_conf_reg_value &
++ (cpu_to_le16(0b0000100000000000))) != 0);
++ const bool gyr_avail = ((indio_data->bmi323.fifo_conf_reg_value &
++ (cpu_to_le16(0b0000010000000000))) != 0);
++ const bool acc_avail = ((indio_data->bmi323.fifo_conf_reg_value &
++ (cpu_to_le16(0b0000001000000000))) != 0);
++ const bool time_avail = ((indio_data->bmi323.fifo_conf_reg_value &
++ (cpu_to_le16(0b0000000100000000))) != 0);
++
++ /* Calculate the number of bytes for a frame */
++ const u16 frames_aggregate_size_in_words =
++ /* 2 * */ ((temp_avail ? 1 : 0) + (gyr_avail ? 3 : 0) +
++ (acc_avail ? 3 : 0) + (time_avail ? 1 : 0));
++
++ u16 available_words = 0;
++ const int available_words_read_res = bmi323_read_u16(
++ &indio_data->bmi323, BMC150_BMI323_FIFO_FILL_LEVEL_REG,
++ &available_words);
++ if (available_words_read_res != 0) {
++ goto bmi323_irq_done;
++ }
++
++ const u16 available_frame_aggregates = (le16_to_cpu(available_words)) /
++ (frames_aggregate_size_in_words);
++
++ const s64 current_timestamp_ns = iio_get_time_ns(indio_dev);
++ const s64 fifo_frame_time_ns =
++ indio_data->bmi323.fifo_frame_time_diff_ns;
++ const s64 first_sample_timestamp_ns =
++ current_timestamp_ns -
++ (fifo_frame_time_ns * (s64)(available_frame_aggregates));
++
++ /* This can hold one full block */
++ u8 temp_data[16];
++
++ /* This is fifo data as read from the sensor */
++ u8 fifo_data[32];
++
++ /*
++ | CHANNEL | scan_index
++ |============================
++ | | |
++ | ACCEL_X | 0 |
++ | ACCEL_Y | 1 |
++ | ACCEL_Y | 2 |
++ | GYRO_X | 3 |
++ | GYRO_Y | 4 |
++ | GYRO_Z | 5 |
++ | TEMP | 6 |
++ | TIMESTAMP | ? |
++ */
++ bool accel_x_requested = false;
++ bool accel_y_requested = false;
++ bool accel_z_requested = false;
++ bool gyro_x_requested = false;
++ bool gyro_y_requested = false;
++ bool gyro_z_requested = false;
++ bool temp_requested = false;
++
++ int j = 0;
++ for_each_set_bit(j, indio_dev->active_scan_mask,
++ indio_dev->masklength) {
++ switch (j) {
++ case 0:
++ accel_x_requested = true;
++ break;
++ case 1:
++ accel_y_requested = true;
++ break;
++ case 2:
++ accel_z_requested = true;
++ break;
++ case 3:
++ gyro_x_requested = true;
++ break;
++ case 4:
++ gyro_y_requested = true;
++ break;
++ case 5:
++ gyro_z_requested = true;
++ break;
++ case 6:
++ temp_requested = true;
++ break;
++ default:
++ break;
++ }
++ }
++
++ u16 current_fifo_buffer_offset_bytes = 0;
++ for (u16 f = 0; f < available_frame_aggregates; ++f) {
++ u16 current_sample_buffer_offset = 0;
++
++ /* Read data from the raw device */
++ if (indio_data->bmi323.i2c_client != NULL) {
++ const int bytes_to_read =
++ 2 + (2 * frames_aggregate_size_in_words);
++ int read_block_ret = i2c_smbus_read_i2c_block_data(
++ indio_data->bmi323.i2c_client,
++ BMC150_BMI323_FIFO_DATA_REG, bytes_to_read,
++ &fifo_data[0]);
++ if (read_block_ret < bytes_to_read) {
++ dev_warn(
++ &indio_data->bmi323.i2c_client->dev,
++ "bmi323: i2c_smbus_read_i2c_block_data wrong return: expected %d bytes, %d arrived. Doing what is possible with recovered data.\n",
++ bytes_to_read, read_block_ret);
++
++ /* at this point FIFO buffer must be flushed to avoid interpreting data incorrectly the next trigger */
++ const int flush_res =
++ bmi323_fifo_flush(indio_dev);
++ if (flush_res != 0) {
++ dev_err(&indio_data->bmi323.i2c_client
++ ->dev,
++ "bmi323: Could not flush FIFO (%d). Following buffer data might be corrupted.\n",
++ flush_res);
++ }
++
++ goto bmi323_irq_done;
++ }
++
++ /* Discard 2-bytes dummy data from I2C */
++ current_fifo_buffer_offset_bytes = 2;
++ } else if (indio_data->bmi323.spi_client != NULL) {
++ printk(KERN_CRIT
++ "bmi323: SPI interface is not yet implemented.\n");
++
++ /*
++ * To whoever may need this: implementing this should be straightforward:
++ * it's specular to the i2c part.
++ */
++
++ /* Discard 1-byte dummy data from SPI */
++ current_fifo_buffer_offset_bytes = 1;
++
++ goto bmi323_irq_done;
++ }
++
++ ADVANCE_AT_REQ_OR_AVAIL(accel_x_requested, acc_avail,
++ (u8 *)&temp_data[0],
++ current_sample_buffer_offset,
++ (u8 *)&fifo_data[0],
++ current_fifo_buffer_offset_bytes);
++ ADVANCE_AT_REQ_OR_AVAIL(accel_y_requested, acc_avail,
++ (u8 *)&temp_data[0],
++ current_sample_buffer_offset,
++ (u8 *)&fifo_data[0],
++ current_fifo_buffer_offset_bytes);
++ ADVANCE_AT_REQ_OR_AVAIL(accel_z_requested, acc_avail,
++ (u8 *)&temp_data[0],
++ current_sample_buffer_offset,
++ (u8 *)&fifo_data[0],
++ current_fifo_buffer_offset_bytes);
++ ADVANCE_AT_REQ_OR_AVAIL(gyro_x_requested, gyr_avail,
++ (u8 *)&temp_data[0],
++ current_sample_buffer_offset,
++ (u8 *)&fifo_data[0],
++ current_fifo_buffer_offset_bytes);
++ ADVANCE_AT_REQ_OR_AVAIL(gyro_y_requested, gyr_avail,
++ (u8 *)&temp_data[0],
++ current_sample_buffer_offset,
++ (u8 *)&fifo_data[0],
++ current_fifo_buffer_offset_bytes);
++ ADVANCE_AT_REQ_OR_AVAIL(gyro_z_requested, gyr_avail,
++ (u8 *)&temp_data[0],
++ current_sample_buffer_offset,
++ (u8 *)&fifo_data[0],
++ current_fifo_buffer_offset_bytes);
++ ADVANCE_AT_REQ_OR_AVAIL(temp_requested, temp_avail,
++ (u8 *)&temp_data[0],
++ current_sample_buffer_offset,
++ (u8 *)&fifo_data[0],
++ current_fifo_buffer_offset_bytes);
++
++#ifdef BMC150_BMI232_DEBUG_EN
++ /* The following is code only used for debugging */
++ u16 timestamp = 0;
++ if (time_avail) {
++ memcpy((u8 *)&timestamp,
++ (const u8
++ *)(&fifo_data
++ [current_fifo_buffer_offset_bytes]),
++ 2);
++ current_fifo_buffer_offset_bytes += 2;
++ }
++
++ u16 *debg = (u16 *)&temp_data[0];
++ if (!time_avail) {
++ printk(KERN_WARNING
++ "bmi323 pushing to buffer %d/%d -- accel: %d %d %d gyro: %d %d %d",
++ (int)(f + 1), (int)available_frame_aggregates,
++ (int)(*((s16 *)&debg[0])),
++ (int)(*((s16 *)&debg[1])),
++ (int)(*((s16 *)&debg[2])),
++ (int)(*((s16 *)&debg[3])),
++ (int)(*((s16 *)&debg[4])),
++ (int)(*((s16 *)&debg[5])));
++ } else {
++ printk(KERN_WARNING
++ "bmi323 pushing to buffer %d/%d -- time: %d accel: %d %d %d gyro: %d %d %d",
++ (int)(f + 1), (int)available_frame_aggregates,
++ (int)timestamp, (int)(*((s16 *)&debg[0])),
++ (int)(*((s16 *)&debg[1])),
++ (int)(*((s16 *)&debg[2])),
++ (int)(*((s16 *)&debg[3])),
++ (int)(*((s16 *)&debg[4])),
++ (int)(*((s16 *)&debg[5])));
++ }
++#endif
++
++		iio_push_to_buffers_with_timestamp(
++			indio_dev, &temp_data[0],
++			first_sample_timestamp_ns +
++				(fifo_frame_time_ns * (s64)f)); /* fix: frame counter f; j is a leftover scan-mask bit index */
++ }
++
++bmi323_irq_done:
++ mutex_unlock(&indio_data->bmi323.mutex);
++
++ /*
++ * Tell the core we are done with this trigger and ready for the
++ * next one.
++ */
++ iio_trigger_notify_done(indio_dev->trig);
++
++ return IRQ_HANDLED;
++}
++
++int bmi323_set_trigger_state(struct iio_trigger *trig, bool state)
++{
++ return 0;
++}
++
++/*
++// The following is meant to be used in a IRQ-enabled hardware
++static const struct iio_trigger_ops time_trigger_ops = {
++ .set_trigger_state = &bmi323_set_trigger_state,
++ //.reenable = NULL,
++ .validate_device = &iio_trigger_validate_own_device,
++};
++*/
++
++/*
++ * A very basic scan mask: everything can work in conjunction with everything else so no need to worry about
++ * managing combinations of mutually exclusive data sources...
++ */
++static const unsigned long bmi323_accel_scan_masks[] = {
++ BIT(BMI323_ACCEL_AXIS_X) | BIT(BMI323_ACCEL_AXIS_Y) |
++ BIT(BMI323_ACCEL_AXIS_Z) | BIT(BMI323_GYRO_AXIS_X) |
++ BIT(BMI323_GYRO_AXIS_Y) |
++ BIT(BMI323_GYRO_AXIS_Z) /*| BIT(BMI323_TEMP)*/,
++ 0
++};
++
++int bmi323_iio_init(struct iio_dev *indio_dev)
++{
++ struct bmc150_accel_data *data = iio_priv(indio_dev);
++ struct irq_data *irq_desc = NULL;
++
++ if (data->bmi323.i2c_client != NULL) {
++ data->bmi323.dev = &data->bmi323.i2c_client->dev;
++ } else if (data->bmi323.spi_client != NULL) {
++ data->bmi323.dev = &data->bmi323.spi_client->dev;
++ } else {
++ return -ENODEV;
++ }
++
++ int ret = 0;
++
++ /* change to 8 for a default 200Hz sampling rate */
++ const int gyr_odr_conf_idx = 7;
++ const int acc_odr_conf_idx = 7;
++
++ mutex_init(&data->bmi323.mutex);
++
++ data->bmi323.acc_odr_time_ns =
++ bmi323_accel_odr_map[acc_odr_conf_idx].time_ns;
++ data->bmi323.gyr_odr_time_ns =
++ bmi323_gyro_odr_map[gyr_odr_conf_idx].time_ns;
++
++ // FIFO enabled for gyro, accel and temp. Overwrite older samples.
++ data->bmi323.fifo_conf_reg_value = cpu_to_le16((u16)0x0F00U);
++ //data->bmi323.fifo_conf_reg_value = cpu_to_le16((u16)0x0E00U);
++ //data->bmi323.fifo_conf_reg_value = cpu_to_le16((u16)0x0600U); // working
++
++ // now set the (default) normal mode...
++ // normal mode: 0x4000
++ // no averaging: 0x0000
++ data->bmi323.acc_conf_reg_value = cpu_to_le16(
++ 0x4000 | ((u16)BMC150_BMI323_ACCEL_RANGE_2_VAL << (u16)4U) |
++ ((u16)bmi323_accel_odr_map[acc_odr_conf_idx].hw_val));
++
++ // now set the (default) normal mode...
++ // normal mode: 0x4000
++ // no averaging: 0x0000
++ // filtering to ODR/2: 0x0000
++ data->bmi323.gyr_conf_reg_value = cpu_to_le16(
++ 0x4000 | ((u16)BMC150_BMI323_GYRO_RANGE_125_VAL << (u16)4U) |
++ ((u16)bmi323_gyro_odr_map[gyr_odr_conf_idx].hw_val));
++
++ // the datasheet states that FIFO buffer MUST be enabled before enabling any sensor
++ ret = bmi323_write_u16(&data->bmi323, BMC150_BMI323_FIFO_CONF_REG,
++ data->bmi323.fifo_conf_reg_value);
++	if (ret != 0) {
++		return ret; /* propagate errno from bmi323_write_u16; was bare -1 */
++	}
++
++	ret = bmi323_write_u16(&data->bmi323, BMC150_BMI323_ACC_CONF_REG,
++			       data->bmi323.acc_conf_reg_value);
++	if (ret != 0) {
++		return ret; /* propagate errno; was bare -1 */
++	}
++
++	ret = bmi323_write_u16(&data->bmi323, BMC150_BMI323_GYR_CONF_REG,
++			       data->bmi323.gyr_conf_reg_value);
++	if (ret != 0) {
++		return ret; /* propagate errno; was bare -2 */
++	}
++
++ indio_dev->channels = bmi323_channels;
++ indio_dev->num_channels = ARRAY_SIZE(bmi323_channels);
++ indio_dev->name = "bmi323";
++ indio_dev->available_scan_masks = bmi323_accel_scan_masks;
++ indio_dev->modes = INDIO_DIRECT_MODE;
++ indio_dev->info = &bmi323_accel_info;
++ indio_dev->label = "bmi323-accel_base";
++
++ if (data->bmi323.irq > 0) {
++ dev_info(data->bmi323.dev, "IRQ pin reported as connected: %d",
++ data->bmi323.irq);
++
++ irq_desc = irq_get_irq_data(data->bmi323.irq);
++ if (!irq_desc) {
++ dev_err(data->bmi323.dev,
++ "Could not find IRQ %d. ignoring it.\n",
++ data->bmi323.irq);
++ goto bmi323_iio_init_missing_irq_pin;
++ }
++
++ //data->bmi323.trig[0] = devm_iio_trigger_alloc(data->bmi323.dev, "trig-fifo_full-%s-%d", indio_dev->name, iio_device_id(indio_dev));
++ //if (data->bmi323.trig[0] == NULL) {
++ // ret = -ENOMEM;
++ // goto bmi323_iio_init_err_trigger_unregister;
++ //}
++ //
++ //data->bmi323.trig[0]->ops = &time_trigger_ops;
++ //iio_trigger_set_drvdata(data->bmi323.trig[0], indio_dev);
++ //ret = devm_iio_trigger_register(data->bmi323.dev, data->bmi323.trig[0]);
++ //if (ret) {
++ // dev_err(data->bmi323.dev, "iio trigger register failed\n");
++ // goto bmi323_iio_init_err_trigger_unregister;
++ //}
++
++ /*
++ * register triggers BEFORE buffer setup so that they are cleared
++ * on emergence exit by bmi323_iio_init_err_trigger_unregister.
++ *
++ * This is just a placeholder until I can get my hands on a bmi323
++ * device that has the IRQ pin actually connected to the CPU.
++ */
++
++ /* here resume operation with the module part common to irq and non-irq enabled code. */
++ goto bmi323_iio_init_common_irq_and_not_irq;
++ }
++
++bmi323_iio_init_missing_irq_pin:
++ dev_info(
++ data->bmi323.dev,
++ "IRQ pin NOT connected (irq=%d). Will continue normally without triggers.",
++ data->bmi323.irq);
++
++bmi323_iio_init_common_irq_and_not_irq:
++
++ /* Once orientation matrix is implemented switch this to iio_triggered_buffer_setup_ext. */
++ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
++ iio_bmi323_trigger_h,
++ &bmi323_buffer_ops);
++ if (ret < 0) {
++ dev_err(data->bmi323.dev,
++ "Failed: iio triggered buffer setup: %d\n", ret);
++ goto bmi323_iio_init_err_trigger_unregister;
++ }
++
++ ret = pm_runtime_set_active(data->bmi323.dev);
++ if (ret) {
++ dev_err(data->bmi323.dev,
++ "bmi323 unable to initialize runtime PD: pm_runtime_set_active returned %d\n",
++ ret);
++ goto bmi323_iio_init_err_buffer_cleanup;
++ }
++
++ pm_runtime_enable(data->bmi323.dev);
++ pm_runtime_set_autosuspend_delay(data->bmi323.dev,
++ BMC150_BMI323_AUTO_SUSPEND_DELAY_MS);
++ pm_runtime_use_autosuspend(data->bmi323.dev);
++
++ ret = iio_device_register(indio_dev);
++ if (ret < 0) {
++ dev_err(data->bmi323.dev,
++ "bmi323 unable to register iio device: %d\n", ret);
++ goto bmi323_iio_init_err_pm_cleanup;
++ }
++
++ return 0;
++
++bmi323_iio_init_err_pm_cleanup:
++ pm_runtime_dont_use_autosuspend(data->bmi323.dev);
++ pm_runtime_disable(data->bmi323.dev);
++bmi323_iio_init_err_buffer_cleanup:
++ iio_triggered_buffer_cleanup(indio_dev);
++bmi323_iio_init_err_trigger_unregister:
++ /*
++ * unregister triggers if they have been setup already.
++ * iio_trigger_unregister shall be used in that regard.
++ *
++ * This is just a placeholder until I can get my hands on a bmi323
++ * device that has the IRQ pin actually connected to the CPU.
++ */
++ //if (data->bmi323.trig[0] != NULL) {
++ // iio_trigger_unregister(data->bmi323.trig[0]);
++ //}
++
++ return ret;
++}
++EXPORT_SYMBOL_NS_GPL(bmi323_iio_init, IIO_BMC150);
++
++void bmi323_iio_deinit(struct iio_dev *indio_dev)
++{
++ struct bmc150_accel_data *data = iio_priv(indio_dev);
++ struct device *dev = bmi323_get_managed_device(&data->bmi323);
++
++ iio_device_unregister(indio_dev);
++
++ pm_runtime_disable(dev);
++ pm_runtime_set_suspended(dev);
++ pm_runtime_put_noidle(dev);
++
++ iio_triggered_buffer_cleanup(indio_dev);
++
++ //iio_device_free(indio_dev); // this isn't done in the bmg160 driver nor in other drivers so I guess I shouldn't do it too
++
++	mutex_lock(&data->bmi323.mutex); /* fix: was mutex_unlock — must take the lock before resetting the chip */
++	bmi323_chip_rst(&data->bmi323);
++	mutex_unlock(&data->bmi323.mutex);
++}
++EXPORT_SYMBOL_NS_GPL(bmi323_iio_deinit, IIO_BMC150);
++
++#ifdef CONFIG_PM_SLEEP
++static int bmc150_accel_suspend(struct device *dev)
+ {
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
++ if (data->dev_type == BMI323) {
++ int ret;
++
++ //dev_warn(dev, "bmi323 suspending driver...");
++
++ // here push the register GYRO & ACCEL configuration and issue a reset so that chip goes to sleep mode (the default one after a reset)
++		mutex_lock(&data->bmi323.mutex); /* fix: was mutex_unlock — lock around the reset, released below */
++
++		ret = bmi323_chip_rst(&data->bmi323);
++		mutex_unlock(&data->bmi323.mutex);
++ if (ret != 0) {
++ dev_err(dev,
++ "bmi323 error in suspend on bmi323_chip_rst: %d\n",
++ ret);
++ data->bmi323.flags |= BMI323_FLAGS_RESET_FAILED;
++ return -EAGAIN;
++ }
++
++ return 0;
++ }
++
+ mutex_lock(&data->mutex);
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+ mutex_unlock(&data->mutex);
+@@ -1844,6 +4005,63 @@ static int bmc150_accel_resume(struct device *dev)
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
++ if (data->dev_type == BMI323) {
++ int ret;
++
++ //dev_warn(dev, "bmi323 resuming driver...");
++
++ // here pop the register GYRO & ACCEL configuration and issue a reset so that chip goes to sleep mode (the default one after a reset)
++ mutex_lock(&data->bmi323.mutex);
++
++ // this was done already in runtime_sleep function.
++ if ((data->bmi323.flags & BMI323_FLAGS_RESET_FAILED) != 0x00U) {
++ ret = bmi323_chip_rst(&data->bmi323);
++ if (ret == 0) {
++ data->bmi323.flags &=
++ ~BMI323_FLAGS_RESET_FAILED;
++ } else {
++ goto bmi323_bmc150_accel_resume_terminate;
++ }
++ }
++
++ ret = bmi323_write_u16(&data->bmi323,
++ BMC150_BMI323_FIFO_CONF_REG,
++ data->bmi323.fifo_conf_reg_value);
++ if (ret != 0) {
++ goto bmi323_bmc150_accel_resume_terminate;
++ }
++
++ ret = bmi323_write_u16(&data->bmi323,
++ BMC150_BMI323_GYR_CONF_REG,
++ data->bmi323.gyr_conf_reg_value);
++ if (ret != 0) {
++ goto bmi323_bmc150_accel_resume_terminate;
++ }
++
++ ret = bmi323_write_u16(&data->bmi323,
++ BMC150_BMI323_ACC_CONF_REG,
++ data->bmi323.acc_conf_reg_value);
++ if (ret != 0) {
++ goto bmi323_bmc150_accel_resume_terminate;
++ }
++
++bmi323_bmc150_accel_resume_terminate:
++ mutex_unlock(&data->bmi323.mutex);
++ if (ret != 0) {
++ return -EAGAIN;
++ }
++
++ /*
++ * datasheet says "Start-up time": suspend to high performance mode is tipically 30ms,
++ * however when setting this to 32 or even higher the first reading from the gyro (unlike accel part)
++ * is actually the (wrong) default value 0x8000 so it is better to sleep a bit longer
++ * to prevent issues and give time to the sensor to pick up first readings...
++ */
++ msleep_interruptible(64);
++
++ return 0;
++ }
++
+ mutex_lock(&data->mutex);
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ bmc150_accel_fifo_set_mode(data);
+@@ -1863,6 +4081,25 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int ret;
+
++ if (data->dev_type == BMI323) {
++ //dev_warn(dev, "bmi323 suspending runtime...");
++
++ /*
++ * Every operation requiring this function have the mutex locked already:
++ * with mutex_lock(&data->bmi323.mutex);
++ */
++ ret = bmi323_chip_rst(&data->bmi323);
++ if (ret != 0) {
++ dev_err(dev,
++ "bmi323 error in runtime_suspend on bmi323_chip_rst: %d\n",
++ ret);
++ data->bmi323.flags |= BMI323_FLAGS_RESET_FAILED;
++ return -EAGAIN;
++ }
++
++ return 0;
++ }
++
+ ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+ if (ret < 0)
+ return -EAGAIN;
+@@ -1877,6 +4114,70 @@ static int bmc150_accel_runtime_resume(struct device *dev)
+ int ret;
+ int sleep_val;
+
++ if (data->dev_type == BMI323) {
++ //dev_warn(dev, "bmi323 resuming runtime...");
++
++ /*
++ * Every operation requiring this function have the mutex locked already:
++ * with mutex_lock(&data->bmi323.mutex);
++ */
++
++ // recover from a bad state if it was left that way on reuntime_suspend
++ if ((data->bmi323.flags & BMI323_FLAGS_RESET_FAILED) != 0x00U) {
++ ret = bmi323_chip_rst(&data->bmi323);
++ if (ret == 0) {
++ data->bmi323.flags &=
++ ~BMI323_FLAGS_RESET_FAILED;
++ } else {
++ goto bmi323_bmc150_accel_runtime_resume_terminate;
++ }
++ }
++
++ ret = bmi323_write_u16(&data->bmi323,
++ BMC150_BMI323_FIFO_CONF_REG,
++ data->bmi323.fifo_conf_reg_value);
++ if (ret != 0) {
++			dev_err(dev,
++				"bmi323 writing to FIFO_CONF register failed"); /* fix: message said GYR_CONF for the FIFO_CONF write */
++ goto bmi323_bmc150_accel_runtime_resume_terminate;
++ }
++
++ ret = bmi323_write_u16(&data->bmi323,
++ BMC150_BMI323_GYR_CONF_REG,
++ data->bmi323.gyr_conf_reg_value);
++ if (ret != 0) {
++ dev_err(dev,
++ "bmi323 writing to GYR_CONF register failed");
++ goto bmi323_bmc150_accel_runtime_resume_terminate;
++ }
++
++ ret = bmi323_write_u16(&data->bmi323,
++ BMC150_BMI323_ACC_CONF_REG,
++ data->bmi323.acc_conf_reg_value);
++ if (ret != 0) {
++ dev_err(dev,
++ "bmi323 writing to ACC_CONF register failed");
++ goto bmi323_bmc150_accel_runtime_resume_terminate;
++ }
++
++bmi323_bmc150_accel_runtime_resume_terminate:
++ if (ret != 0) {
++ dev_err(dev,
++ "bmi323 bmc150_accel_runtime_resume -EAGAIN");
++ return -EAGAIN;
++ }
++
++ /*
++ * datasheet says "Start-up time": suspend to high performance mode is tipically 30ms,
++ * however when setting this to 32 or even higher the first reading from the gyro (unlike accel part)
++ * is actually the (wrong) default value 0x8000 so it is better to sleep a bit longer
++ * to prevent issues and give time to the sensor to pick up first readings...
++ */
++ msleep_interruptible(64);
++
++ return 0;
++ }
++
+ ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
+index ee1ba134ad42..0d6ee304b3e7 100644
+--- a/drivers/iio/accel/bmc150-accel-i2c.c
++++ b/drivers/iio/accel/bmc150-accel-i2c.c
+@@ -173,15 +173,102 @@ static void bmc150_acpi_dual_accel_remove(struct i2c_client *client) {}
+
+ static int bmc150_accel_probe(struct i2c_client *client)
+ {
++ int ret;
++ u8 chip_id_first[4] = { 0x00, 0x00, 0x00, 0x00 };
++ enum bmc150_device_type dev_type = BMC150;
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ struct regmap *regmap;
+ const char *name = NULL;
+ enum bmc150_type type = BOSCH_UNKNOWN;
++
++ /* reads 4 bytes (2 dummy + 2 good) from the i2c CHIP_ID device register */
++ ret = i2c_smbus_read_i2c_block_data(client, 0x00, 4, &chip_id_first[0]);
++ if (ret != 4) {
++ dev_info(
++ &client->dev,
++ "error checking if the bmc150 is in fact a bmi323: i2c_smbus_read_i2c_block_data = %d: reg = 0x%02x.\n\tIt probably is a bmc150 as correctly reported by the ACPI entry.",
++ (int)ret, 0x00);
++ goto bmi150_old_probe;
++ }
++
++ // at this point we have enough data to know what chip we are handling
++ dev_type = (chip_id_first[2] == 0x43) ? BMI323 : dev_type;
++
++ if (dev_type == BMI323) {
++ dev_warn(
++ &client->dev,
++ "bmc323: what the ACPI table reported as a bmc150 is in fact a bmc323\n");
++
++ struct iio_dev *indio_dev = devm_iio_device_alloc(
++ &client->dev, sizeof(struct bmc150_accel_data));
++ if (!indio_dev) {
++ dev_err(&client->dev,
++ "bmc323 init process failed: out of memory\n");
++
++ return -ENOMEM;
++ }
++
++ dev_set_drvdata(&client->dev, indio_dev);
++ struct bmc150_accel_data *data = iio_priv(indio_dev);
++ data->dev_type = dev_type;
++
++ struct bmi323_private_data *bmi323_data = &data->bmi323;
++ bmi323_data->i2c_client = client;
++ bmi323_data->spi_client = NULL;
++ bmi323_data->irq = client->irq;
++
++ /*
++ * VDD is the analog and digital domain voltage supply
++ * VDDIO is the digital I/O voltage supply
++ */
++ bmi323_data->regulators[0].supply = "vdd";
++ bmi323_data->regulators[1].supply = "vddio";
++ ret = devm_regulator_bulk_get(
++ &client->dev, ARRAY_SIZE(bmi323_data->regulators),
++ bmi323_data->regulators);
++ if (ret) {
++ return dev_err_probe(&client->dev, ret,
++ "failed to get regulators\n");
++ }
++
++ ret = regulator_bulk_enable(ARRAY_SIZE(bmi323_data->regulators),
++ bmi323_data->regulators);
++ if (ret) {
++ iio_device_free(indio_dev);
++
++ dev_err(&client->dev,
++ "failed to enable regulators: %d\n", ret);
++ return ret;
++ }
++
++ ret = bmi323_chip_rst(bmi323_data);
++ if (ret != 0) {
++ dev_err(&client->dev,
++ "bmc323: error issuing the chip reset: %d\n",
++ ret);
++ return ret;
++ }
++
++ dev_info(
++ &client->dev,
++ "bmc323: chip reset success: starting the iio subsystem binding\n");
++
++ ret = bmi323_iio_init(indio_dev);
++ if (ret != 0) {
++ return ret;
++ }
++
++ return 0;
++ }
++
++bmi150_old_probe:
++ dev_info(&client->dev,
++ "executing the normal procedure for a bmc150...");
++
+ bool block_supported =
+ i2c_check_functionality(client->adapter, I2C_FUNC_I2C) ||
+ i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK);
+- int ret;
+
+ regmap = devm_regmap_init_i2c(client, &bmc150_regmap_conf);
+ if (IS_ERR(regmap)) {
+@@ -198,7 +285,7 @@ static int bmc150_accel_probe(struct i2c_client *client)
+ type, name, block_supported);
+ if (ret)
+ return ret;
+-
++
+ /*
+ * The !id check avoids recursion when probe() gets called
+ * for the second client.
+@@ -211,6 +298,15 @@ static int bmc150_accel_probe(struct i2c_client *client)
+
+ static void bmc150_accel_remove(struct i2c_client *client)
+ {
++ struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
++ struct bmc150_accel_data *data = iio_priv(indio_dev);
++
++ if (data->dev_type == BMI323) {
++ bmi323_iio_deinit(indio_dev);
++
++ return;
++ }
++
+ bmc150_acpi_dual_accel_remove(client);
+
+ bmc150_accel_core_remove(&client->dev);
+diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
+index 7775c5edaeef..65ec208960df 100644
+--- a/drivers/iio/accel/bmc150-accel.h
++++ b/drivers/iio/accel/bmc150-accel.h
+@@ -8,6 +8,14 @@
+ #include <linux/regulator/consumer.h>
+ #include <linux/workqueue.h>
+
++/*
++ * the bmi323 needs raw access to spi and i2c: I cannot use regmap
++ * as this device expects i2c writes to be 2 bytes,
++ * spi reads to be 3 bytes and i2c reads to be 4 bytes.
++ */
++#include <linux/i2c.h>
++#include <linux/spi/spi.h>
++
+ struct regmap;
+ struct i2c_client;
+ struct bmc150_accel_chip_info;
+@@ -34,6 +42,11 @@ struct bmc150_accel_interrupt {
+ atomic_t users;
+ };
+
++enum bmc150_device_type {
++ BMC150,
++ BMI323,
++};
++
+ struct bmc150_accel_trigger {
+ struct bmc150_accel_data *data;
+ struct iio_trigger *indio_trig;
+@@ -55,6 +68,25 @@ enum bmc150_accel_trigger_id {
+ BMC150_ACCEL_TRIGGERS,
+ };
+
++#define BMI323_FLAGS_RESET_FAILED 0x00000001U
++
++struct bmi323_private_data {
++ struct regulator_bulk_data regulators[2];
++ struct i2c_client *i2c_client;
++ struct spi_device *spi_client;
++ struct device *dev; /* pointer at i2c_client->dev or spi_client->dev */
++ struct mutex mutex;
++ int irq;
++ u32 flags;
++ u16 acc_conf_reg_value;
++ u16 gyr_conf_reg_value;
++ u16 fifo_conf_reg_value;
++ struct iio_trigger *trig[1];
++ s64 fifo_frame_time_diff_ns;
++ s64 acc_odr_time_ns;
++ s64 gyr_odr_time_ns;
++};
++
+ struct bmc150_accel_data {
+ struct regmap *regmap;
+ struct regulator_bulk_data regulators[2];
+@@ -83,7 +115,67 @@ struct bmc150_accel_data {
+ void (*resume_callback)(struct device *dev);
+ struct delayed_work resume_work;
+ struct iio_mount_matrix orientation;
+-};
++ enum bmc150_device_type dev_type;
++ struct bmi323_private_data bmi323;
++ };
++
++/**
++ * This function performs a write of a u16 little-endian (regardless of CPU architecture) integer
++ * to a device register. Returns 0 on success or an error code otherwise.
++ *
++ * PRE: in_value holds the data to be sent to the sensor, in little endian format even on big endian
++ * architectures.
++ *
++ * NOTE: bmi323->dev can be NULL (not yet initialized) when this function is called
++ * therefore it is not needed and is not used inside the function
++ *
++ * WARNING: this function does not lock any mutex and synchronization MUST be performed by the caller
++ */
++int bmi323_write_u16(struct bmi323_private_data *bmi323, u8 in_reg, u16 in_value);
++
++/**
++ * This function performs a read of "good" values from the bmi323 discarding what
++ * in the datasheet is described as "dummy data": additional useles bytes.
++ *
++ * PRE: bmi323 has been partially initialized: i2c_device and spi_devices MUST be set to either
++ * the correct value or NULL
++ *
++ * NOTE: bmi323->dev can be NULL (not yet initialized) when this function is called
++ * therefore it is not needed and is not used inside the function
++ *
++ * POST: on success out_value is written with data from the sensor, as it came out, so the
++ * content is little-endian even on big endian architectures
++ *
++ * WARNING: this function does not lock any mutex and synchronization MUST be performed by the caller
++ */
++int bmi323_read_u16(struct bmi323_private_data *bmi323, u8 in_reg, u16* out_value);
++
++int bmi323_chip_check(struct bmi323_private_data *bmi323);
++
++/**
++ * Reset the chip in a known state that is ready to accept commands, but is not configured therefore after calling this function
++ * it is required to load a new configuration to start data acquisition.
++ *
++ * PRE: bmi323 has been fully identified and partially initialized
++ *
++ * NOTE: after issuing a reset the the chip will be in what it is called "suspended mode" and the feature angine is
++ * ready to be set. This mode has everything disabled and consumes aroud 15uA.
++ *
++ * When removing the driver or suspend has been requested it's best to reset the chip so that power consumption
++ * will be the lowest possible.
++ */
++int bmi323_chip_rst(struct bmi323_private_data *bmi323);
++
++/**
++ * This function MUST be called in probe and is responsible for registering the userspace sysfs.
++ *
++ * The indio_dev MUST have been allocated but not registered. This function will perform userspace registration.
++ *
++ * @param indio_dev the industrual io device already allocated but not yet registered
++ */
++int bmi323_iio_init(struct iio_dev *indio_dev);
++
++void bmi323_iio_deinit(struct iio_dev *indio_dev);
+
+ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
+ enum bmc150_type type, const char *name,
+--
+2.42.0
+
diff --git a/0002-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch b/0002-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch
new file mode 100644
index 000000000000..1f518e6bb744
--- /dev/null
+++ b/0002-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch
@@ -0,0 +1,89 @@
+From 5e9d8dd3971972048485610161c2556a7608849e Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Thu, 19 May 2022 14:40:07 +0200
+Subject: [PATCH 2/8] drivers/firmware: skip simpledrm if nvidia-drm.modeset=1
+ is set
+
+The Nvidia proprietary driver has some bugs that leads to issues if used
+with the simpledrm driver. The most noticeable is that does not register
+an emulated fbdev device.
+
+It just relies on a fbdev to be registered by another driver, that could
+be that could be attached to the framebuffer console. On UEFI machines,
+this is the efifb driver.
+
+This means that disabling the efifb driver will cause virtual consoles to
+not be present in the system when using the Nvidia driver. Legacy BIOS is
+not affected just because fbcon is not used there, but instead vgacon.
+
+Unless a VGA mode is specified using the vga= kernel command line option,
+in that case the vesafb driver is used instead and its fbdev attached to
+the fbcon.
+
+This is a problem because with CONFIG_SYSFB_SIMPLEFB=y, the sysfb platform
+code attempts to register a "simple-framebuffer" platform device (that is
+matched against simpledrm) and only registers either an "efi-framebuffer"
+or "vesa-framebuffer" if this fails to be registered due the video modes
+not being compatible.
+
+The Nvidia driver relying on another driver to register the fbdev is quite
+fragile, since it can't really assume those will stick around. For example
+there are patches posted to remove the EFI and VESA platform devices once
+a real DRM or fbdev driver probes.
+
+But in any case, moving to a simpledrm + emulated fbdev only breaks this
+assumption and causes users to not have VT if the Nvidia driver is used.
+
+So to prevent this, let's add a workaround and make the sysfb to skip the
+"simple-framebuffer" registration when nvidia-drm.modeset=1 option is set.
+
+This is quite horrible, but honestly I can't think of any other approach.
+
+For this to work, the CONFIG_FB_EFI and CONFIG_FB_VESA config options must
+be enabled besides CONFIG_DRM_SIMPLEDRM.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Cherry-picked-for: https://bugs.archlinux.org/task/73720
+---
+ drivers/firmware/sysfb.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
+index 82fcfd29bc4d2..17b7e096b6828 100644
+--- a/drivers/firmware/sysfb.c
++++ b/drivers/firmware/sysfb.c
+@@ -34,6 +34,22 @@
+ #include <linux/screen_info.h>
+ #include <linux/sysfb.h>
+
++static int skip_simpledrm;
++
++static int __init simpledrm_disable(char *opt)
++{
++ if (!opt)
++ return -EINVAL;
++
++ get_option(&opt, &skip_simpledrm);
++
++ if (skip_simpledrm)
++ pr_info("The simpledrm driver will not be probed\n");
++
++ return 0;
++}
++early_param("nvidia-drm.modeset", simpledrm_disable);
++
+ static struct platform_device *pd;
+ static DEFINE_MUTEX(disable_lock);
+ static bool disabled;
+@@ -85,7 +101,7 @@ static __init int sysfb_init(void)
+
+ /* try to create a simple-framebuffer device */
+ compatible = sysfb_parse_mode(si, &mode);
+- if (compatible) {
++ if (compatible && !skip_simpledrm) {
+ pd = sysfb_create_simplefb(si, &mode);
+ if (!IS_ERR(pd))
+ goto unlock_mutex;
+--
+2.41.0
+
diff --git a/0003-sphinx-kfigure.py-Convert-outdir-to-str-before-using.patch b/0003-sphinx-kfigure.py-Convert-outdir-to-str-before-using.patch
new file mode 100644
index 000000000000..3ec7388d12b1
--- /dev/null
+++ b/0003-sphinx-kfigure.py-Convert-outdir-to-str-before-using.patch
@@ -0,0 +1,33 @@
+From 37d1bd4999f06c7c4b3094cb74b7614c552ecc1d Mon Sep 17 00:00:00 2001
+From: "Jan Alexander Steffens (heftig)" <heftig@archlinux.org>
+Date: Thu, 24 Aug 2023 01:28:17 +0200
+Subject: [PATCH 3/8] sphinx: kfigure.py: Convert outdir to str before using
+ len
+
+Sphinx 7.2 replaced several uses of str with pathlib Paths, causing the
+build to fail with a TypeError when attempting to use len on one.
+
+Patch by @loqs; thanks.
+
+Fixes: https://bugs.archlinux.org/task/79446
+Signed-off-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
+---
+ Documentation/sphinx/kfigure.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
+index cefdbb7e75230..13e885bbd499c 100644
+--- a/Documentation/sphinx/kfigure.py
++++ b/Documentation/sphinx/kfigure.py
+@@ -309,7 +309,7 @@ def convert_image(img_node, translator, src_fname=None):
+ if dst_fname:
+ # the builder needs not to copy one more time, so pop it if exists.
+ translator.builder.images.pop(img_node['uri'], None)
+- _name = dst_fname[len(translator.builder.outdir) + 1:]
++ _name = dst_fname[len(str(translator.builder.outdir)) + 1:]
+
+ if isNewer(dst_fname, src_fname):
+ kernellog.verbose(app,
+--
+2.41.0
+
diff --git a/0004-ASoC-Intel-soc-acpi-fix-Dell-SKU-0B34.patch b/0004-ASoC-Intel-soc-acpi-fix-Dell-SKU-0B34.patch
new file mode 100644
index 000000000000..1a6234faa0ef
--- /dev/null
+++ b/0004-ASoC-Intel-soc-acpi-fix-Dell-SKU-0B34.patch
@@ -0,0 +1,51 @@
+From eb39b2a95930f53bd2cb4fbda0e1372609dff976 Mon Sep 17 00:00:00 2001
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Date: Tue, 5 Sep 2023 11:16:16 -0400
+Subject: [PATCH 4/8] ASoC: Intel: soc-acpi: fix Dell SKU 0B34
+
+The rule for the SoundWire tables is that the platforms with more
+devices need to be added first. We broke that rule with the Dell SKU
+0B34, and caused the second amplifier for SKU 0AF3 to be ignored.
+
+The fix is simple, we need to move the single-amplifier entry after
+the two-amplifier one.
+
+Fixes: b62a1a839b48 ("ASoC: Intel: soc-acpi: add tables for Dell SKU 0B34")
+Closes: https://github.com/thesofproject/linux/issues/4559
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+---
+ sound/soc/intel/common/soc-acpi-intel-adl-match.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index bcd66e0094b4b..c4b57cca6b228 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -648,18 +648,18 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l3.tplg",
+ },
+- {
+- .link_mask = 0x3, /* rt1316 on link1 & rt714 on link0 */
+- .links = adl_sdw_rt1316_link1_rt714_link0,
+- .drv_name = "sof_sdw",
+- .sof_tplg_filename = "sof-adl-rt1316-l1-mono-rt714-l0.tplg",
+- },
+ {
+ .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
+ .links = adl_sdw_rt1316_link12_rt714_link0,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-adl-rt1316-l12-rt714-l0.tplg",
+ },
++ {
++ .link_mask = 0x3, /* rt1316 on link1 & rt714 on link0 */
++ .links = adl_sdw_rt1316_link1_rt714_link0,
++ .drv_name = "sof_sdw",
++ .sof_tplg_filename = "sof-adl-rt1316-l1-mono-rt714-l0.tplg",
++ },
+ {
+ .link_mask = 0x5, /* 2 active links required */
+ .links = adl_sdw_rt1316_link2_rt714_link0,
+--
+2.41.0
+
diff --git a/0005-Revert-ASoC-Intel-soc-acpi-add-tables-for-Dell-SKU-0.patch b/0005-Revert-ASoC-Intel-soc-acpi-add-tables-for-Dell-SKU-0.patch
new file mode 100644
index 000000000000..e719ec2fae81
--- /dev/null
+++ b/0005-Revert-ASoC-Intel-soc-acpi-add-tables-for-Dell-SKU-0.patch
@@ -0,0 +1,79 @@
+From 82d8584b889db5166bf7ad7863e079d38c9c6e7a Mon Sep 17 00:00:00 2001
+From: "Jan Alexander Steffens (heftig)" <heftig@archlinux.org>
+Date: Sat, 2 Sep 2023 15:28:31 +0200
+Subject: [PATCH 5/6] Revert "ASoC: Intel: soc-acpi: add tables for Dell SKU
+ 0B34"
+
+This reverts commit b62a1a839b48f55046727089c3ba7a8ebbf97f0e.
+
+It broke stereo sound on the Dell XPS 13 Plus 9320 (SKU 0AF3),
+see https://github.com/thesofproject/linux/issues/4559.
+
+Considering that this apparently wasn't enough to get sound working on
+SKU 0B34 (https://github.com/thesofproject/linux/issues/4399) and that
+Dell officially supports Linux on 0AF3 but not 0B34, I think this is
+safe to revert.
+
+Fixes: https://github.com/thesofproject/linux/issues/4559
+Signed-off-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
+---
+ .../intel/common/soc-acpi-intel-adl-match.c | 29 -------------------
+ 1 file changed, 29 deletions(-)
+
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index bcd66e0094b4b..83414bfa8d6c4 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -133,15 +133,6 @@ static const struct snd_soc_acpi_adr_device rt1316_1_group2_adr[] = {
+ }
+ };
+
+-static const struct snd_soc_acpi_adr_device rt1316_1_single_adr[] = {
+- {
+- .adr = 0x000130025D131601ull,
+- .num_endpoints = 1,
+- .endpoints = &single_endpoint,
+- .name_prefix = "rt1316-1"
+- }
+-};
+-
+ static const struct snd_soc_acpi_adr_device rt1316_2_single_adr[] = {
+ {
+ .adr = 0x000230025D131601ull,
+@@ -321,20 +312,6 @@ static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link12_rt714_link0[] =
+ {}
+ };
+
+-static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link1_rt714_link0[] = {
+- {
+- .mask = BIT(1),
+- .num_adr = ARRAY_SIZE(rt1316_1_single_adr),
+- .adr_d = rt1316_1_single_adr,
+- },
+- {
+- .mask = BIT(0),
+- .num_adr = ARRAY_SIZE(rt714_0_adr),
+- .adr_d = rt714_0_adr,
+- },
+- {}
+-};
+-
+ static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link3[] = {
+ {
+ .mask = BIT(2),
+@@ -648,12 +625,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l3.tplg",
+ },
+- {
+- .link_mask = 0x3, /* rt1316 on link1 & rt714 on link0 */
+- .links = adl_sdw_rt1316_link1_rt714_link0,
+- .drv_name = "sof_sdw",
+- .sof_tplg_filename = "sof-adl-rt1316-l1-mono-rt714-l0.tplg",
+- },
+ {
+ .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
+ .links = adl_sdw_rt1316_link12_rt714_link0,
+--
+2.41.0
+
diff --git a/0005-btrfs-wait-on-uncached-block-groups-on-every-allocat.patch b/0005-btrfs-wait-on-uncached-block-groups-on-every-allocat.patch
new file mode 100644
index 000000000000..493337c64d42
--- /dev/null
+++ b/0005-btrfs-wait-on-uncached-block-groups-on-every-allocat.patch
@@ -0,0 +1,255 @@
+From 8da079307d115705e243d226591dcb4388cef7e2 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Mon, 31 Jul 2023 16:28:43 -0400
+Subject: [PATCH 5/8] btrfs: wait on uncached block groups on every allocation
+ loop
+
+My initial fix for the generic/475 hangs was related to metadata, but
+our CI testing uncovered another case where we hang for similar reasons.
+We again have a task with a plug that is holding an outstanding request
+that is keeping the dm device from finishing it's suspend, and that task
+is stuck in the allocator.
+
+This time it is stuck trying to allocate data, but we do not have a
+block group that matches the size class. The larger loop in the
+allocator looks like this (simplified of course)
+
+ find_free_extent
+ for_each_block_group {
+ ffe_ctl->cached == btrfs_block_group_cache_done(bg)
+ if (!ffe_ctl->cached)
+ ffe_ctl->have_caching_bg = true;
+ do_allocation()
+ btrfs_wait_block_group_cache_progress();
+ }
+
+ if (loop == LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
+ go search again;
+
+In my earlier fix we were trying to allocate from the block group, but
+we weren't waiting for the progress because we were only waiting for the
+free space to be >= the amount of free space we wanted. My fix made it
+so we waited for forward progress to be made as well, so we would be
+sure to wait.
+
+This time however we did not have a block group that matched our size
+class, so what was happening was this
+
+ find_free_extent
+ for_each_block_group {
+ ffe_ctl->cached == btrfs_block_group_cache_done(bg)
+ if (!ffe_ctl->cached)
+ ffe_ctl->have_caching_bg = true;
+ if (size_class_doesn't_match())
+ goto loop;
+ do_allocation()
+ btrfs_wait_block_group_cache_progress();
+ loop:
+ release_block_group(block_group);
+ }
+
+ if (loop == LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
+ go search again;
+
+The size_class_doesn't_match() part was true, so we'd just skip this
+block group and never wait for caching, and then because we found a
+caching block group we'd just go back and do the loop again. We never
+sleep and thus never flush the plug and we have the same deadlock.
+
+Fix the logic for waiting on the block group caching to instead do it
+unconditionally when we goto loop. This takes the logic out of the
+allocation step, so now the loop looks more like this
+
+ find_free_extent
+ for_each_block_group {
+ ffe_ctl->cached == btrfs_block_group_cache_done(bg)
+ if (!ffe_ctl->cached)
+ ffe_ctl->have_caching_bg = true;
+ if (size_class_doesn't_match())
+ goto loop;
+ do_allocation()
+ btrfs_wait_block_group_cache_progress();
+ loop:
+ if (loop > LOOP_CACHING_NOWAIT && !ffe_ctl->retry_uncached &&
+ !ffe_ctl->cached) {
+ ffe_ctl->retry_uncached = true;
+ btrfs_wait_block_group_cache_progress();
+ }
+
+ release_block_group(block_group);
+ }
+
+ if (loop == LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
+ go search again;
+
+This simplifies the logic a lot, and makes sure that if we're hitting
+uncached block groups we're always waiting on them at some point.
+
+I ran this through 100 iterations of generic/475, as this particular
+case was harder to hit than the previous one.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/extent-tree.c | 61 +++++++++++++-----------------------------
+ fs/btrfs/extent-tree.h | 13 +++------
+ 2 files changed, 22 insertions(+), 52 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index f396a9afa4032..6096bd98e6c70 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3427,7 +3427,6 @@ btrfs_release_block_group(struct btrfs_block_group *cache,
+ * Helper function for find_free_extent().
+ *
+ * Return -ENOENT to inform caller that we need fallback to unclustered mode.
+- * Return -EAGAIN to inform caller that we need to re-search this block group
+ * Return >0 to inform caller that we find nothing
+ * Return 0 means we have found a location and set ffe_ctl->found_offset.
+ */
+@@ -3508,14 +3507,6 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
+ trace_btrfs_reserve_extent_cluster(bg, ffe_ctl);
+ return 0;
+ }
+- } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
+- !ffe_ctl->retry_clustered) {
+- spin_unlock(&last_ptr->refill_lock);
+-
+- ffe_ctl->retry_clustered = true;
+- btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+- ffe_ctl->empty_cluster + ffe_ctl->empty_size);
+- return -EAGAIN;
+ }
+ /*
+ * At this point we either didn't find a cluster or we weren't able to
+@@ -3530,7 +3521,6 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
+ /*
+ * Return >0 to inform caller that we find nothing
+ * Return 0 when we found an free extent and set ffe_ctrl->found_offset
+- * Return -EAGAIN to inform caller that we need to re-search this block group
+ */
+ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
+ struct find_free_extent_ctl *ffe_ctl)
+@@ -3568,25 +3558,8 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
+ offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
+ ffe_ctl->num_bytes, ffe_ctl->empty_size,
+ &ffe_ctl->max_extent_size);
+-
+- /*
+- * If we didn't find a chunk, and we haven't failed on this block group
+- * before, and this block group is in the middle of caching and we are
+- * ok with waiting, then go ahead and wait for progress to be made, and
+- * set @retry_unclustered to true.
+- *
+- * If @retry_unclustered is true then we've already waited on this
+- * block group once and should move on to the next block group.
+- */
+- if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
+- ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
+- btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+- ffe_ctl->empty_size);
+- ffe_ctl->retry_unclustered = true;
+- return -EAGAIN;
+- } else if (!offset) {
++ if (!offset)
+ return 1;
+- }
+ ffe_ctl->found_offset = offset;
+ return 0;
+ }
+@@ -3600,7 +3573,7 @@ static int do_allocation_clustered(struct btrfs_block_group *block_group,
+ /* We want to try and use the cluster allocator, so lets look there */
+ if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
+ ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
+- if (ret >= 0 || ret == -EAGAIN)
++ if (ret >= 0)
+ return ret;
+ /* ret == -ENOENT case falls through */
+ }
+@@ -3816,8 +3789,7 @@ static void release_block_group(struct btrfs_block_group *block_group,
+ {
+ switch (ffe_ctl->policy) {
+ case BTRFS_EXTENT_ALLOC_CLUSTERED:
+- ffe_ctl->retry_clustered = false;
+- ffe_ctl->retry_unclustered = false;
++ ffe_ctl->retry_uncached = false;
+ break;
+ case BTRFS_EXTENT_ALLOC_ZONED:
+ /* Nothing to do */
+@@ -4168,9 +4140,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
+ ffe_ctl->orig_have_caching_bg = false;
+ ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags);
+ ffe_ctl->loop = 0;
+- /* For clustered allocation */
+- ffe_ctl->retry_clustered = false;
+- ffe_ctl->retry_unclustered = false;
++ ffe_ctl->retry_uncached = false;
+ ffe_ctl->cached = 0;
+ ffe_ctl->max_extent_size = 0;
+ ffe_ctl->total_free_space = 0;
+@@ -4321,16 +4291,12 @@ static noinline int find_free_extent(struct btrfs_root *root,
+
+ bg_ret = NULL;
+ ret = do_allocation(block_group, ffe_ctl, &bg_ret);
+- if (ret == 0) {
+- if (bg_ret && bg_ret != block_group) {
+- btrfs_release_block_group(block_group,
+- ffe_ctl->delalloc);
+- block_group = bg_ret;
+- }
+- } else if (ret == -EAGAIN) {
+- goto have_block_group;
+- } else if (ret > 0) {
++ if (ret > 0)
+ goto loop;
++
++ if (bg_ret && bg_ret != block_group) {
++ btrfs_release_block_group(block_group, ffe_ctl->delalloc);
++ block_group = bg_ret;
+ }
+
+ /* Checks */
+@@ -4371,6 +4337,15 @@ static noinline int find_free_extent(struct btrfs_root *root,
+ btrfs_release_block_group(block_group, ffe_ctl->delalloc);
+ break;
+ loop:
++ if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
++ !ffe_ctl->retry_uncached) {
++ ffe_ctl->retry_uncached = true;
++ btrfs_wait_block_group_cache_progress(block_group,
++ ffe_ctl->num_bytes +
++ ffe_ctl->empty_cluster +
++ ffe_ctl->empty_size);
++ goto have_block_group;
++ }
+ release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
+ cond_resched();
+ }
+diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h
+index 429d5c5700618..6bfba2f22fdd4 100644
+--- a/fs/btrfs/extent-tree.h
++++ b/fs/btrfs/extent-tree.h
+@@ -48,16 +48,11 @@ struct find_free_extent_ctl {
+ int loop;
+
+ /*
+- * Whether we're refilling a cluster, if true we need to re-search
+- * current block group but don't try to refill the cluster again.
++ * Set to true if we're retrying the allocation on this block group
++ * after waiting for caching progress, this is so that we retry only
++ * once before moving on to another block group.
+ */
+- bool retry_clustered;
+-
+- /*
+- * Whether we're updating free space cache, if true we need to re-search
+- * current block group but don't try updating free space cache again.
+- */
+- bool retry_unclustered;
++ bool retry_uncached;
+
+ /* If current block group is cached */
+ int cached;
+--
+2.41.0
+
diff --git a/0006-btrfs-set-last-dir-index-to-the-current-last-index-w.patch b/0006-btrfs-set-last-dir-index-to-the-current-last-index-w.patch
new file mode 100644
index 000000000000..c0cbc0f8f5a8
--- /dev/null
+++ b/0006-btrfs-set-last-dir-index-to-the-current-last-index-w.patch
@@ -0,0 +1,86 @@
+From c8026dde1f99ec4f682765aaac7993a964184a15 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Sat, 9 Sep 2023 11:34:40 +0100
+Subject: [PATCH 6/8] btrfs: set last dir index to the current last index when
+ opening dir
+
+When opening a directory for reading it, we set the last index where we
+stop iteration to the value in struct btrfs_inode::index_cnt. That value
+does not match the index of the most recently added directory entry but
+it's instead the index number that will be assigned the next directory
+entry.
+
+This means that if after the call to opendir(3) new directory entries are
+added, a readdir(3) call will return the first new directory entry. This
+is fine because POSIX says the following [1]:
+
+ "If a file is removed from or added to the directory after the most
+ recent call to opendir() or rewinddir(), whether a subsequent call to
+ readdir() returns an entry for that file is unspecified."
+
+For example for the test script from commit 9b378f6ad48c ("btrfs: fix
+infinite directory reads"), where we have 2000 files in a directory, ext4
+doesn't return any new directory entry after opendir(3), while xfs returns
+the first 13 new directory entries added after the opendir(3) call.
+
+If we move to a shorter example with an empty directory when opendir(3) is
+called, and 2 files added to the directory after the opendir(3) call, then
+readdir(3) on btrfs will return the first file, ext4 and xfs return the 2
+files (but in a different order). A test program for this, reported by
+Ian Johnson, is the following:
+
+ #include <dirent.h>
+ #include <stdio.h>
+
+ int main(void) {
+ DIR *dir = opendir("test");
+
+ FILE *file;
+ file = fopen("test/1", "w");
+ fwrite("1", 1, 1, file);
+ fclose(file);
+
+ file = fopen("test/2", "w");
+ fwrite("2", 1, 1, file);
+ fclose(file);
+
+ struct dirent *entry;
+ while ((entry = readdir(dir))) {
+ printf("%s\n", entry->d_name);
+ }
+ closedir(dir);
+ return 0;
+ }
+
+To make this less odd, change the behaviour to never return new entries
+that were added after the opendir(3) call. This is done by setting the
+last_index field of the struct btrfs_file_private attached to the
+directory's file handle with a value matching btrfs_inode::index_cnt
+minus 1, since that value always matches the index of the next new
+directory entry and not the index of the most recently added entry.
+
+[1] https://pubs.opengroup.org/onlinepubs/007904875/functions/readdir_r.html
+
+Link: https://lore.kernel.org/linux-btrfs/YR1P0S.NGASEG570GJ8@ianjohnson.dev/
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+---
+ fs/btrfs/inode.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index aa090b0b5d298..e3a52438f4f1f 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5935,7 +5935,8 @@ static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
+ }
+ }
+
+- *index = dir->index_cnt;
++ /* index_cnt is the index number of next new entry, so decrement it. */
++ *index = dir->index_cnt - 1;
+
+ return 0;
+ }
+--
+2.41.0
+
diff --git a/0007-btrfs-refresh-dir-last-index-during-a-rewinddir-3-ca.patch b/0007-btrfs-refresh-dir-last-index-during-a-rewinddir-3-ca.patch
new file mode 100644
index 000000000000..6218aa7ce847
--- /dev/null
+++ b/0007-btrfs-refresh-dir-last-index-during-a-rewinddir-3-ca.patch
@@ -0,0 +1,102 @@
+From f2e0f18c8bd9c6d61575a2311d488e21831e1163 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Sat, 9 Sep 2023 12:12:14 +0100
+Subject: [PATCH 7/8] btrfs: refresh dir last index during a rewinddir(3) call
+
+When opening a directory we find what's the index of its last entry and
+then store it in the directory's file handle private data (struct
+btrfs_file_private::last_index), so that in the case new directory entries
+are added to a directory after an opendir(3) call we don't end up in an
+infinite loop (see commit 9b378f6ad48c ("btrfs: fix infinite directory
+reads")) when calling readdir(3).
+
+However once rewinddir(3) is called, POSIX states [1] that any new
+directory entries added after the previous opendir(3) call, must be
+returned by subsequent calls to readdir(3):
+
+ "The rewinddir() function shall reset the position of the directory
+ stream to which dirp refers to the beginning of the directory.
+ It shall also cause the directory stream to refer to the current
+ state of the corresponding directory, as a call to opendir() would
+ have done."
+
+We currently don't refresh the last_index field of the struct
+btrfs_file_private associated to the directory, so after a rewinddir(3)
+we are not returning any new entries added after the opendir(3) call.
+
+Fix this by finding the current last index of the directory when llseek
+is called against the directory.
+
+This can be reproduced by the following C program provided by Ian Johnson:
+
+ #include <dirent.h>
+ #include <stdio.h>
+
+ int main(void) {
+ DIR *dir = opendir("test");
+
+ FILE *file;
+ file = fopen("test/1", "w");
+ fwrite("1", 1, 1, file);
+ fclose(file);
+
+ file = fopen("test/2", "w");
+ fwrite("2", 1, 1, file);
+ fclose(file);
+
+ rewinddir(dir);
+
+ struct dirent *entry;
+ while ((entry = readdir(dir))) {
+ printf("%s\n", entry->d_name);
+ }
+ closedir(dir);
+ return 0;
+ }
+
+[1] https://pubs.opengroup.org/onlinepubs/9699919799/functions/rewinddir.html
+
+Reported-by: Ian Johnson <ian@ianjohnson.dev>
+Link: https://lore.kernel.org/linux-btrfs/YR1P0S.NGASEG570GJ8@ianjohnson.dev/
+Fixes: 9b378f6ad48c ("btrfs: fix infinite directory reads")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+---
+ fs/btrfs/inode.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index e3a52438f4f1f..a50cbcafca03c 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5973,6 +5973,19 @@ static int btrfs_opendir(struct inode *inode, struct file *file)
+ return 0;
+ }
+
++static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++ struct btrfs_file_private *private = file->private_data;
++ int ret;
++
++ ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
++ &private->last_index);
++ if (ret)
++ return ret;
++
++ return generic_file_llseek(file, offset, whence);
++}
++
+ struct dir_entry {
+ u64 ino;
+ u64 offset;
+@@ -11053,7 +11066,7 @@ static const struct inode_operations btrfs_dir_inode_operations = {
+ };
+
+ static const struct file_operations btrfs_dir_file_operations = {
+- .llseek = generic_file_llseek,
++ .llseek = btrfs_dir_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = btrfs_real_readdir,
+ .open = btrfs_opendir,
+--
+2.41.0
+
diff --git a/0008-btrfs-fix-race-between-reading-a-directory-and-addin.patch b/0008-btrfs-fix-race-between-reading-a-directory-and-addin.patch
new file mode 100644
index 000000000000..7da77c9a20cd
--- /dev/null
+++ b/0008-btrfs-fix-race-between-reading-a-directory-and-addin.patch
@@ -0,0 +1,143 @@
+From 5c640491ba9cea8c0a01144db26b2c8892506717 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Tue, 12 Sep 2023 11:45:39 +0100
+Subject: [PATCH 8/8] btrfs: fix race between reading a directory and adding
+ entries to it
+
+When opening a directory (opendir(3)) or rewinding it (rewinddir(3)), we
+are not holding the directory's inode locked, and this can result in later
+attempting to add two entries to the directory with the same index number,
+resulting in a transaction abort, with -EEXIST (-17), when inserting the
+second delayed dir index. This results in a trace like the following:
+
+ Sep 11 22:34:59 myhostname kernel: BTRFS error (device dm-3): err add delayed dir index item(name: cockroach-stderr.log) into the insertion tree of the delayed node(root id: 5, inode id: 4539217, errno: -17)
+ Sep 11 22:34:59 myhostname kernel: ------------[ cut here ]------------
+ Sep 11 22:34:59 myhostname kernel: kernel BUG at fs/btrfs/delayed-inode.c:1504!
+ Sep 11 22:34:59 myhostname kernel: invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
+ Sep 11 22:34:59 myhostname kernel: CPU: 0 PID: 7159 Comm: cockroach Not tainted 6.4.15-200.fc38.x86_64 #1
+ Sep 11 22:34:59 myhostname kernel: Hardware name: ASUS ESC500 G3/P9D WS, BIOS 2402 06/27/2018
+ Sep 11 22:34:59 myhostname kernel: RIP: 0010:btrfs_insert_delayed_dir_index+0x1da/0x260
+ Sep 11 22:34:59 myhostname kernel: Code: eb dd 48 (...)
+ Sep 11 22:34:59 myhostname kernel: RSP: 0000:ffffa9980e0fbb28 EFLAGS: 00010282
+ Sep 11 22:34:59 myhostname kernel: RAX: 0000000000000000 RBX: ffff8b10b8f4a3c0 RCX: 0000000000000000
+ Sep 11 22:34:59 myhostname kernel: RDX: 0000000000000000 RSI: ffff8b177ec21540 RDI: ffff8b177ec21540
+ Sep 11 22:34:59 myhostname kernel: RBP: ffff8b110cf80888 R08: 0000000000000000 R09: ffffa9980e0fb938
+ Sep 11 22:34:59 myhostname kernel: R10: 0000000000000003 R11: ffffffff86146508 R12: 0000000000000014
+ Sep 11 22:34:59 myhostname kernel: R13: ffff8b1131ae5b40 R14: ffff8b10b8f4a418 R15: 00000000ffffffef
+ Sep 11 22:34:59 myhostname kernel: FS: 00007fb14a7fe6c0(0000) GS:ffff8b177ec00000(0000) knlGS:0000000000000000
+ Sep 11 22:34:59 myhostname kernel: CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ Sep 11 22:34:59 myhostname kernel: CR2: 000000c00143d000 CR3: 00000001b3b4e002 CR4: 00000000001706f0
+ Sep 11 22:34:59 myhostname kernel: Call Trace:
+ Sep 11 22:34:59 myhostname kernel: <TASK>
+ Sep 11 22:34:59 myhostname kernel: ? die+0x36/0x90
+ Sep 11 22:34:59 myhostname kernel: ? do_trap+0xda/0x100
+ Sep 11 22:34:59 myhostname kernel: ? btrfs_insert_delayed_dir_index+0x1da/0x260
+ Sep 11 22:34:59 myhostname kernel: ? do_error_trap+0x6a/0x90
+ Sep 11 22:34:59 myhostname kernel: ? btrfs_insert_delayed_dir_index+0x1da/0x260
+ Sep 11 22:34:59 myhostname kernel: ? exc_invalid_op+0x50/0x70
+ Sep 11 22:34:59 myhostname kernel: ? btrfs_insert_delayed_dir_index+0x1da/0x260
+ Sep 11 22:34:59 myhostname kernel: ? asm_exc_invalid_op+0x1a/0x20
+ Sep 11 22:34:59 myhostname kernel: ? btrfs_insert_delayed_dir_index+0x1da/0x260
+ Sep 11 22:34:59 myhostname kernel: ? btrfs_insert_delayed_dir_index+0x1da/0x260
+ Sep 11 22:34:59 myhostname kernel: btrfs_insert_dir_item+0x200/0x280
+ Sep 11 22:34:59 myhostname kernel: btrfs_add_link+0xab/0x4f0
+ Sep 11 22:34:59 myhostname kernel: ? ktime_get_real_ts64+0x47/0xe0
+ Sep 11 22:34:59 myhostname kernel: btrfs_create_new_inode+0x7cd/0xa80
+ Sep 11 22:34:59 myhostname kernel: btrfs_symlink+0x190/0x4d0
+ Sep 11 22:34:59 myhostname kernel: ? schedule+0x5e/0xd0
+ Sep 11 22:34:59 myhostname kernel: ? __d_lookup+0x7e/0xc0
+ Sep 11 22:34:59 myhostname kernel: vfs_symlink+0x148/0x1e0
+ Sep 11 22:34:59 myhostname kernel: do_symlinkat+0x130/0x140
+ Sep 11 22:34:59 myhostname kernel: __x64_sys_symlinkat+0x3d/0x50
+ Sep 11 22:34:59 myhostname kernel: do_syscall_64+0x5d/0x90
+ Sep 11 22:34:59 myhostname kernel: ? syscall_exit_to_user_mode+0x2b/0x40
+ Sep 11 22:34:59 myhostname kernel: ? do_syscall_64+0x6c/0x90
+ Sep 11 22:34:59 myhostname kernel: entry_SYSCALL_64_after_hwframe+0x72/0xdc
+
+The race leading to the problem happens like this:
+
+1) Directory inode X is loaded into memory, its ->index_cnt field is
+ initialized to (u64)-1 (at btrfs_alloc_inode());
+
+2) Task A is adding a new file to directory X, holding its vfs inode lock,
+ and calls btrfs_set_inode_index() to get an index number for the entry.
+
+ Because the inode's index_cnt field is set to (u64)-1 it calls
+ btrfs_inode_delayed_dir_index_count() which fails because no dir index
+ entries were added yet to the delayed inode and then it calls
+ btrfs_set_inode_index_count(). This functions finds the last dir index
+ key and then sets index_cnt to that index value + 1. It found that the
+ last index key has an offset of 100. However before it assigns a value
+ of 101 to index_cnt...
+
+3) Task B calls opendir(3), ending up at btrfs_opendir(), where the vfs
+ lock for inode X is not taken, so it calls btrfs_get_dir_last_index()
+ and sees index_cnt still with a value of (u64)-1. Because of that it
+ calls btrfs_inode_delayed_dir_index_count() which fails since no dir
+ index entries were added to the delayed inode yet, and then it also
+ calls btrfs_set_inode_index_count(). This also finds that the last
+ index key has an offset of 100, and before it assigns the value 101
+ to the index_cnt field of inode X...
+
+4) Task A assigns a value of 101 to index_cnt. And then the code flow
+ goes to btrfs_set_inode_index() where it increments index_cnt from
+ 101 to 102. Task A then creates a delayed dir index entry with a
+ sequence number of 101 and adds it to the delayed inode;
+
+5) Task B assigns 101 to the index_cnt field of inode X;
+
+6) At some later point when someone tries to add a new entry to the
+ directory, btrfs_set_inode_index() will return 101 again and shortly
+ after an attempt to add another delayed dir index key with index
+ number 101 will fail with -EEXIST resulting in a transaction abort.
+
+Fix this by locking the inode at btrfs_get_dir_last_index(), which is
+only used when opening a directory or attempting to lseek on it.
+
+Reported-by: ken <ken@bllue.org>
+Link: https://lore.kernel.org/linux-btrfs/CAE6xmH+Lp=Q=E61bU+v9eWX8gYfLvu6jLYxjxjFpo3zHVPR0EQ@mail.gmail.com/
+Reported-by: syzbot+d13490c82ad5353c779d@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/linux-btrfs/00000000000036e1290603e097e0@google.com/
+Fixes: 9b378f6ad48c ("btrfs: fix infinite directory reads")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Cherry-picked-for: https://bugs.archlinux.org/task/79673
+---
+ fs/btrfs/inode.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a50cbcafca03c..0d4196cede7d2 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5924,21 +5924,24 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
+
+ static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
+ {
+- if (dir->index_cnt == (u64)-1) {
+- int ret;
++ int ret = 0;
+
++ btrfs_inode_lock(dir, 0);
++ if (dir->index_cnt == (u64)-1) {
+ ret = btrfs_inode_delayed_dir_index_count(dir);
+ if (ret) {
+ ret = btrfs_set_inode_index_count(dir);
+ if (ret)
+- return ret;
++ goto out;
+ }
+ }
+
+ /* index_cnt is the index number of next new entry, so decrement it. */
+ *index = dir->index_cnt - 1;
++out:
++ btrfs_inode_unlock(dir, 0);
+
+- return 0;
++ return ret;
+ }
+
+ /*
+--
+2.41.0
+
diff --git a/0102-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch b/0102-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch
new file mode 100644
index 000000000000..1f518e6bb744
--- /dev/null
+++ b/0102-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch
@@ -0,0 +1,89 @@
+From 5e9d8dd3971972048485610161c2556a7608849e Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Thu, 19 May 2022 14:40:07 +0200
+Subject: [PATCH 2/8] drivers/firmware: skip simpledrm if nvidia-drm.modeset=1
+ is set
+
+The Nvidia proprietary driver has some bugs that leads to issues if used
+with the simpledrm driver. The most noticeable is that does not register
+an emulated fbdev device.
+
+It just relies on a fbdev to be registered by another driver, that could
+be attached to the framebuffer console. On UEFI machines,
+this is the efifb driver.
+
+This means that disabling the efifb driver will cause virtual consoles to
+not be present in the system when using the Nvidia driver. Legacy BIOS is
+not affected just because fbcon is not used there, but instead vgacon.
+
+Unless a VGA mode is specified using the vga= kernel command line option,
+in that case the vesafb driver is used instead and its fbdev attached to
+the fbcon.
+
+This is a problem because with CONFIG_SYSFB_SIMPLEFB=y, the sysfb platform
+code attempts to register a "simple-framebuffer" platform device (that is
+matched against simpledrm) and only registers either an "efi-framebuffer"
+or "vesa-framebuffer" if this fails to be registered due the video modes
+not being compatible.
+
+The Nvidia driver relying on another driver to register the fbdev is quite
+fragile, since it can't really assume those will stick around. For example
+there are patches posted to remove the EFI and VESA platform devices once
+a real DRM or fbdev driver probes.
+
+But in any case, moving to a simpledrm + emulated fbdev only breaks this
+assumption and causes users to not have VT if the Nvidia driver is used.
+
+So to prevent this, let's add a workaround and make the sysfb to skip the
+"simple-framebuffer" registration when nvidia-drm.modeset=1 option is set.
+
+This is quite horrible, but honestly I can't think of any other approach.
+
+For this to work, the CONFIG_FB_EFI and CONFIG_FB_VESA config options must
+be enabled besides CONFIG_DRM_SIMPLEDRM.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Cherry-picked-for: https://bugs.archlinux.org/task/73720
+---
+ drivers/firmware/sysfb.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
+index 82fcfd29bc4d2..17b7e096b6828 100644
+--- a/drivers/firmware/sysfb.c
++++ b/drivers/firmware/sysfb.c
+@@ -34,6 +34,22 @@
+ #include <linux/screen_info.h>
+ #include <linux/sysfb.h>
+
++static int skip_simpledrm;
++
++static int __init simpledrm_disable(char *opt)
++{
++ if (!opt)
++ return -EINVAL;
++
++ get_option(&opt, &skip_simpledrm);
++
++ if (skip_simpledrm)
++ pr_info("The simpledrm driver will not be probed\n");
++
++ return 0;
++}
++early_param("nvidia-drm.modeset", simpledrm_disable);
++
+ static struct platform_device *pd;
+ static DEFINE_MUTEX(disable_lock);
+ static bool disabled;
+@@ -85,7 +101,7 @@ static __init int sysfb_init(void)
+
+ /* try to create a simple-framebuffer device */
+ compatible = sysfb_parse_mode(si, &mode);
+- if (compatible) {
++ if (compatible && !skip_simpledrm) {
+ pd = sysfb_create_simplefb(si, &mode);
+ if (!IS_ERR(pd))
+ goto unlock_mutex;
+--
+2.41.0
+
diff --git a/0103-ASoC-Intel-soc-acpi-fix-Dell-SKU-0B34.patch b/0103-ASoC-Intel-soc-acpi-fix-Dell-SKU-0B34.patch
new file mode 100644
index 000000000000..1a6234faa0ef
--- /dev/null
+++ b/0103-ASoC-Intel-soc-acpi-fix-Dell-SKU-0B34.patch
@@ -0,0 +1,51 @@
+From eb39b2a95930f53bd2cb4fbda0e1372609dff976 Mon Sep 17 00:00:00 2001
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Date: Tue, 5 Sep 2023 11:16:16 -0400
+Subject: [PATCH 4/8] ASoC: Intel: soc-acpi: fix Dell SKU 0B34
+
+The rule for the SoundWire tables is that the platforms with more
+devices need to be added first. We broke that rule with the Dell SKU
+0B34, and caused the second amplifier for SKU 0AF3 to be ignored.
+
+The fix is simple, we need to move the single-amplifier entry after
+the two-amplifier one.
+
+Fixes: b62a1a839b48 ("ASoC: Intel: soc-acpi: add tables for Dell SKU 0B34")
+Closes: https://github.com/thesofproject/linux/issues/4559
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+---
+ sound/soc/intel/common/soc-acpi-intel-adl-match.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index bcd66e0094b4b..c4b57cca6b228 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -648,18 +648,18 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l3.tplg",
+ },
+- {
+- .link_mask = 0x3, /* rt1316 on link1 & rt714 on link0 */
+- .links = adl_sdw_rt1316_link1_rt714_link0,
+- .drv_name = "sof_sdw",
+- .sof_tplg_filename = "sof-adl-rt1316-l1-mono-rt714-l0.tplg",
+- },
+ {
+ .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
+ .links = adl_sdw_rt1316_link12_rt714_link0,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-adl-rt1316-l12-rt714-l0.tplg",
+ },
++ {
++ .link_mask = 0x3, /* rt1316 on link1 & rt714 on link0 */
++ .links = adl_sdw_rt1316_link1_rt714_link0,
++ .drv_name = "sof_sdw",
++ .sof_tplg_filename = "sof-adl-rt1316-l1-mono-rt714-l0.tplg",
++ },
+ {
+ .link_mask = 0x5, /* 2 active links required */
+ .links = adl_sdw_rt1316_link2_rt714_link0,
+--
+2.41.0
+
diff --git a/0104-btrfs-wait-on-uncached-block-groups-on-every-allocat.patch b/0104-btrfs-wait-on-uncached-block-groups-on-every-allocat.patch
new file mode 100644
index 000000000000..493337c64d42
--- /dev/null
+++ b/0104-btrfs-wait-on-uncached-block-groups-on-every-allocat.patch
@@ -0,0 +1,255 @@
+From 8da079307d115705e243d226591dcb4388cef7e2 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Mon, 31 Jul 2023 16:28:43 -0400
+Subject: [PATCH 5/8] btrfs: wait on uncached block groups on every allocation
+ loop
+
+My initial fix for the generic/475 hangs was related to metadata, but
+our CI testing uncovered another case where we hang for similar reasons.
+We again have a task with a plug that is holding an outstanding request
+that is keeping the dm device from finishing its suspend, and that task
+is stuck in the allocator.
+
+This time it is stuck trying to allocate data, but we do not have a
+block group that matches the size class. The larger loop in the
+allocator looks like this (simplified of course)
+
+ find_free_extent
+ for_each_block_group {
+ ffe_ctl->cached == btrfs_block_group_cache_done(bg)
+ if (!ffe_ctl->cached)
+ ffe_ctl->have_caching_bg = true;
+ do_allocation()
+ btrfs_wait_block_group_cache_progress();
+ }
+
+ if (loop == LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
+ go search again;
+
+In my earlier fix we were trying to allocate from the block group, but
+we weren't waiting for the progress because we were only waiting for the
+free space to be >= the amount of free space we wanted. My fix made it
+so we waited for forward progress to be made as well, so we would be
+sure to wait.
+
+This time however we did not have a block group that matched our size
+class, so what was happening was this
+
+ find_free_extent
+ for_each_block_group {
+ ffe_ctl->cached == btrfs_block_group_cache_done(bg)
+ if (!ffe_ctl->cached)
+ ffe_ctl->have_caching_bg = true;
+ if (size_class_doesn't_match())
+ goto loop;
+ do_allocation()
+ btrfs_wait_block_group_cache_progress();
+ loop:
+ release_block_group(block_group);
+ }
+
+ if (loop == LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
+ go search again;
+
+The size_class_doesn't_match() part was true, so we'd just skip this
+block group and never wait for caching, and then because we found a
+caching block group we'd just go back and do the loop again. We never
+sleep and thus never flush the plug and we have the same deadlock.
+
+Fix the logic for waiting on the block group caching to instead do it
+unconditionally when we goto loop. This takes the logic out of the
+allocation step, so now the loop looks more like this
+
+ find_free_extent
+ for_each_block_group {
+ ffe_ctl->cached == btrfs_block_group_cache_done(bg)
+ if (!ffe_ctl->cached)
+ ffe_ctl->have_caching_bg = true;
+ if (size_class_doesn't_match())
+ goto loop;
+ do_allocation()
+ btrfs_wait_block_group_cache_progress();
+ loop:
+ if (loop > LOOP_CACHING_NOWAIT && !ffe_ctl->retry_uncached &&
+ !ffe_ctl->cached) {
+ ffe_ctl->retry_uncached = true;
+ btrfs_wait_block_group_cache_progress();
+ }
+
+ release_block_group(block_group);
+ }
+
+ if (loop == LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
+ go search again;
+
+This simplifies the logic a lot, and makes sure that if we're hitting
+uncached block groups we're always waiting on them at some point.
+
+I ran this through 100 iterations of generic/475, as this particular
+case was harder to hit than the previous one.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/extent-tree.c | 61 +++++++++++++-----------------------------
+ fs/btrfs/extent-tree.h | 13 +++------
+ 2 files changed, 22 insertions(+), 52 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index f396a9afa4032..6096bd98e6c70 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3427,7 +3427,6 @@ btrfs_release_block_group(struct btrfs_block_group *cache,
+ * Helper function for find_free_extent().
+ *
+ * Return -ENOENT to inform caller that we need fallback to unclustered mode.
+- * Return -EAGAIN to inform caller that we need to re-search this block group
+ * Return >0 to inform caller that we find nothing
+ * Return 0 means we have found a location and set ffe_ctl->found_offset.
+ */
+@@ -3508,14 +3507,6 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
+ trace_btrfs_reserve_extent_cluster(bg, ffe_ctl);
+ return 0;
+ }
+- } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
+- !ffe_ctl->retry_clustered) {
+- spin_unlock(&last_ptr->refill_lock);
+-
+- ffe_ctl->retry_clustered = true;
+- btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+- ffe_ctl->empty_cluster + ffe_ctl->empty_size);
+- return -EAGAIN;
+ }
+ /*
+ * At this point we either didn't find a cluster or we weren't able to
+@@ -3530,7 +3521,6 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
+ /*
+ * Return >0 to inform caller that we find nothing
+ * Return 0 when we found an free extent and set ffe_ctrl->found_offset
+- * Return -EAGAIN to inform caller that we need to re-search this block group
+ */
+ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
+ struct find_free_extent_ctl *ffe_ctl)
+@@ -3568,25 +3558,8 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
+ offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
+ ffe_ctl->num_bytes, ffe_ctl->empty_size,
+ &ffe_ctl->max_extent_size);
+-
+- /*
+- * If we didn't find a chunk, and we haven't failed on this block group
+- * before, and this block group is in the middle of caching and we are
+- * ok with waiting, then go ahead and wait for progress to be made, and
+- * set @retry_unclustered to true.
+- *
+- * If @retry_unclustered is true then we've already waited on this
+- * block group once and should move on to the next block group.
+- */
+- if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
+- ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
+- btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+- ffe_ctl->empty_size);
+- ffe_ctl->retry_unclustered = true;
+- return -EAGAIN;
+- } else if (!offset) {
++ if (!offset)
+ return 1;
+- }
+ ffe_ctl->found_offset = offset;
+ return 0;
+ }
+@@ -3600,7 +3573,7 @@ static int do_allocation_clustered(struct btrfs_block_group *block_group,
+ /* We want to try and use the cluster allocator, so lets look there */
+ if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
+ ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
+- if (ret >= 0 || ret == -EAGAIN)
++ if (ret >= 0)
+ return ret;
+ /* ret == -ENOENT case falls through */
+ }
+@@ -3816,8 +3789,7 @@ static void release_block_group(struct btrfs_block_group *block_group,
+ {
+ switch (ffe_ctl->policy) {
+ case BTRFS_EXTENT_ALLOC_CLUSTERED:
+- ffe_ctl->retry_clustered = false;
+- ffe_ctl->retry_unclustered = false;
++ ffe_ctl->retry_uncached = false;
+ break;
+ case BTRFS_EXTENT_ALLOC_ZONED:
+ /* Nothing to do */
+@@ -4168,9 +4140,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
+ ffe_ctl->orig_have_caching_bg = false;
+ ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags);
+ ffe_ctl->loop = 0;
+- /* For clustered allocation */
+- ffe_ctl->retry_clustered = false;
+- ffe_ctl->retry_unclustered = false;
++ ffe_ctl->retry_uncached = false;
+ ffe_ctl->cached = 0;
+ ffe_ctl->max_extent_size = 0;
+ ffe_ctl->total_free_space = 0;
+@@ -4321,16 +4291,12 @@ static noinline int find_free_extent(struct btrfs_root *root,
+
+ bg_ret = NULL;
+ ret = do_allocation(block_group, ffe_ctl, &bg_ret);
+- if (ret == 0) {
+- if (bg_ret && bg_ret != block_group) {
+- btrfs_release_block_group(block_group,
+- ffe_ctl->delalloc);
+- block_group = bg_ret;
+- }
+- } else if (ret == -EAGAIN) {
+- goto have_block_group;
+- } else if (ret > 0) {
++ if (ret > 0)
+ goto loop;
++
++ if (bg_ret && bg_ret != block_group) {
++ btrfs_release_block_group(block_group, ffe_ctl->delalloc);
++ block_group = bg_ret;
+ }
+
+ /* Checks */
+@@ -4371,6 +4337,15 @@ static noinline int find_free_extent(struct btrfs_root *root,
+ btrfs_release_block_group(block_group, ffe_ctl->delalloc);
+ break;
+ loop:
++ if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
++ !ffe_ctl->retry_uncached) {
++ ffe_ctl->retry_uncached = true;
++ btrfs_wait_block_group_cache_progress(block_group,
++ ffe_ctl->num_bytes +
++ ffe_ctl->empty_cluster +
++ ffe_ctl->empty_size);
++ goto have_block_group;
++ }
+ release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
+ cond_resched();
+ }
+diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h
+index 429d5c5700618..6bfba2f22fdd4 100644
+--- a/fs/btrfs/extent-tree.h
++++ b/fs/btrfs/extent-tree.h
+@@ -48,16 +48,11 @@ struct find_free_extent_ctl {
+ int loop;
+
+ /*
+- * Whether we're refilling a cluster, if true we need to re-search
+- * current block group but don't try to refill the cluster again.
++ * Set to true if we're retrying the allocation on this block group
++ * after waiting for caching progress, this is so that we retry only
++ * once before moving on to another block group.
+ */
+- bool retry_clustered;
+-
+- /*
+- * Whether we're updating free space cache, if true we need to re-search
+- * current block group but don't try updating free space cache again.
+- */
+- bool retry_unclustered;
++ bool retry_uncached;
+
+ /* If current block group is cached */
+ int cached;
+--
+2.41.0
+
diff --git a/0105-net_wwan_t7xx_add-AP-CLDMA.patch b/0105-net_wwan_t7xx_add-AP-CLDMA.patch
new file mode 100644
index 000000000000..d3b001a7e58f
--- /dev/null
+++ b/0105-net_wwan_t7xx_add-AP-CLDMA.patch
@@ -0,0 +1,504 @@
+From c21ba287d8ffc883987593b24b4058964e242c84 Mon Sep 17 00:00:00 2001
+From: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
+Date: Tue, 11 Jul 2023 08:28:13 +0200
+Subject: [PATCH] net: wwan: t7xx: Add AP CLDMA
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+At this moment with the current status, t7xx is not functional due to
+problems like this after connection, if there is no activity:
+[ 57.370534] mtk_t7xx 0000:72:00.0: [PM] SAP suspend error: -110
+[ 57.370581] mtk_t7xx 0000:72:00.0: can't suspend
+ (t7xx_pci_pm_runtime_suspend [mtk_t7xx] returned -110)
+because after this, the traffic no longer works.
+
+The complete series 'net: wwan: t7xx: fw flashing & coredump support'
+was reverted because of issues with the pci implementation.
+In order to have at least the modem working, it would be enough if just
+the first commit of the series is re-applied:
+d20ef656f994 net: wwan: t7xx: Add AP CLDMA
+With that, the Application Processor would be controlled, correctly
+suspended and the commented problems would be fixed (I am testing here
+like this with no related issue).
+
+This commit is independent of the others and not related to the
+commented pci implementation for the new features: fw flashing and
+coredump collection.
+
+Use v2 patch version of d20ef656f994 as JinJian Song suggests
+(https://patchwork.kernel.org/project/netdevbpf/patch/20230105154215.198828-1-m.chetan.kumar@linux.intel.com/).
+
+Original text from the commit that would be re-applied:
+
+ d20ef656f994 net: wwan: t7xx: Add AP CLDMA
+ Author: Haijun Liu <haijun.liu@mediatek.com>
+ Date: Tue Aug 16 09:53:28 2022 +0530
+
+ The t7xx device contains two Cross Layer DMA (CLDMA) interfaces to
+ communicate with AP and Modem processors respectively. So far only
+ MD-CLDMA was being used, this patch enables AP-CLDMA.
+
+ Rename small Application Processor (sAP) to AP.
+
+ Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
+ Co-developed-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
+ Signed-off-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
+ Signed-off-by: Moises Veleta <moises.veleta@linux.intel.com>
+ Signed-off-by: Devegowda Chandrashekar <chandrashekar.devegowda@intel.com>
+ Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
+ Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+ Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
+ Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+
+Signed-off-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230711062817.6108-1-jtornosm@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Cherry-picked-for: https://bugs.archlinux.org/task/79728
+---
+ drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 17 +++--
+ drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 2 +-
+ drivers/net/wwan/t7xx/t7xx_mhccif.h | 1 +
+ drivers/net/wwan/t7xx/t7xx_modem_ops.c | 76 +++++++++++++++++-----
+ drivers/net/wwan/t7xx/t7xx_modem_ops.h | 2 +
+ drivers/net/wwan/t7xx/t7xx_port.h | 6 +-
+ drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 8 ++-
+ drivers/net/wwan/t7xx/t7xx_port_proxy.c | 18 ++++-
+ drivers/net/wwan/t7xx/t7xx_reg.h | 2 +-
+ drivers/net/wwan/t7xx/t7xx_state_monitor.c | 13 +++-
+ drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2 +
+ 11 files changed, 116 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+index 7162bf38a8c906..cc70360364b7d6 100644
+--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+@@ -1066,13 +1066,18 @@ static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ u32 phy_ao_base, phy_pd_base;
+
+- if (md_ctrl->hif_id != CLDMA_ID_MD)
+- return;
+-
+- phy_ao_base = CLDMA1_AO_BASE;
+- phy_pd_base = CLDMA1_PD_BASE;
+- hw_info->phy_interrupt_id = CLDMA1_INT;
+ hw_info->hw_mode = MODE_BIT_64;
++
++ if (md_ctrl->hif_id == CLDMA_ID_MD) {
++ phy_ao_base = CLDMA1_AO_BASE;
++ phy_pd_base = CLDMA1_PD_BASE;
++ hw_info->phy_interrupt_id = CLDMA1_INT;
++ } else {
++ phy_ao_base = CLDMA0_AO_BASE;
++ phy_pd_base = CLDMA0_PD_BASE;
++ hw_info->phy_interrupt_id = CLDMA0_INT;
++ }
++
+ hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
+ pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
+ hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
+diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+index 47a35e552da78a..4410bac6993aef 100644
+--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+@@ -34,7 +34,7 @@
+ /**
+ * enum cldma_id - Identifiers for CLDMA HW units.
+ * @CLDMA_ID_MD: Modem control channel.
+- * @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
++ * @CLDMA_ID_AP: Application Processor control channel.
+ * @CLDMA_NUM: Number of CLDMA HW units available.
+ */
+ enum cldma_id {
+diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.h b/drivers/net/wwan/t7xx/t7xx_mhccif.h
+index 209b386bc08866..20c50dce9fc31e 100644
+--- a/drivers/net/wwan/t7xx/t7xx_mhccif.h
++++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h
+@@ -25,6 +25,7 @@
+ D2H_INT_EXCEPTION_CLEARQ_DONE | \
+ D2H_INT_EXCEPTION_ALLQ_RESET | \
+ D2H_INT_PORT_ENUM | \
++ D2H_INT_ASYNC_AP_HK | \
+ D2H_INT_ASYNC_MD_HK)
+
+ void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val);
+diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+index 7d0f5e4f0a7815..24e7d491468e0a 100644
+--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+@@ -44,6 +44,7 @@
+ #include "t7xx_state_monitor.h"
+
+ #define RT_ID_MD_PORT_ENUM 0
++#define RT_ID_AP_PORT_ENUM 1
+ /* Modem feature query identification code - "ICCC" */
+ #define MD_FEATURE_QUERY_ID 0x49434343
+
+@@ -298,6 +299,7 @@ static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
+ }
+
+ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
++ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);
+
+ if (stage == HIF_EX_INIT)
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
+@@ -426,7 +428,7 @@ static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_inf
+ if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
+ return -EINVAL;
+
+- if (i == RT_ID_MD_PORT_ENUM)
++ if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
+ t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
+ }
+
+@@ -456,12 +458,12 @@ static int t7xx_core_reset(struct t7xx_modem *md)
+ return 0;
+ }
+
+-static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
++static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
++ struct t7xx_fsm_ctl *ctl,
+ enum t7xx_fsm_event_state event_id,
+ enum t7xx_fsm_event_state err_detect)
+ {
+ struct t7xx_fsm_event *event = NULL, *event_next;
+- struct t7xx_sys_info *core_info = &md->core_md;
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ unsigned long flags;
+ int ret;
+@@ -531,19 +533,33 @@ static void t7xx_md_hk_wq(struct work_struct *work)
+ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
+ md->core_md.handshake_ongoing = true;
+- t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
++ t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
++}
++
++static void t7xx_ap_hk_wq(struct work_struct *work)
++{
++ struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
++
++ /* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
++ t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
++ t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
++ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
++ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
++ md->core_ap.handshake_ongoing = true;
++ t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
+ }
+
+ void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
+ {
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+- void __iomem *mhccif_base;
+ unsigned int int_sta;
+ unsigned long flags;
+
+ switch (evt_id) {
+ case FSM_PRE_START:
+- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM);
++ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
++ D2H_INT_ASYNC_AP_HK);
+ break;
+
+ case FSM_START:
+@@ -556,16 +572,26 @@ void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
+ ctl->exp_flg = true;
+ md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
++ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
+ } else if (ctl->exp_flg) {
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+- } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
+- queue_work(md->handshake_wq, &md->handshake_work);
+- md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+- mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
+- iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+- t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
++ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
+ } else {
+- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
++ void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
++
++ if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
++ queue_work(md->handshake_wq, &md->handshake_work);
++ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
++ iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
++ }
++
++ if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
++ queue_work(md->handshake_wq, &md->ap_handshake_work);
++ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
++ iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
++ }
+ }
+ spin_unlock_irqrestore(&md->exp_lock, flags);
+
+@@ -578,6 +604,7 @@ void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
+
+ case FSM_READY:
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
+ break;
+
+ default:
+@@ -629,6 +656,12 @@ static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
+ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
+ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
+ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
++
++ INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
++ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
++ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
++ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
++
+ return md;
+ }
+
+@@ -640,6 +673,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
+ md->exp_id = 0;
+ t7xx_fsm_reset(md);
+ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
++ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_port_proxy_reset(md->port_prox);
+ md->md_init_finish = true;
+ return t7xx_core_reset(md);
+@@ -669,6 +703,10 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
+ if (ret)
+ goto err_destroy_hswq;
+
++ ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
++ if (ret)
++ goto err_destroy_hswq;
++
+ ret = t7xx_fsm_init(md);
+ if (ret)
+ goto err_destroy_hswq;
+@@ -681,12 +719,16 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
+ if (ret)
+ goto err_uninit_ccmni;
+
+- ret = t7xx_port_proxy_init(md);
++ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
+ if (ret)
+ goto err_uninit_md_cldma;
+
++ ret = t7xx_port_proxy_init(md);
++ if (ret)
++ goto err_uninit_ap_cldma;
++
+ ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
+- if (ret) /* fsm_uninit flushes cmd queue */
++ if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
+ goto err_uninit_proxy;
+
+ t7xx_md_sys_sw_init(t7xx_dev);
+@@ -696,6 +738,9 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
+ err_uninit_proxy:
+ t7xx_port_proxy_uninit(md->port_prox);
+
++err_uninit_ap_cldma:
++ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
++
+ err_uninit_md_cldma:
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
+
+@@ -722,6 +767,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
+
+ t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ t7xx_port_proxy_uninit(md->port_prox);
++ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_ccmni_exit(t7xx_dev);
+ t7xx_fsm_uninit(md);
+diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+index 7469ed636ae81a..abe633cf7adc01 100644
+--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+@@ -66,10 +66,12 @@ struct t7xx_modem {
+ struct cldma_ctrl *md_ctrl[CLDMA_NUM];
+ struct t7xx_pci_dev *t7xx_dev;
+ struct t7xx_sys_info core_md;
++ struct t7xx_sys_info core_ap;
+ bool md_init_finish;
+ bool rgu_irq_asserted;
+ struct workqueue_struct *handshake_wq;
+ struct work_struct handshake_work;
++ struct work_struct ap_handshake_work;
+ struct t7xx_fsm_ctl *fsm_ctl;
+ struct port_proxy *port_prox;
+ unsigned int exp_id;
+diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h
+index 8ea9079af997b0..4ae8a00a85322e 100644
+--- a/drivers/net/wwan/t7xx/t7xx_port.h
++++ b/drivers/net/wwan/t7xx/t7xx_port.h
+@@ -36,9 +36,13 @@
+ /* Channel ID and Message ID definitions.
+ * The channel number consists of peer_id(15:12) , channel_id(11:0)
+ * peer_id:
+- * 0:reserved, 1: to sAP, 2: to MD
++ * 0:reserved, 1: to AP, 2: to MD
+ */
+ enum port_ch {
++ /* to AP */
++ PORT_CH_AP_CONTROL_RX = 0x1000,
++ PORT_CH_AP_CONTROL_TX = 0x1001,
++
+ /* to MD */
+ PORT_CH_CONTROL_RX = 0x2000,
+ PORT_CH_CONTROL_TX = 0x2001,
+diff --git a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
+index 68430b130a6784..ae632ef966983e 100644
+--- a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
++++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
+@@ -167,8 +167,12 @@ static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb)
+ case CTL_ID_HS2_MSG:
+ skb_pull(skb, sizeof(*ctrl_msg_h));
+
+- if (port_conf->rx_ch == PORT_CH_CONTROL_RX) {
+- ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data,
++ if (port_conf->rx_ch == PORT_CH_CONTROL_RX ||
++ port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) {
++ int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ?
++ FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2;
++
++ ret = t7xx_fsm_append_event(ctl, event, skb->data,
+ le32_to_cpu(ctrl_msg_h->data_length));
+ if (ret)
+ dev_err(port->dev, "Failed to append Handshake 2 event");
+diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+index 894b1d11b2c9c1..274846d39fbf3f 100644
+--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+@@ -48,7 +48,7 @@
+ i < (proxy)->port_count; \
+ i++, (p) = &(proxy)->ports[i])
+
+-static const struct t7xx_port_conf t7xx_md_port_conf[] = {
++static const struct t7xx_port_conf t7xx_port_conf[] = {
+ {
+ .tx_ch = PORT_CH_UART2_TX,
+ .rx_ch = PORT_CH_UART2_RX,
+@@ -89,6 +89,14 @@ static const struct t7xx_port_conf t7xx_md_port_conf[] = {
+ .path_id = CLDMA_ID_MD,
+ .ops = &ctl_port_ops,
+ .name = "t7xx_ctrl",
++ }, {
++ .tx_ch = PORT_CH_AP_CONTROL_TX,
++ .rx_ch = PORT_CH_AP_CONTROL_RX,
++ .txq_index = Q_IDX_CTRL,
++ .rxq_index = Q_IDX_CTRL,
++ .path_id = CLDMA_ID_AP,
++ .ops = &ctl_port_ops,
++ .name = "t7xx_ap_ctrl",
+ },
+ };
+
+@@ -428,6 +436,9 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
+ if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
+ md->core_md.ctl_port = port;
+
++ if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX)
++ md->core_ap.ctl_port = port;
++
+ port->t7xx_dev = md->t7xx_dev;
+ port->dev = &md->t7xx_dev->pdev->dev;
+ spin_lock_init(&port->port_update_lock);
+@@ -442,7 +453,7 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
+
+ static int t7xx_proxy_alloc(struct t7xx_modem *md)
+ {
+- unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf);
++ unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct port_proxy *port_prox;
+ int i;
+@@ -456,7 +467,7 @@ static int t7xx_proxy_alloc(struct t7xx_modem *md)
+ port_prox->dev = dev;
+
+ for (i = 0; i < port_count; i++)
+- port_prox->ports[i].port_conf = &t7xx_md_port_conf[i];
++ port_prox->ports[i].port_conf = &t7xx_port_conf[i];
+
+ port_prox->port_count = port_count;
+ t7xx_proxy_init_all_ports(md);
+@@ -481,6 +492,7 @@ int t7xx_port_proxy_init(struct t7xx_modem *md)
+ if (ret)
+ return ret;
+
++ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
+ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
+ return 0;
+ }
+diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
+index 7c1b81091a0f3f..c41d7d094c0853 100644
+--- a/drivers/net/wwan/t7xx/t7xx_reg.h
++++ b/drivers/net/wwan/t7xx/t7xx_reg.h
+@@ -56,7 +56,7 @@
+ #define D2H_INT_RESUME_ACK BIT(12)
+ #define D2H_INT_SUSPEND_ACK_AP BIT(13)
+ #define D2H_INT_RESUME_ACK_AP BIT(14)
+-#define D2H_INT_ASYNC_SAP_HK BIT(15)
++#define D2H_INT_ASYNC_AP_HK BIT(15)
+ #define D2H_INT_ASYNC_MD_HK BIT(16)
+
+ /* Register base */
+diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+index 0bcca08ff2bd08..80edb8e75a6ad7 100644
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+@@ -285,8 +285,9 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
+ t7xx_md_event_notify(md, FSM_START);
+
+- wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg,
+- HZ * 60);
++ wait_event_interruptible_timeout(ctl->async_hk_wq,
++ (md->core_md.ready && md->core_ap.ready) ||
++ ctl->exp_flg, HZ * 60);
+ dev = &md->t7xx_dev->pdev->dev;
+
+ if (ctl->exp_flg)
+@@ -297,6 +298,13 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
+ if (md->core_md.handshake_ongoing)
+ t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
+
++ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
++ return -ETIMEDOUT;
++ } else if (!md->core_ap.ready) {
++ dev_err(dev, "AP handshake timeout\n");
++ if (md->core_ap.handshake_ongoing)
++ t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);
++
+ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
+ return -ETIMEDOUT;
+ }
+@@ -335,6 +343,7 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
+ return;
+ }
+
++ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+ fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+ }
+diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+index b1af0259d4c557..b6e76f3903c892 100644
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+@@ -38,10 +38,12 @@ enum t7xx_fsm_state {
+ enum t7xx_fsm_event_state {
+ FSM_EVENT_INVALID,
+ FSM_EVENT_MD_HS2,
++ FSM_EVENT_AP_HS2,
+ FSM_EVENT_MD_EX,
+ FSM_EVENT_MD_EX_REC_OK,
+ FSM_EVENT_MD_EX_PASS,
+ FSM_EVENT_MD_HS2_EXIT,
++ FSM_EVENT_AP_HS2_EXIT,
+ FSM_EVENT_MAX
+ };
diff --git a/PKGBUILD b/PKGBUILD
index 4df59f7b3653..3b77ff9f8d21 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -12,7 +12,7 @@ pkgbase=linux-jcore
pkgname=('linux-jcore' 'linux-jcore-headers')
_kernelname=-jcore
_hostname=jcore
-pkgver=6.5.3
+pkgver=6.5.7
pkgrel=1
pkgdesc="Kernel for Manjaro/EndeavourOS/Arch (ACS override patch include)"
arch=('x86_64')
@@ -23,13 +23,16 @@ replaces=('linux-acs-manjaro' 'linux-acs-manjaro-headers')
options=('!strip')
source=("https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-$pkgver.tar.xz"
- 'config'
- # Upstream Patches
+ config
# ARCH Patches
- '0101-ZEN_Add_sysctl_and_CONFIG_to_disallow_unprivileged_CLONE_NEWUSER.patch'
+ 0101-ZEN_Add_sysctl_and_CONFIG_to_disallow_unprivileged_CLONE_NEWUSER.patch
+ 0102-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch
+ 0103-ASoC-Intel-soc-acpi-fix-Dell-SKU-0B34.patch
+ 0104-btrfs-wait-on-uncached-block-groups-on-every-allocat.patch
+ 0105-net_wwan_t7xx_add-AP-CLDMA.patch
# MANJARO Patches
- '0201-asus-ally-asus-hid-6.5.patch'
- '0202-mt7921e_Perform_FLR_to_recovery_the_device.patch'
+ 0201-asus-ally-asus-hid-6.5.patch
+ 0202-mt7921e_Perform_FLR_to_recovery_the_device.patch
# GPU reset through sysfs
0001-GPU-reset.patch
# No overrides ROG ally
@@ -49,12 +52,23 @@ source=("https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-$pkgver.tar.xz"
0011-ALSA-hda-cs35l41-Ensure-amp-is-only-unmuted-during-p.patch
# Realtek patch
0998-patch_realtek.patch
+ # HID patches
+ 0001-HID.patch
+ # Additional ALLY patches
+ ROG-ALLY-LED-fix.patch
+ ROG-ALLY-NCT6775-PLATFORM.patch
+ ROG_ALLY_OLDER_BIOS_AUDIO.patch
+ 0001-ROG-ALLY-bmi323-device.patch
# ACS override patch
'0999-acs.gitpatch')
-sha256sums=('4cac13f7b17bd8dcf9032ad68f9123ab5313d698c9f59416043165150763eb4f'
- 'fdfa0d96f3c11d4d52da67d4b0b1ddacdb8347daae88546eba72187d43d558b0'
+sha256sums=('0d09ea448005c9cfe5383e4c72a872b39188b928f8c44e146b03b1b7851fbb8c'
+ '68c8f0aecfabb6f6eac76daed2985e9255ab62f98669d341d02aa66070ce17ce'
'05f04019d4a2ee072238c32860fa80d673687d84d78ef436ae9332b6fb788467'
+ 'e1d17690dd21e8d5482b63ca66bfe6b478d39e8e8b59eedd53adb0a55ebc308d'
+ '6a8e31d9e0b9dfb13c1fb96cea1dc17929920038c50db7d731e1af63fd0b26b9'
+ '997e873b5fa0fb94ebc7ab88474edc15f7e94340643a1305fd2eab2917f39df6'
+ '6a5d3cff6d9887b9f2e6fe8121cadf7b22cafcfabe3bd0d3e80d9174ede7204d'
'6541760a7b0513ce52c7b2d67810135b1bd172849f52765e74a5ec0c7584dd56'
'd673d034fbcd80426fd8d9c6af56537c5fe5b55fe49d74e313474d7fc285ecc1'
'11ff8e0119e1bd33a572e18a10dfb94f521b4e38ee5948d3c0508faf07835a78'
@@ -72,6 +86,11 @@ sha256sums=('4cac13f7b17bd8dcf9032ad68f9123ab5313d698c9f59416043165150763eb4f'
'b9298bde48a9f6c5d028150d627c05c71880e2693933ef2fe070f090e80876a5'
'4d53a6853b63c0f01b60b408bee61fa729656f925e50fa55ae3cba309668242a'
'3aa9f1ca47bb078f3c9a52fe61897cf4fe989068cd7e66bfa6644fd605fa40d2'
+ 'ed7f4ba3b47c92b5102c9eef81f41e57216e9357d4a638199035a93f080eeb1a'
+ '68a9b80e0b8d75880fbf3d8486bfe522cb19b4042554786b23bead9320165fa5'
+ 'cfcd5c177423df8b7b98b0500fe7ab0757f895ed945c33e205963f0069c7a3be'
+ '2d8246d2ff6312cd8ff832c50f4176201e43fe9d55e9b758bec9f0cad75bd0ba'
+ '5574a68b1c7733769835bb856a8c32e54398dfde59f264708672b87b73b3c6ea'
'458d7e024d33d4965b14b9b987f01a2884ff28761cff5da7c6a54132a95e9f36')
prepare() {
diff --git a/ROG-ALLY-LED-fix.patch b/ROG-ALLY-LED-fix.patch
new file mode 100644
index 000000000000..e5014cba75de
--- /dev/null
+++ b/ROG-ALLY-LED-fix.patch
@@ -0,0 +1,32 @@
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index e7993d7..c8189f9 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -454,6 +454,9 @@ static int rog_nkey_led_init(struct hid_device *hdev)
+ 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
+ u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1,
+ 0x05, 0x20, 0x31, 0x00, 0x08 };
++ u8 buf_init4[] = { 0x5a, 0xb3};
++ u8 buf_init5[] = { 0x5a, 0xb5};
++ u8 buf_init6[] = { 0x5a, 0xb4};
+ int ret;
+
+ hid_info(hdev, "Asus initialise N-KEY Device");
+@@ -488,6 +491,17 @@ static int rog_nkey_led_init(struct hid_device *hdev)
+ ret = asus_kbd_set_report(hdev, buf_init3, sizeof(buf_init3));
+ if (ret < 0)
+ hid_warn(hdev, "Asus failed to send init command 2.1: %d\n", ret);
++
++ /* ROG ALLY LED init */
++ ret = asus_kbd_set_report(hdev, buf_init4, sizeof(buf_init4));
++ if (ret < 0)
++ hid_warn(hdev, "Asus failed to send ROG ALLY LED init command: %d\n", ret);
++ ret = asus_kbd_set_report(hdev, buf_init5, sizeof(buf_init5));
++ if (ret < 0)
++ hid_warn(hdev, "Asus failed to send ROG ALLY LED init command: %d\n", ret);
++ ret = asus_kbd_set_report(hdev, buf_init6, sizeof(buf_init6));
++ if (ret < 0)
++ hid_warn(hdev, "Asus failed to send ROG ALLY LED init command: %d\n", ret);
+
+ return ret;
+ }
diff --git a/ROG-ALLY-NCT6775-PLATFORM.patch b/ROG-ALLY-NCT6775-PLATFORM.patch
new file mode 100644
index 000000000000..806ad7959e01
--- /dev/null
+++ b/ROG-ALLY-NCT6775-PLATFORM.patch
@@ -0,0 +1,12 @@
+diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
+index 81bf03d..96d875b 100644
+--- a/drivers/hwmon/nct6775-platform.c
++++ b/drivers/hwmon/nct6775-platform.c
+@@ -1359,6 +1359,7 @@ static const char * const asus_msi_boards[] = {
+ "ProArt X670E-CREATOR WIFI",
+ "ProArt Z690-CREATOR WIFI",
+ "ProArt Z790-CREATOR WIFI",
++ "RC71L",
+ "ROG CROSSHAIR X670E EXTREME",
+ "ROG CROSSHAIR X670E GENE",
+ "ROG CROSSHAIR X670E HERO",
diff --git a/ROG_ALLY_OLDER_BIOS_AUDIO.patch b/ROG_ALLY_OLDER_BIOS_AUDIO.patch
new file mode 100644
index 000000000000..d8cf5d888221
--- /dev/null
+++ b/ROG_ALLY_OLDER_BIOS_AUDIO.patch
@@ -0,0 +1,18 @@
+diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
+index b39f944..feb4852 100644
+--- a/sound/pci/hda/cs35l41_hda_property.c
++++ b/sound/pci/hda/cs35l41_hda_property.c
+@@ -72,6 +72,13 @@ static int asus_rog_2023_spkr_id2(struct cs35l41_hda *cs35l41, struct device *ph
+ } else {
+ cs35l41->reset_gpio = gpiod_get_index(physdev, NULL, 0, GPIOD_OUT_HIGH);
+ }
++
++ if (strcmp(cs35l41->acpi_subsystem_id, "104317F3") == 0){
++ hw_cfg->bst_type = CS35L41_INT_BOOST; /* ROG ALLY specific config */
++ hw_cfg->bst_ind = 1000; /* 1,000nH Inductance value */
++ hw_cfg->bst_ipk = 4500; /* 4,500mA peak current */
++ hw_cfg->bst_cap = 24; /* 24 microFarad cap value */
++ }
+
+ hw_cfg->valid = true;
+
diff --git a/config b/config
index 8223e5b6fbd9..70ae58a567bf 100644
--- a/config
+++ b/config
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.5.3-1 Kernel Configuration
+# Linux/x86 6.5.7-1 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 13.2.1 20230801"
CONFIG_CC_IS_GCC=y
@@ -6142,6 +6142,7 @@ CONFIG_DVB_BUDGET_CORE=m
CONFIG_DVB_BUDGET=m
CONFIG_DVB_BUDGET_CI=m
CONFIG_DVB_BUDGET_AV=m
+CONFIG_IPU_BRIDGE=m
CONFIG_VIDEO_IPU3_CIO2=m
CONFIG_CIO2_BRIDGE=y
CONFIG_RADIO_ADAPTERS=m
@@ -6326,10 +6327,7 @@ CONFIG_MEDIA_ATTACH=y
# IR I2C driver auto-selected by 'Autoselect ancillary drivers'
#
CONFIG_VIDEO_IR_I2C=m
-
-#
-# Camera sensor devices
-#
+CONFIG_VIDEO_CAMERA_SENSOR=y
CONFIG_VIDEO_APTINA_PLL=m
CONFIG_VIDEO_CCS_PLL=m
CONFIG_VIDEO_AR0521=m
@@ -6392,7 +6390,6 @@ CONFIG_VIDEO_S5K5BAF=m
CONFIG_VIDEO_S5K6A3=m
CONFIG_VIDEO_CCS=m
CONFIG_VIDEO_ET8EK8=m
-# end of Camera sensor devices
#
# Lens drivers
@@ -10644,7 +10641,6 @@ CONFIG_INTEGRITY_MACHINE_KEYRING=y
CONFIG_LOAD_UEFI_KEYS=y
CONFIG_INTEGRITY_AUDIT=y
# CONFIG_IMA is not set
-# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set
# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set
# CONFIG_EVM is not set
# CONFIG_DEFAULT_SECURITY_SELINUX is not set