[Copybara Auto Merge] Merge branch 'whitechapel' into android-gs-pixel-5.10

  edgetpu: add namespace import for DMA_BUF for 5.16
  Bug: 232003048
  edgetpu: add support for all dvfs frequencies
  Bug: 230582743
  edgetpu: fetch hw ctx region from device tree
  Bug: 230236668
  edgetpu: include uaccess.h for access_ok
  Bug: 201243473
  edgetpu: cast access_ok address param to avoid type warning
  Bug: 201243473
  edgetpu: retry buffer map read-only on EFAULT
  Bug: 201243473
  edgetpu: add new firmware metrics
  Bug: 229311738
  edgetpu: Change debug dump logs for prod FW builds.
  Bug: 228917587
  edgetpu: mobile: Add handler for FW checkpoint dump reason.
  Bug: 228916950
  edgetpu: fix system out of memory locking pages log message
  edgetpu: add edgetpu_domain_pool
  edgetpu: update client pid on wakelock acquire
  Bug: 217454089

GitOrigin-RevId: 73c9cafea4482608a8010716af983a72da30a7f3
Change-Id: Icd69eb5cdbe0aa135c5074f11daaa8cd9890893b
diff --git a/drivers/edgetpu/Kbuild b/drivers/edgetpu/Kbuild
index 83dd828..e0c3588 100644
--- a/drivers/edgetpu/Kbuild
+++ b/drivers/edgetpu/Kbuild
@@ -10,7 +10,7 @@
 	ccflags-y	+= -DGIT_REPO_TAG=\"Not\ a\ git\ repository\"
 endif
 
-edgetpu-objs	:= edgetpu-mailbox.o edgetpu-kci.o edgetpu-telemetry.o edgetpu-mapping.o edgetpu-dmabuf.o edgetpu-async.o edgetpu-iremap-pool.o edgetpu-sw-watchdog.o edgetpu-firmware.o edgetpu-firmware-util.o
+edgetpu-objs	:= edgetpu-mailbox.o edgetpu-kci.o edgetpu-telemetry.o edgetpu-mapping.o edgetpu-dmabuf.o edgetpu-async.o edgetpu-iremap-pool.o edgetpu-sw-watchdog.o edgetpu-firmware.o edgetpu-firmware-util.o edgetpu-domain-pool.o
 
 abrolhos-y	:= abrolhos-device.o abrolhos-device-group.o abrolhos-fs.o abrolhos-core.o abrolhos-platform.o abrolhos-firmware.o abrolhos-thermal.o abrolhos-pm.o abrolhos-iommu.o abrolhos-debug-dump.o abrolhos-usage-stats.o abrolhos-wakelock.o $(edgetpu-objs)
 
diff --git a/drivers/edgetpu/Makefile b/drivers/edgetpu/Makefile
index 3c2f832..ac850d2 100644
--- a/drivers/edgetpu/Makefile
+++ b/drivers/edgetpu/Makefile
@@ -16,7 +16,8 @@
 edgetpu-objs	:= edgetpu-async.o edgetpu-dmabuf.o edgetpu-iremap-pool.o \
 		   edgetpu-kci.o edgetpu-mailbox.o edgetpu-mapping.o \
 		   edgetpu-sw-watchdog.o edgetpu-telemetry.o \
-		   edgetpu-firmware-util.o edgetpu-firmware.o
+		   edgetpu-firmware-util.o edgetpu-firmware.o \
+		   edgetpu-domain-pool.o
 
 abrolhos-objs	:= abrolhos-core.o abrolhos-debug-dump.o \
 		   abrolhos-device-group.o abrolhos-device.o \
diff --git a/drivers/edgetpu/edgetpu-core.c b/drivers/edgetpu/edgetpu-core.c
index b84d626..81c54a6 100644
--- a/drivers/edgetpu/edgetpu-core.c
+++ b/drivers/edgetpu/edgetpu-core.c
@@ -434,6 +434,8 @@
 	etdev->vcid_pool = (1u << EDGETPU_NUM_VCIDS) - 1;
 	mutex_init(&etdev->state_lock);
 	etdev->state = ETDEV_STATE_NOFW;
+	etdev->freq_count = 0;
+	mutex_init(&etdev->freq_lock);
 
 	ret = edgetpu_fs_add(etdev, iface_params, num_ifaces);
 	if (ret) {
diff --git a/drivers/edgetpu/edgetpu-debug-dump.c b/drivers/edgetpu/edgetpu-debug-dump.c
index a017e7f..baf526a 100644
--- a/drivers/edgetpu/edgetpu-debug-dump.c
+++ b/drivers/edgetpu/edgetpu-debug-dump.c
@@ -70,13 +70,14 @@
 	etdev_dbg(etdev, "Sent debug dump request, tpu addr: %llx",
 		  (u64)etdev->debug_dump_mem.tpu_addr);
 	if (ret) {
-		if (init_fw_dump_buffer)
-			etdev_err(etdev, "failed to init dump buffer in FW");
-
-		if (ret == KCI_ERROR_UNIMPLEMENTED)
+		if (ret == KCI_ERROR_UNIMPLEMENTED) {
 			etdev_dbg(etdev, "Debug dump KCI not implemented");
-		else
-			etdev_err(etdev, "Debug dump KCI req failed: %d", ret);
+		} else {
+			if (init_fw_dump_buffer)
+				etdev_err(etdev, "failed to init dump buffer in FW");
+			else
+				etdev_err(etdev, "Debug dump KCI req failed: %d", ret);
+		}
 	}
 
 	return ret;
diff --git a/drivers/edgetpu/edgetpu-device-group.c b/drivers/edgetpu/edgetpu-device-group.c
index 49c672d..86ef9d2 100644
--- a/drivers/edgetpu/edgetpu-device-group.c
+++ b/drivers/edgetpu/edgetpu-device-group.c
@@ -21,6 +21,7 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/uaccess.h>
 #include <linux/uidgid.h>
 
 #include "edgetpu-async.h"
@@ -1168,10 +1169,16 @@
 
 	if (size == 0)
 		return ERR_PTR(-EINVAL);
+	if (!access_ok((const void *)host_addr, size)) {
+		etdev_err(etdev, "invalid address range in buffer map request");
+		return ERR_PTR(-EFAULT);
+	}
 	offset = host_addr & (PAGE_SIZE - 1);
-	/* overflow check */
-	if (unlikely((size + offset) / PAGE_SIZE >= UINT_MAX - 1 || size + offset < size))
-		return ERR_PTR(-ENOMEM);
+	/* overflow check (should also be caught by access_ok) */
+	if (unlikely((size + offset) / PAGE_SIZE >= UINT_MAX - 1 || size + offset < size)) {
+		etdev_err(etdev, "address overflow in buffer map request");
+		return ERR_PTR(-EFAULT);
+	}
 	num_pages = DIV_ROUND_UP((size + offset), PAGE_SIZE);
 	etdev_dbg(etdev, "%s: hostaddr=%#llx pages=%u", __func__, host_addr, num_pages);
 	/*
@@ -1204,10 +1211,20 @@
 		*pnum_pages = num_pages;
 		return pages;
 	}
+	if (ret == -EFAULT && !*preadonly) {
+		foll_flags &= ~FOLL_WRITE;
+		*preadonly = true;
+		ret = pin_user_pages_fast(host_addr & PAGE_MASK, num_pages,
+					  foll_flags, pages);
+	}
 	if (ret < 0) {
 		etdev_dbg(etdev, "pin_user_pages failed %u:%pK-%u: %d",
 			  group->workload_id, (void *)host_addr, num_pages,
 			  ret);
+		if (ret == -EFAULT)
+			etdev_err(etdev,
+				  "bad address locking %u pages for %s",
+				  num_pages, *preadonly ? "read" : "write");
 		if (ret != -ENOMEM) {
 			num_pages = 0;
 			goto error;
@@ -1236,12 +1253,11 @@
 		etdev_dbg(etdev, "pin_user_pages failed %u:%pK-%u: %d",
 			  group->workload_id, (void *)host_addr, num_pages,
 			  ret);
-		num_pages = 0;
-
 		if (ret == -ENOMEM)
 			etdev_err(etdev,
 				  "system out of memory locking %u pages",
 				  num_pages);
+		num_pages = 0;
 		goto error;
 	}
 	if (ret < num_pages) {
diff --git a/drivers/edgetpu/edgetpu-dmabuf.c b/drivers/edgetpu/edgetpu-dmabuf.c
index 276da72..657ae75 100644
--- a/drivers/edgetpu/edgetpu-dmabuf.c
+++ b/drivers/edgetpu/edgetpu-dmabuf.c
@@ -1049,3 +1049,7 @@
 	spin_unlock_irq(&etfence_list_lock);
 	return 0;
 }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
+MODULE_IMPORT_NS(DMA_BUF);
+#endif
diff --git a/drivers/edgetpu/edgetpu-domain-pool.c b/drivers/edgetpu/edgetpu-domain-pool.c
new file mode 100644
index 0000000..2989254
--- /dev/null
+++ b/drivers/edgetpu/edgetpu-domain-pool.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EdgeTPU IOMMU domain allocator.
+ *
+ * Copyright (C) 2022 Google, LLC.
+ */
+
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+
+#include "edgetpu-domain-pool.h"
+#include "edgetpu-internal.h"
+
+int edgetpu_domain_pool_init(struct edgetpu_dev *etdev, struct edgetpu_domain_pool *pool,
+			     unsigned int size)
+{
+	unsigned int i;
+	struct iommu_domain *domain;
+
+	pool->size = size;
+	pool->etdev = etdev;
+
+	if (!size)
+		return 0;
+
+	etdev_dbg(pool->etdev, "Initializing domain pool with %u domains\n", size);
+
+	ida_init(&pool->idp);
+	pool->array = vzalloc(sizeof(*pool->array) * size);
+	if (!pool->array) {
+		etdev_err(etdev, "Failed to allocate memory for domain pool array\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < size; i++) {
+		domain = iommu_domain_alloc(pool->etdev->dev->bus);
+		if (!domain) {
+			etdev_err(pool->etdev, "Failed to allocate iommu domain %d of %u\n", i + 1,
+				  size);
+			edgetpu_domain_pool_destroy(pool);
+			return -ENOMEM;
+		}
+		pool->array[i] = domain;
+	}
+	return 0;
+}
+
+struct iommu_domain *edgetpu_domain_pool_alloc(struct edgetpu_domain_pool *pool)
+{
+	int id;
+
+	if (!pool->size)
+		return iommu_domain_alloc(pool->etdev->dev->bus);
+
+	id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
+
+	if (id < 0) {
+		etdev_err(pool->etdev, "No more domains available from pool of size %u\n",
+			  pool->size);
+		return NULL;
+	}
+
+	etdev_dbg(pool->etdev, "Allocated domain from pool with id = %d\n", id);
+
+	return pool->array[id];
+}
+
+void edgetpu_domain_pool_free(struct edgetpu_domain_pool *pool, struct iommu_domain *domain)
+{
+	int id;
+
+	if (!pool->size) {
+		iommu_domain_free(domain);
+		return;
+	}
+	for (id = 0; id < pool->size; id++) {
+		if (pool->array[id] == domain) {
+			etdev_dbg(pool->etdev, "Released domain from pool with id = %d\n", id);
+			ida_free(&pool->idp, id);
+			return;
+		}
+	}
+	etdev_err(pool->etdev, "%s: domain not found in pool", __func__);
+}
+
+void edgetpu_domain_pool_destroy(struct edgetpu_domain_pool *pool)
+{
+	int i;
+
+	if (!pool->size)
+		return;
+
+	etdev_dbg(pool->etdev, "Destroying domain pool with %u domains\n", pool->size);
+
+	for (i = 0; i < pool->size; i++) {
+		if (pool->array[i])
+			iommu_domain_free(pool->array[i]);
+	}
+
+	ida_destroy(&pool->idp);
+	vfree(pool->array);
+}
diff --git a/drivers/edgetpu/edgetpu-domain-pool.h b/drivers/edgetpu/edgetpu-domain-pool.h
new file mode 100644
index 0000000..3dd19d3
--- /dev/null
+++ b/drivers/edgetpu/edgetpu-domain-pool.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IOMMU domain allocator for edgetpu
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __EDGETPU_DOMAIN_POOL_H__
+#define __EDGETPU_DOMAIN_POOL_H__
+
+#include <linux/idr.h>
+#include <linux/iommu.h>
+
+#include "edgetpu-internal.h"
+
+struct edgetpu_domain_pool {
+	struct ida idp;			/* ID allocator to keep track of used domains. */
+	/*
+	 * Size of the pool. Can be set to 0, in which case the implementation will fall back to
+	 * dynamic domain allocation using the IOMMU API directly.
+	 */
+	unsigned int size;
+	struct iommu_domain **array;	/* Array holding the pointers to pre-allocated domains. */
+	struct edgetpu_dev *etdev;	/* The edgetpu device used for logging warnings/errors. */
+};
+
+
+/*
+ * Initializes a domain pool.
+ *
+ * @etdev: pointer to edgetpu device.
+ * @pool: caller-allocated pool structure.
+ * @size: size of the pre-allocated domains pool.
+ * Set to zero to fall back to dynamically allocated domains.
+ *
+ * returns 0 on success or negative error value.
+ */
+int edgetpu_domain_pool_init(struct edgetpu_dev *etdev, struct edgetpu_domain_pool *pool,
+			     unsigned int size);
+
+/*
+ * Allocates a domain from the pool
+ * returns NULL on error.
+ */
+struct iommu_domain *edgetpu_domain_pool_alloc(struct edgetpu_domain_pool *pool);
+
+/* Releases a domain from the pool. */
+void edgetpu_domain_pool_free(struct edgetpu_domain_pool *pool, struct iommu_domain *domain);
+
+/* Cleans up all resources used by the domain pool. */
+void edgetpu_domain_pool_destroy(struct edgetpu_domain_pool *pool);
+
+#endif /* __EDGETPU_DOMAIN_POOL_H__ */
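For illustration only, a minimal sketch of how a caller might use the domain pool API declared above. The function name example_setup_contexts and the pool size of 4 are hypothetical, and error handling is abbreviated; this is not part of the driver sources.

/* Hypothetical usage sketch of the edgetpu_domain_pool API. */
#include <linux/errno.h>
#include <linux/iommu.h>

#include "edgetpu-domain-pool.h"
#include "edgetpu-internal.h"

static int example_setup_contexts(struct edgetpu_dev *etdev)
{
	struct edgetpu_domain_pool pool;
	struct iommu_domain *domain;
	int ret;

	/* Pre-allocate 4 domains; passing 0 falls back to dynamic allocation. */
	ret = edgetpu_domain_pool_init(etdev, &pool, 4);
	if (ret)
		return ret;

	domain = edgetpu_domain_pool_alloc(&pool);
	if (!domain) {
		edgetpu_domain_pool_destroy(&pool);
		return -ENOMEM;
	}

	/* ... attach the domain and map buffers here ... */

	/* Return the domain to the pool, then release all pool resources. */
	edgetpu_domain_pool_free(&pool, domain);
	edgetpu_domain_pool_destroy(&pool);
	return 0;
}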
diff --git a/drivers/edgetpu/edgetpu-fs.c b/drivers/edgetpu/edgetpu-fs.c
index e722738..d0efb67 100644
--- a/drivers/edgetpu/edgetpu-fs.c
+++ b/drivers/edgetpu/edgetpu-fs.c
@@ -582,6 +582,14 @@
 	struct edgetpu_thermal *thermal = client->etdev->thermal;
 
 	LOCK(client);
+	/*
+	 * Update client PID; the client may have been passed from the
+	 * edgetpu service that originally created it to a new process.
+	 * By the time the client holds TPU wakelocks it will have been
+	 * passed to the new owning process.
+	 */
+	client->pid = current->pid;
+	client->tgid = current->tgid;
 	edgetpu_thermal_lock(thermal);
 	if (edgetpu_thermal_is_suspended(thermal)) {
 		/* TPU is thermal suspended, so fail acquiring wakelock */
@@ -629,7 +637,8 @@
 	return 0;
 error_unlock:
 	UNLOCK(client);
-	etdev_err(client->etdev, "PID: %d failed to acquire wakelock", client->pid);
+	etdev_err(client->etdev, "client pid %d failed to acquire wakelock",
+		  client->pid);
 	return ret;
 }
 
@@ -658,7 +667,8 @@
 
 	ret = edgetpu_chip_acquire_ext_mailbox(client, &ext_mailbox);
 	if (ret)
-		etdev_err(client->etdev, "PID: %d failed to acquire ext mailbox", client->pid);
+		etdev_err(client->etdev, "client pid %d failed to acquire ext mailbox",
+			  client->pid);
 	return ret;
 }
 
diff --git a/drivers/edgetpu/edgetpu-google-iommu.c b/drivers/edgetpu/edgetpu-google-iommu.c
index 0f28918..95578b5 100644
--- a/drivers/edgetpu/edgetpu-google-iommu.c
+++ b/drivers/edgetpu/edgetpu-google-iommu.c
@@ -14,10 +14,15 @@
 #include <linux/types.h>
 
 #include "edgetpu-config.h"
+#include "edgetpu-domain-pool.h"
 #include "edgetpu-internal.h"
 #include "edgetpu-mapping.h"
 #include "edgetpu-mmu.h"
 
+#if !defined(EDGETPU_NUM_PREALLOCATED_DOMAINS)
+#define EDGETPU_NUM_PREALLOCATED_DOMAINS 0
+#endif
+
 struct edgetpu_iommu {
 	struct iommu_group *iommu_group;
 	/*
@@ -26,13 +31,20 @@
 	 */
 	struct iommu_domain *domains[EDGETPU_NCONTEXTS];
 	/*
-	 * Records all domains currently allocated, to support IOMMU (un)mapping
+	 * Records IDs for all domains currently allocated, to support IOMMU (un)mapping
 	 * when the domain is not attached.
 	 */
-	struct idr domain_pool;
-	struct mutex pool_lock;		/* protects access of @domain_pool */
+	struct idr domain_id_pool;
+	struct mutex pool_lock;		/* protects access of @domain_id_pool */
 	bool context_0_default;		/* is context 0 domain the default? */
 	bool aux_enabled;
+	/*
+	 * Holds a pool of pre-allocated IOMMU domains if the chip config specifies this is
+	 * required.
+	 * The implementation will fall back to dynamically allocated domains otherwise.
+	 */
+	struct edgetpu_domain_pool domain_pool;
+
 };
 
 struct edgetpu_iommu_map_params {
@@ -57,7 +69,7 @@
 	struct iommu_domain *domain;
 
 	mutex_lock(&etiommu->pool_lock);
-	domain = idr_find(&etiommu->domain_pool, token);
+	domain = idr_find(&etiommu->domain_id_pool, token);
 	mutex_unlock(&etiommu->pool_lock);
 	return domain;
 }
@@ -131,8 +143,9 @@
 static int edgetpu_idr_free_domain_callback(int id, void *p, void *data)
 {
 	struct iommu_domain *domain = p;
+	struct edgetpu_iommu *etiommu = data;
 
-	iommu_domain_free(domain);
+	edgetpu_domain_pool_free(&etiommu->domain_pool, domain);
 	return 0;
 }
 
@@ -181,7 +194,7 @@
 	if (!etiommu->aux_enabled)
 		return -EINVAL;
 
-	domain = iommu_domain_alloc(etdev->dev->bus);
+	domain = edgetpu_domain_pool_alloc(&etiommu->domain_pool);
 	if (!domain) {
 		etdev_warn(etdev, "iommu domain alloc failed");
 		return -EINVAL;
@@ -189,7 +202,7 @@
 	ret = iommu_aux_attach_device(domain, etdev->dev);
 	if (ret) {
 		etdev_warn(etdev, "Attach IOMMU aux failed: %d", ret);
-		iommu_domain_free(domain);
+		edgetpu_domain_pool_free(&etiommu->domain_pool, domain);
 		return ret;
 	}
 	pasid = iommu_aux_get_pasid(domain, etdev->dev);
@@ -198,7 +211,7 @@
 		etdev_warn(etdev, "Invalid PASID %d returned from iommu\n",
 			   pasid);
 		iommu_aux_detach_device(domain, etdev->dev);
-		iommu_domain_free(domain);
+		edgetpu_domain_pool_free(&etiommu->domain_pool, domain);
 		return -EINVAL;
 	}
 out:
@@ -215,7 +228,9 @@
 	etiommu = kzalloc(sizeof(*etiommu), GFP_KERNEL);
 	if (!etiommu)
 		return -ENOMEM;
-	idr_init(&etiommu->domain_pool);
+	ret = edgetpu_domain_pool_init(etdev, &etiommu->domain_pool,
+				       EDGETPU_NUM_PREALLOCATED_DOMAINS);
+	idr_init(&etiommu->domain_id_pool);
 	mutex_init(&etiommu->pool_lock);
 	etiommu->iommu_group = iommu_group_get(etdev->dev);
 	if (etiommu->iommu_group)
@@ -278,11 +293,12 @@
 
 	/* free the domain if the context 0 domain is not default */
 	if (!etiommu->context_0_default && etiommu->domains[0])
-		iommu_domain_free(etiommu->domains[0]);
+		edgetpu_domain_pool_free(&etiommu->domain_pool, etiommu->domains[0]);
 
-	idr_for_each(&etiommu->domain_pool, edgetpu_idr_free_domain_callback,
-		     NULL);
-	idr_destroy(&etiommu->domain_pool);
+	idr_for_each(&etiommu->domain_id_pool, edgetpu_idr_free_domain_callback,
+		     etiommu);
+	idr_destroy(&etiommu->domain_id_pool);
+	edgetpu_domain_pool_destroy(&etiommu->domain_pool);
 	kfree(etiommu);
 	etdev->mmu_cookie = NULL;
 }
@@ -611,26 +627,26 @@
 
 	if (!etiommu->aux_enabled)
 		return &invalid_etdomain;
-	domain = iommu_domain_alloc(etdev->dev->bus);
+	domain = edgetpu_domain_pool_alloc(&etiommu->domain_pool);
 	if (!domain) {
-		etdev_warn(etdev, "iommu domain alloc failed");
+		etdev_warn(etdev, "iommu domain allocation failed");
 		return NULL;
 	}
 
 	etdomain = kzalloc(sizeof(*etdomain), GFP_KERNEL);
 	if (!etdomain) {
-		iommu_domain_free(domain);
+		edgetpu_domain_pool_free(&etiommu->domain_pool, domain);
 		return NULL;
 	}
 
 	mutex_lock(&etiommu->pool_lock);
-	token = idr_alloc(&etiommu->domain_pool, domain, 0,
+	token = idr_alloc(&etiommu->domain_id_pool, domain, 0,
 			  EDGETPU_DOMAIN_TOKEN_END, GFP_KERNEL);
 	mutex_unlock(&etiommu->pool_lock);
 	if (token < 0) {
 		etdev_warn(etdev, "alloc iommu domain token failed: %d", token);
 		kfree(etdomain);
-		iommu_domain_free(domain);
+		edgetpu_domain_pool_free(&etiommu->domain_pool, domain);
 		return NULL;
 	}
 
@@ -650,9 +666,9 @@
 		edgetpu_mmu_detach_domain(etdev, etdomain);
 	}
 	mutex_lock(&etiommu->pool_lock);
-	idr_remove(&etiommu->domain_pool, etdomain->token);
+	idr_remove(&etiommu->domain_id_pool, etdomain->token);
 	mutex_unlock(&etiommu->pool_lock);
-	iommu_domain_free(etdomain->iommu_domain);
+	edgetpu_domain_pool_free(&etiommu->domain_pool, etdomain->iommu_domain);
 	kfree(etdomain);
 }
 
diff --git a/drivers/edgetpu/edgetpu-internal.h b/drivers/edgetpu/edgetpu-internal.h
index 122a3e7..37ea27f 100644
--- a/drivers/edgetpu/edgetpu-internal.h
+++ b/drivers/edgetpu/edgetpu-internal.h
@@ -226,6 +226,10 @@
 	/* debug dump handlers */
 	edgetpu_debug_dump_handlers *debug_dump_handlers;
 	struct work_struct debug_dump_work;
+
+	struct mutex freq_lock;	/* protects the freq_* fields below */
+	uint32_t *freq_table;	/* Array of frequencies reported by f/w */
+	uint32_t freq_count;	/* Number of entries in freq_table */
 };
 
 struct edgetpu_dev_iface {
diff --git a/drivers/edgetpu/edgetpu-mobile-platform.c b/drivers/edgetpu/edgetpu-mobile-platform.c
index 4e9cac0..af6bcb7 100644
--- a/drivers/edgetpu/edgetpu-mobile-platform.c
+++ b/drivers/edgetpu/edgetpu-mobile-platform.c
@@ -279,6 +279,38 @@
 		edgetpu_unregister_irq(etdev, etmdev->irq[i]);
 }
 
+/*
+ * Fetch and set the firmware context region from device tree.
+ *
+ * May be unused since not all chips need this.
+ */
+static int __maybe_unused
+edgetpu_mobile_platform_set_fw_ctx_memory(struct edgetpu_mobile_platform_dev *etmdev)
+{
+	struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+	struct device *dev = etdev->dev;
+	struct resource r;
+	struct device_node *np;
+	int ret;
+
+	np = of_parse_phandle(dev->of_node, "memory-region", 1);
+	if (!np) {
+		etdev_warn(etdev, "No memory for firmware contexts");
+		return -ENODEV;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	of_node_put(np);
+	if (ret) {
+		etdev_warn(etdev, "No memory address for firmware contexts");
+		return ret;
+	}
+
+	etmdev->fw_ctx_paddr = r.start;
+	etmdev->fw_ctx_size = resource_size(&r);
+	return 0;
+}
+
 static int edgetpu_mobile_platform_probe(struct platform_device *pdev,
 					 struct edgetpu_mobile_platform_dev *etmdev)
 {
diff --git a/drivers/edgetpu/edgetpu-pm.c b/drivers/edgetpu/edgetpu-pm.c
index ae075d0..a71232d 100644
--- a/drivers/edgetpu/edgetpu-pm.c
+++ b/drivers/edgetpu/edgetpu-pm.c
@@ -378,7 +378,8 @@
 		if (NO_WAKELOCK(lc->client->wakelock) ||
 		    !lc->client->wakelock->req_count)
 			continue;
-		etdev_warn_ratelimited(etdev, "pid %d tgid %d count %d\n",
+		etdev_warn_ratelimited(etdev,
+				       "client pid %d tgid %d count %d\n",
 				       lc->client->pid,
 				       lc->client->tgid,
 				       lc->client->wakelock->req_count);
diff --git a/drivers/edgetpu/edgetpu-usage-stats.c b/drivers/edgetpu/edgetpu-usage-stats.c
index 4c6dfcc..ba93d49 100644
--- a/drivers/edgetpu/edgetpu-usage-stats.c
+++ b/drivers/edgetpu/edgetpu-usage-stats.c
@@ -13,16 +13,36 @@
 #include "edgetpu-kci.h"
 #include "edgetpu-usage-stats.h"
 
+/* Max number of frequencies to support */
+#define EDGETPU_MAX_STATES	10
+
 struct uid_entry {
 	int32_t uid;
-	uint64_t time_in_state[EDGETPU_NUM_STATES];
+	uint64_t time_in_state[EDGETPU_MAX_STATES];
 	struct hlist_node node;
 };
 
-static int tpu_state_map(uint32_t state)
+static int tpu_state_map(struct edgetpu_dev *etdev, uint32_t state)
 {
-	int i;
+	int i, idx = 0;
 
+	mutex_lock(&etdev->freq_lock);
+	/* Use the frequency table if f/w has already reported it via usage_stats */
+	if (etdev->freq_table) {
+		for (i = etdev->freq_count - 1; i >= 0; i--) {
+			if (state == etdev->freq_table[i])
+				idx = i;
+		}
+		mutex_unlock(&etdev->freq_lock);
+		return idx;
+	}
+
+	mutex_unlock(&etdev->freq_lock);
+
+	/*
+	 * Use the predefined state table if firmware has not reported any
+	 * supported frequencies.
+	 */
 	for (i = (EDGETPU_NUM_STATES - 1); i >= 0; i--) {
 		if (state >= edgetpu_active_states[i])
 			return i;
@@ -49,7 +69,7 @@
 {
 	struct edgetpu_usage_stats *ustats = etdev->usage_stats;
 	struct uid_entry *uid_entry;
-	int state = tpu_state_map(tpu_usage->power_state);
+	int state = tpu_state_map(etdev, tpu_usage->power_state);
 
 	if (!ustats)
 		return 0;
@@ -186,6 +206,39 @@
 	mutex_unlock(&ustats->usage_stats_lock);
 }
 
+/* Record new supported frequencies if reported by firmware */
+static void edgetpu_dvfs_frequency_update(struct edgetpu_dev *etdev, uint32_t frequency)
+{
+	uint32_t *freq_table, i;
+
+	mutex_lock(&etdev->freq_lock);
+	if (!etdev->freq_table) {
+		freq_table = kvmalloc(EDGETPU_MAX_STATES * sizeof(uint32_t), GFP_KERNEL);
+		if (!freq_table) {
+			etdev_warn(etdev, "Unable to create supported frequencies table");
+			goto out;
+		}
+		etdev->freq_count = 0;
+		etdev->freq_table = freq_table;
+	}
+
+	freq_table = etdev->freq_table;
+
+	for (i = 0; i < etdev->freq_count; i++) {
+		if (freq_table[i] == frequency)
+			goto out;
+	}
+
+	if (etdev->freq_count >= EDGETPU_MAX_STATES) {
+		etdev_warn(etdev, "Unable to record supported frequencies");
+		goto out;
+	}
+
+	freq_table[etdev->freq_count++] = frequency;
+out:
+	mutex_unlock(&etdev->freq_lock);
+}
+
 void edgetpu_usage_stats_process_buffer(struct edgetpu_dev *etdev, void *buf)
 {
 	struct edgetpu_usage_header *header = buf;
@@ -221,6 +274,10 @@
 			edgetpu_thread_stats_update(
 				etdev, &metric->thread_stats);
 			break;
+		case EDGETPU_METRIC_TYPE_DVFS_FREQUENCY_INFO:
+			edgetpu_dvfs_frequency_update(
+				etdev, metric->dvfs_frequency_info);
+			break;
 		default:
 			etdev_dbg(etdev, "%s: %d: skip unknown type=%u",
 				  __func__, i, metric->type);
@@ -294,9 +351,18 @@
 	/* uid: state0speed state1speed ... */
 	ret += scnprintf(buf, PAGE_SIZE, "uid:");
 
-	for (i = 0; i < EDGETPU_NUM_STATES; i++)
-		ret += scnprintf(buf + ret, PAGE_SIZE - ret, " %d",
-				 edgetpu_states_display[i]);
+	mutex_lock(&etdev->freq_lock);
+	if (!etdev->freq_table) {
+		mutex_unlock(&etdev->freq_lock);
+		for (i = 0; i < EDGETPU_NUM_STATES; i++)
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret, " %d",
+					 edgetpu_states_display[i]);
+	} else {
+		for (i = 0; i < etdev->freq_count; i++)
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret, " %d",
+					 etdev->freq_table[i]);
+		mutex_unlock(&etdev->freq_lock);
+	}
 
 	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
 
@@ -570,6 +636,69 @@
 static DEVICE_ATTR(hardware_preempt_count, 0664, hardware_preempt_count_show,
 		   hardware_preempt_count_store);
 
+static ssize_t hardware_ctx_save_time_show(struct device *dev, struct device_attribute *attr,
+					   char *buf)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+	int64_t val;
+
+	val = edgetpu_usage_get_counter(etdev, EDGETPU_COUNTER_HARDWARE_CTX_SAVE_TIME_US);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
+static ssize_t hardware_ctx_save_time_store(struct device *dev, struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+	edgetpu_counter_clear(etdev, EDGETPU_COUNTER_HARDWARE_CTX_SAVE_TIME_US);
+	return count;
+}
+static DEVICE_ATTR(hardware_ctx_save_time, 0664, hardware_ctx_save_time_show,
+		   hardware_ctx_save_time_store);
+
+static ssize_t scalar_fence_wait_time_show(struct device *dev, struct device_attribute *attr,
+					   char *buf)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+	int64_t val;
+
+	val = edgetpu_usage_get_counter(etdev, EDGETPU_COUNTER_SCALAR_FENCE_WAIT_TIME_US);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
+static ssize_t scalar_fence_wait_time_store(struct device *dev, struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+	edgetpu_counter_clear(etdev, EDGETPU_COUNTER_SCALAR_FENCE_WAIT_TIME_US);
+	return count;
+}
+static DEVICE_ATTR(scalar_fence_wait_time, 0664, scalar_fence_wait_time_show,
+		   scalar_fence_wait_time_store);
+
+static ssize_t long_suspend_count_show(struct device *dev, struct device_attribute *attr,
+					   char *buf)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+	int64_t val;
+
+	val = edgetpu_usage_get_counter(etdev, EDGETPU_COUNTER_LONG_SUSPEND);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
+static ssize_t long_suspend_count_store(struct device *dev, struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+
+	edgetpu_counter_clear(etdev, EDGETPU_COUNTER_LONG_SUSPEND);
+	return count;
+}
+static DEVICE_ATTR(long_suspend_count, 0664, long_suspend_count_show,
+		   long_suspend_count_store);
+
 static ssize_t outstanding_commands_max_show(
 	struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -629,6 +758,93 @@
 static DEVICE_ATTR(preempt_depth_max, 0664, preempt_depth_max_show,
 		   preempt_depth_max_store);
 
+static ssize_t hardware_ctx_save_time_max_show(
+	struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+	int64_t val;
+
+	val = edgetpu_usage_get_max_watermark(
+			etdev, EDGETPU_MAX_WATERMARK_HARDWARE_CTX_SAVE_TIME_US);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
+static ssize_t hardware_ctx_save_time_max_store(
+	struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+	struct edgetpu_usage_stats *ustats = etdev->usage_stats;
+
+	if (ustats) {
+		mutex_lock(&ustats->usage_stats_lock);
+		ustats->max_watermark[EDGETPU_MAX_WATERMARK_HARDWARE_CTX_SAVE_TIME_US] = 0;
+		mutex_unlock(&ustats->usage_stats_lock);
+	}
+
+	return count;
+}
+static DEVICE_ATTR(hardware_ctx_save_time_max, 0664, hardware_ctx_save_time_max_show,
+		   hardware_ctx_save_time_max_store);
+
+static ssize_t scalar_fence_wait_time_max_show(
+	struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+	int64_t val;
+
+	val = edgetpu_usage_get_max_watermark(
+			etdev, EDGETPU_MAX_WATERMARK_SCALAR_FENCE_WAIT_TIME_US);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
+static ssize_t scalar_fence_wait_time_max_store(
+	struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+	struct edgetpu_usage_stats *ustats = etdev->usage_stats;
+
+	if (ustats) {
+		mutex_lock(&ustats->usage_stats_lock);
+		ustats->max_watermark[EDGETPU_MAX_WATERMARK_SCALAR_FENCE_WAIT_TIME_US] = 0;
+		mutex_unlock(&ustats->usage_stats_lock);
+	}
+
+	return count;
+}
+static DEVICE_ATTR(scalar_fence_wait_time_max, 0664, scalar_fence_wait_time_max_show,
+		   scalar_fence_wait_time_max_store);
+
+static ssize_t suspend_time_max_show(
+	struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+	int64_t val;
+
+	val = edgetpu_usage_get_max_watermark(
+			etdev, EDGETPU_MAX_WATERMARK_SUSPEND_TIME_US);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
+static ssize_t suspend_time_max_store(
+	struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+	struct edgetpu_usage_stats *ustats = etdev->usage_stats;
+
+	if (ustats) {
+		mutex_lock(&ustats->usage_stats_lock);
+		ustats->max_watermark[EDGETPU_MAX_WATERMARK_SUSPEND_TIME_US] = 0;
+		mutex_unlock(&ustats->usage_stats_lock);
+	}
+
+	return count;
+}
+static DEVICE_ATTR(suspend_time_max, 0664, suspend_time_max_show,
+		   suspend_time_max_store);
+
 static ssize_t fw_thread_stats_show(
 	struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -681,8 +897,14 @@
 	&dev_attr_param_cache_miss_count.attr,
 	&dev_attr_context_preempt_count.attr,
 	&dev_attr_hardware_preempt_count.attr,
+	&dev_attr_hardware_ctx_save_time.attr,
+	&dev_attr_scalar_fence_wait_time.attr,
+	&dev_attr_long_suspend_count.attr,
 	&dev_attr_outstanding_commands_max.attr,
 	&dev_attr_preempt_depth_max.attr,
+	&dev_attr_hardware_ctx_save_time_max.attr,
+	&dev_attr_scalar_fence_wait_time_max.attr,
+	&dev_attr_suspend_time_max.attr,
 	&dev_attr_fw_thread_stats.attr,
 	NULL,
 };
@@ -721,6 +943,13 @@
 	if (ustats) {
 		usage_stats_remove_uids(ustats);
 		device_remove_group(etdev->dev, &usage_stats_attr_group);
+		/* free the frequency table if allocated */
+		mutex_lock(&etdev->freq_lock);
+		if (etdev->freq_table)
+			kvfree(etdev->freq_table);
+		etdev->freq_table = NULL;
+		etdev->freq_count = 0;
+		mutex_unlock(&etdev->freq_lock);
 	}
 
 	etdev_dbg(etdev, "%s exit\n", __func__);
diff --git a/drivers/edgetpu/edgetpu-usage-stats.h b/drivers/edgetpu/edgetpu-usage-stats.h
index c76899a..a60b107 100644
--- a/drivers/edgetpu/edgetpu-usage-stats.h
+++ b/drivers/edgetpu/edgetpu-usage-stats.h
@@ -74,8 +74,14 @@
 	EDGETPU_COUNTER_CONTEXT_PREEMPTS = 6,
 	/* Number of times a hardware preemption occurred. */
 	EDGETPU_COUNTER_HARDWARE_PREEMPTS = 7,
+	/* Total time (us) spent saving hw ctx during hw preemption */
+	EDGETPU_COUNTER_HARDWARE_CTX_SAVE_TIME_US = 8,
+	/* Total time (us) spent waiting to hit the scalar fence during hw preemption */
+	EDGETPU_COUNTER_SCALAR_FENCE_WAIT_TIME_US = 9,
+	/* Number of times the (firmware) suspend function took longer than the SLA time. */
+	EDGETPU_COUNTER_LONG_SUSPEND = 10,
 
-	EDGETPU_COUNTER_COUNT = 8, /* number of counters above */
+	EDGETPU_COUNTER_COUNT = 11, /* number of counters above */
 };
 
 /* Generic counter. Only reported if it has a value larger than 0. */
@@ -94,9 +100,15 @@
 	EDGETPU_MAX_WATERMARK_OUT_CMDS = 0,
 	/* Number of preempted contexts at any given time. */
 	EDGETPU_MAX_WATERMARK_PREEMPT_DEPTH = 1,
+	/* Max time (us) spent saving hw ctx during hw preemption */
+	EDGETPU_MAX_WATERMARK_HARDWARE_CTX_SAVE_TIME_US = 2,
+	/* Max time (us) spent waiting to hit the scalar fence during hw preemption */
+	EDGETPU_MAX_WATERMARK_SCALAR_FENCE_WAIT_TIME_US = 3,
+	/* Max time (us) spent in the (firmware) suspend function. */
+	EDGETPU_MAX_WATERMARK_SUSPEND_TIME_US = 4,
 
 	/* Number of watermark types above */
-	EDGETPU_MAX_WATERMARK_TYPE_COUNT = 2,
+	EDGETPU_MAX_WATERMARK_TYPE_COUNT = 5,
 };
 
 /* Max watermark. Only reported if it has a value larger than 0. */
@@ -138,6 +150,7 @@
 	EDGETPU_METRIC_TYPE_COUNTER = 3,
 	EDGETPU_METRIC_TYPE_THREAD_STATS = 4,
 	EDGETPU_METRIC_TYPE_MAX_WATERMARK = 5,
+	EDGETPU_METRIC_TYPE_DVFS_FREQUENCY_INFO = 6,
 };
 
 /*
@@ -153,6 +166,7 @@
 		struct edgetpu_usage_counter counter;
 		struct edgetpu_thread_stats thread_stats;
 		struct edgetpu_usage_max_watermark max_watermark;
+		uint32_t dvfs_frequency_info;
 	};
 };
 
diff --git a/drivers/edgetpu/mobile-debug-dump.c b/drivers/edgetpu/mobile-debug-dump.c
index 4ef02ac..b6b7c2a 100644
--- a/drivers/edgetpu/mobile-debug-dump.c
+++ b/drivers/edgetpu/mobile-debug-dump.c
@@ -518,6 +518,7 @@
 		return -ENOMEM;
 	etdev->debug_dump_handlers[DUMP_REASON_REQ_BY_USER] = mobile_sscd_generate_coredump;
 	etdev->debug_dump_handlers[DUMP_REASON_RECOVERABLE_FAULT] = mobile_sscd_generate_coredump;
+	etdev->debug_dump_handlers[DUMP_REASON_FW_CHECKPOINT] = mobile_sscd_generate_coredump;
 
 	pdev->sscd_info.pdata = &sscd_pdata;
 	pdev->sscd_info.dev = &sscd_dev;