 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, 2019 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap, a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), which is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed by
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets.
 *
 * To synchronize allocations in the shared memory heaps, a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 */
/*
 * Item 3 of the global heap contains an array of versions for the various
 * software components in the SoC. We verify that the boot loader version
 * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
 */
#define SMEM_ITEM_VERSION		3
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_EXPECTED_VERSION		11
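
/*
 * Illustrative example: the version word read from item 3 carries the major
 * version in its upper 16 bits, so a value of 0x000b0000 (11 << 16) passes
 * the "version >> 16 == SMEM_EXPECTED_VERSION" check performed at probe time.
 */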
/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8
/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		9
/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command: current command to be executed
 * @status: status of the currently requested command
 * @params: parameters to the command
 */
struct smem_proc_comm {
/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated: boolean to indicate if this entry is used
 * @offset: offset to the allocated space
 * @size: size of the allocated space, 8 byte aligned
 * @aux_base: base address for the memory region used by this unit, or 0 for
 *	the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 aux_base; /* bits 1:0 reserved */

#define AUX_BASE_MASK		0xfffffffc
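
/*
 * Note (explanatory): bits 1:0 of @aux_base are reserved, so lookups mask
 * them off with AUX_BASE_MASK before comparing against a region's aux_base;
 * see qcom_smem_get_global() below.
 */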
/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm: proc_comm communication interface (legacy)
 * @version: array of versions for the various subsystems
 * @initialized: boolean to indicate that smem is initialized
 * @free_offset: index of the first unallocated byte in smem
 * @available: number of bytes available for allocation
 * @reserved: reserved field, must be 0
 * @toc: array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset: offset, within the main shared memory region, of the partition
 * @size: size of the partition
 * @flags: flags for the partition (currently unused)
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @reserved: reserved entries for later use
 */
struct smem_ptable_entry {
/**
 * struct smem_ptable - partition table for the private partitions
 * @magic: magic number, must be SMEM_PTABLE_MAGIC
 * @version: version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved: for now reserved entries
 * @entry: list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	struct smem_ptable_entry entry[];

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
/**
 * struct smem_partition_header - header of the partitions
 * @magic: magic number, must be SMEM_PART_MAGIC
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @size: size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *	this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *	partition
 * @reserved: for now reserved entries
 */
struct smem_partition_header {
	__le32 offset_free_uncached;
	__le32 offset_free_cached;

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */
/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary: magic number, must be SMEM_PRIVATE_CANARY
 * @item: identifying number of the smem item
 * @size: size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved: for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le32 size; /* includes padding bytes */

#define SMEM_PRIVATE_CANARY	0xa5a5
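
/*
 * Illustrative example (derived from the allocator below, not normative):
 * a 13-byte item placed in the uncached region gets a
 * struct smem_private_entry header followed by ALIGN(13, 8) = 16 bytes of
 * data, so size = 16, padding_data = 3 and padding_hdr = 0;
 * private_entry_next() then advances by sizeof(header) + padding_hdr + size
 * to reach the following header.
 */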
/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base: identifier of aux_mem base
 * @virt_base: virtual base address of memory with this aux_mem identifier
 * @size: size of the memory region
 */
struct smem_region {
	void __iomem *virt_base;
/**
 * struct qcom_smem - device data for the smem device
 * @dev: device pointer
 * @hwlock: reference to a hwspinlock
 * @ptable_entries: list of pointers to partition table entries of the current
 *	processor/host
 * @num_regions: number of @regions
 * @regions: list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct hwspinlock *hwlock;
	struct smem_ptable_entry *ptable_entries[SMEM_HOST_COUNT];
	unsigned num_regions;
	struct smem_region regions[0];
/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000
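
/*
 * Note (explanatory): qcom_smem_alloc() and qcom_smem_get() below acquire
 * __smem->hwlock with hwspin_lock_timeout_irqsave() before touching the
 * shared heaps, matching the remote spinlock convention described at the
 * top of this file.
 */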
/* Resolve a partition table entry to the partition's header in memory */
static struct smem_partition_header *
ptable_entry_to_phdr(struct smem_ptable_entry *entry)
	return __smem->regions[0].virt_base + le32_to_cpu(entry->offset);

/* First free byte of the uncached region, i.e. the end of the entry list */
static struct smem_private_entry *
phdr_to_last_private_entry(struct smem_partition_header *phdr)
	return p + le32_to_cpu(phdr->offset_free_uncached);

/* Lower bound of the cached region; uncached allocations must stay below it */
static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
	return p + le32_to_cpu(phdr->offset_free_cached);

/* First entry of the uncached region, directly after the partition header */
static struct smem_private_entry *
phdr_to_first_private_entry(struct smem_partition_header *phdr)
	return p + sizeof(*phdr);

/* Skip an entry's header, header padding and (aligned) data */
static struct smem_private_entry *
private_entry_next(struct smem_private_entry *e)
	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);

/* Data area of an entry, directly after its header and header padding */
static void *entry_to_item(struct smem_private_entry *e)
	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_ptable_entry *entry,
	struct smem_partition_header *phdr;
	struct smem_private_entry *hdr, *end;
	phdr = ptable_entry_to_phdr(entry);
	p_end = (void *)phdr + le32_to_cpu(entry->size);

	hdr = phdr_to_first_private_entry(phdr);
	end = phdr_to_last_private_entry(phdr);
	cached = phdr_to_first_cached_entry(phdr);

	if (WARN_ON((void *)end > p_end || (void *)cached > p_end))

		if (hdr->canary != SMEM_PRIVATE_CANARY) {
				"Found invalid canary in host %d:%d partition\n",
				phdr->host0, phdr->host1);

		if (le16_to_cpu(hdr->item) == item)

		hdr = private_entry_next(hdr);

	if (WARN_ON((void *)hdr > p_end))

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size >= cached) {
		dev_err(smem->dev, "Out of memory\n");

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;
	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
static int qcom_smem_alloc_global(struct qcom_smem *smem,
	struct smem_header *header;
	struct smem_global_entry *entry;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);
/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host: remote processor id, or -1
 * @item: smem item handle
 * @size: number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
	struct smem_ptable_entry *entry;

		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
			"Rejecting allocation of static entry %d\n", item);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,

	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
		entry = __smem->ptable_entries[host];
		ret = qcom_smem_alloc_private(__smem, entry, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

EXPORT_SYMBOL(qcom_smem_alloc);
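
/*
 * Note (explanatory): SMEM is an allocate-only heap, so a successful
 * qcom_smem_alloc() is permanent; there is no corresponding free operation
 * and callers are expected to allocate a given item at most once.
 */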
static void *qcom_smem_get_global(struct qcom_smem *smem,
	struct smem_global_entry *entry;
	struct smem_header *header;
	struct smem_region *area;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))
		return ERR_PTR(-EINVAL);

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		area = &smem->regions[i];

		if (area->aux_base == aux_base || !aux_base) {
			e_size = le32_to_cpu(entry->size);
			entry_offset = le32_to_cpu(entry->offset);

			if (WARN_ON(e_size + entry_offset > area->size))
				return ERR_PTR(-EINVAL);

			return area->virt_base + entry_offset;

	return ERR_PTR(-ENOENT);
static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_ptable_entry *entry,
	struct smem_partition_header *phdr;
	struct smem_private_entry *e, *end;
	void *item_ptr, *p_end;

	phdr = ptable_entry_to_phdr(entry);
	partition_size = le32_to_cpu(entry->size);
	p_end = (void *)phdr + partition_size;

	e = phdr_to_first_private_entry(phdr);
	end = phdr_to_last_private_entry(phdr);

	if (WARN_ON((void *)end > p_end))
		return ERR_PTR(-EINVAL);

		if (e->canary != SMEM_PRIVATE_CANARY) {
				"Found invalid canary in host %d:%d partition\n",
				phdr->host0, phdr->host1);
			return ERR_PTR(-EINVAL);

		if (le16_to_cpu(e->item) == item) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (e_size < partition_size &&
				    padding_data < e_size)
					*size = e_size - padding_data;
				else
					return ERR_PTR(-EINVAL);

			item_ptr = entry_to_item(e);
			if (WARN_ON(item_ptr > p_end))
				return ERR_PTR(-EINVAL);

		e = private_entry_next(e);

	if (WARN_ON((void *)e > p_end))
		return ERR_PTR(-EINVAL);

	return ERR_PTR(-ENOENT);
/**
 * qcom_smem_get() - resolve pointer and size of a smem item
 * @host: the remote processor, or -1
 * @item: smem item handle
 * @size: pointer to be filled out with size of the item
 *
 * Looks up smem item and returns pointer to it. Size of smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
	struct smem_ptable_entry *entry;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,

	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
		entry = __smem->ptable_entries[host];
		ptr = qcom_smem_get_private(__smem, entry, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

EXPORT_SYMBOL(qcom_smem_get);
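
/*
 * Illustrative usage sketch (not part of this driver): a client would
 * typically allocate an item once and then map it. MY_SMEM_ITEM and
 * remote_host below are hypothetical placeholders for a real item number
 * and host id.
 *
 *	size_t size;
 *	void *ptr;
 *	int ret;
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, 64);
 *	if (ret < 0)
 *		return ret;
 *
 *	ptr = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 */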
/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host: the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
	struct smem_partition_header *phdr;
	struct smem_ptable_entry *entry;
	struct smem_header *header;

		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
		entry = __smem->ptable_entries[host];
		phdr = ptable_entry_to_phdr(entry);

		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > le32_to_cpu(entry->size))

		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);

		if (ret > __smem->regions[0].size)

EXPORT_SYMBOL(qcom_smem_get_free_space);
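
/*
 * Note (explanatory): for a private partition the value returned above is
 * simply the gap between the cached and uncached free offsets, i.e. the
 * unallocated middle of the partition; for the global heap it is the
 * "available" counter maintained by qcom_smem_alloc_global().
 */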
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
	versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
	if (IS_ERR(versions)) {
		dev_err(smem->dev, "Unable to read the version item\n");

	if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
		dev_err(smem->dev, "Version item is too small\n");

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned remote_host;
	u32 version, host0, host1;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))

	version = le32_to_cpu(ptable->version);
			"Unsupported partition header version %d\n", version);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);

		if (host0 != local_host && host1 != local_host)

		if (!le32_to_cpu(entry->offset))

		if (!le32_to_cpu(entry->size))

		if (host0 == local_host)

		if (remote_host >= SMEM_HOST_COUNT) {
				"Invalid remote host %d\n",

		if (smem->ptable_entries[remote_host]) {
				"Already found a partition for host %d\n",

		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
		host0 = le16_to_cpu(header->host0);
		host1 = le16_to_cpu(header->host1);

		if (memcmp(header->magic, SMEM_PART_MAGIC,
			   sizeof(header->magic))) {
				"Partition %d has invalid magic\n", i);

		if (host0 != local_host && host1 != local_host) {
				"Partition %d hosts are invalid\n", i);

		if (host0 != remote_host && host1 != remote_host) {
				"Partition %d hosts are invalid\n", i);

		if (header->size != entry->size) {
				"Partition %d has invalid size\n", i);

		if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
				"Partition %d has invalid free pointer\n", i);

		smem->ptable_entries[remote_host] = entry;
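
/*
 * Note (explanatory): the table walk above records, for every partition that
 * names the local host, the matching table entry indexed by the remote host;
 * qcom_smem_alloc() and qcom_smem_get() later use these entries to route
 * requests into the right private partition.
 */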
static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, name, 0);
		dev_err(dev, "No %s specified\n", name);

	ret = of_address_to_resource(np, 0, &r);

	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = resource_size(&r);
	smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start,
	if (!smem->regions[i].virt_base)
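
/*
 * Note (explanatory): qcom_smem_probe() below resolves a "memory-region"
 * phandle for the main SMEM memory, an optional "qcom,rpm-msg-ram" phandle
 * for the auxiliary region, and a hardware spinlock obtained through
 * of_hwspin_lock_get_id() and hwspin_lock_request_specific().
 */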
static int qcom_smem_probe(struct platform_device *pdev)
	struct smem_header *header;
	struct qcom_smem *smem;

	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);

	if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
					"qcom,rpm-msg-ram", 1)))

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");

	version = qcom_smem_get_sbl_version(smem);
	if (version >> 16 != SMEM_EXPECTED_VERSION) {
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);

	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
		dev_err(&pdev->dev, "failed to retrieve hwlock\n");

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
static int qcom_smem_remove(struct platform_device *pdev)
	hwspin_lock_free(__smem->hwlock);

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");