/*
 * Copyright 2011 Calxeda, Inc.
 * Based on PPC version Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef ASM_EDAC_H
#define ASM_EDAC_H
/*
 * ECC atomic, DMA, SMP and interrupt safe scrub function.
 * Implements the per arch edac_atomic_scrub() that EDAC uses for software
 * ECC scrubbing. It reads memory and then writes back the original
 * value, allowing the hardware to detect and correct memory errors.
 */

/*
 * edac_atomic_scrub - atomically read and write back a range of memory
 * @va:   virtual start address of the range to scrub
 * @size: length of the range in bytes
 *
 * Walks the range one 32-bit word at a time; any trailing bytes beyond a
 * whole word are not scrubbed (size is divided down to a word count).
 * On cores older than ARMv6 there are no ldrex/strex instructions, so the
 * function compiles to a no-op there.
 */
static inline void edac_atomic_scrub(void *va, u32 size)
{
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int *virt_addr = va;
	unsigned int temp, temp2;	/* temp = word read, temp2 = strex status */
	unsigned int i;

	for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
		/* Very carefully read and write to memory atomically
		 * so we are interrupt, DMA and SMP safe.
		 *
		 * ldrex marks the word for exclusive access; strex writes
		 * the same value back and returns nonzero in %1 if the
		 * exclusive monitor was lost in between (another agent
		 * touched the location), in which case we retry from 1:.
		 */
		__asm__ __volatile__("\n"
			"1: ldrex %0, [%2]\n"
			" strex %1, %0, [%2]\n"
			" teq %1, #0\n"
			" bne 1b\n"
			: "=&r"(temp), "=&r"(temp2)
			: "r"(virt_addr)
			: "cc");
	}
#endif
}

#endif