
Merge branch 'for-next/misc'

Conflicts:
	lib/Makefile
Sascha Hauer 2014-08-07 06:15:23 +02:00
commit 38c3b2455e
58 changed files with 1843 additions and 791 deletions

View File

@ -8,3 +8,5 @@ Common leds properties
* ``panic`` - LED turns on when barebox panics
* ``net`` - LED indicates network activity
* ``label``: The label for this LED. If omitted, the label is taken
from the node name (excluding the unit address).

View File

@ -2,6 +2,8 @@ config ARM
bool
select HAS_KALLSYMS
select HAS_MODULES
select HAS_DMA
select HAS_CACHE
select HAVE_CONFIGURABLE_TEXT_BASE
select HAVE_PBL_IMAGE
select HAVE_IMAGE_COMPRESSION

View File

@ -5,6 +5,7 @@ config BLACKFIN
select HAS_MODULES
select HAVE_CONFIGURABLE_MEMORY_LAYOUT
select HAVE_CONFIGURABLE_TEXT_BASE
select GENERIC_FIND_NEXT_BIT
default y
config BF561

View File

@ -1,11 +1,4 @@
/*
* barebox - bitops.h Routines for bit operations
*
* Copyright (c) 2005 blackfin.uclinux.org
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
@ -16,349 +9,27 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#ifndef _BLACKFIN_BITOPS_H
#define _BLACKFIN_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*/
#include <asm/byteorder.h>
#include <asm/system.h>
#ifdef __KERNEL__
/*
* Function prototypes to keep gcc -Wall happy
*/
/*
* The __ functions are not atomic
*/
/*
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
static __inline__ unsigned long ffz(unsigned long word)
{
unsigned long result = 0;
while (word & 1) {
result++;
word >>= 1;
}
return result;
}
static __inline__ void set_bit(int nr, volatile void *addr)
{
int *a = (int *) addr;
int mask;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
save_and_cli(flags);
*a |= mask;
restore_flags(flags);
}
static __inline__ void __set_bit(int nr, volatile void *addr)
{
int *a = (int *) addr;
int mask;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
*a |= mask;
}
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
static __inline__ void clear_bit(int nr, volatile void *addr)
{
int *a = (int *) addr;
int mask;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
save_and_cli(flags);
*a &= ~mask;
restore_flags(flags);
}
static __inline__ void change_bit(int nr, volatile void *addr)
{
int mask, flags;
unsigned long *ADDR = (unsigned long *) addr;
ADDR += nr >> 5;
mask = 1 << (nr & 31);
save_and_cli(flags);
*ADDR ^= mask;
restore_flags(flags);
}
static __inline__ void __change_bit(int nr, volatile void *addr)
{
int mask;
unsigned long *ADDR = (unsigned long *) addr;
ADDR += nr >> 5;
mask = 1 << (nr & 31);
*ADDR ^= mask;
}
static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *) addr;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
save_and_cli(flags);
retval = (mask & *a) != 0;
*a |= mask;
restore_flags(flags);
return retval;
}
static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *) addr;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
retval = (mask & *a) != 0;
*a |= mask;
return retval;
}
static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *) addr;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
save_and_cli(flags);
retval = (mask & *a) != 0;
*a &= ~mask;
restore_flags(flags);
return retval;
}
static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *) addr;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
retval = (mask & *a) != 0;
*a &= ~mask;
return retval;
}
static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *) addr;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
save_and_cli(flags);
retval = (mask & *a) != 0;
*a ^= mask;
restore_flags(flags);
return retval;
}
static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = (volatile unsigned int *) addr;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
retval = (mask & *a) != 0;
*a ^= mask;
return retval;
}
/*
* This routine doesn't need to be atomic.
*/
static __inline__ int __constant_test_bit(int nr,
const volatile void *addr)
{
return ((1UL << (nr & 31)) &
(((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline__ int __test_bit(int nr, volatile void *addr)
{
int *a = (int *) addr;
int mask;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
return ((mask & *a) != 0);
}
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
__constant_test_bit((nr),(addr)) : \
__test_bit((nr),(addr)))
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL;
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset &= 31UL;
if (offset) {
tmp = *(p++);
tmp |= ~0UL >> (32 - offset);
if (size < 32)
goto found_first;
if (~tmp)
goto found_middle;
size -= 32;
result += 32;
}
while (size & ~31UL) {
if (~(tmp = *(p++)))
goto found_middle;
result += 32;
size -= 32;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp |= ~0UL >> size;
found_middle:
return result + ffz(tmp);
}
#ifndef _ASM_BITOPS_H_
#define _ASM_BITOPS_H_
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ops.h>
static __inline__ int ext2_set_bit(int nr, volatile void *addr)
{
int mask, retval;
unsigned long flags;
volatile unsigned char *ADDR = (unsigned char *) addr;
#define set_bit(x, y) __set_bit(x, y)
#define clear_bit(x, y) __clear_bit(x, y)
#define change_bit(x, y) __change_bit(x, y)
#define test_and_set_bit(x, y) __test_and_set_bit(x, y)
#define test_and_clear_bit(x, y) __test_and_clear_bit(x, y)
#define test_and_change_bit(x, y) __test_and_change_bit(x, y)
ADDR += nr >> 3;
mask = 1 << (nr & 0x07);
save_and_cli(flags);
retval = (mask & *ADDR) != 0;
*ADDR |= mask;
restore_flags(flags);
return retval;
}
static __inline__ int ext2_clear_bit(int nr, volatile void *addr)
{
int mask, retval;
unsigned long flags;
volatile unsigned char *ADDR = (unsigned char *) addr;
ADDR += nr >> 3;
mask = 1 << (nr & 0x07);
save_and_cli(flags);
retval = (mask & *ADDR) != 0;
*ADDR &= ~mask;
restore_flags(flags);
return retval;
}
static __inline__ int ext2_test_bit(int nr, const volatile void *addr)
{
int mask;
const volatile unsigned char *ADDR = (const unsigned char *) addr;
ADDR += nr >> 3;
mask = 1 << (nr & 0x07);
return ((mask & *ADDR) != 0);
}
#define ext2_find_first_zero_bit(addr, size) \
ext2_find_next_zero_bit((addr), (size), 0)
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
unsigned long size,
unsigned long
offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL;
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset &= 31UL;
if (offset) {
tmp = *(p++);
tmp |= ~0UL >> (32 - offset);
if (size < 32)
goto found_first;
if (~tmp)
goto found_middle;
size -= 32;
result += 32;
}
while (size & ~31UL) {
if (~(tmp = *(p++)))
goto found_middle;
result += 32;
size -= 32;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp |= ~0UL >> size;
found_middle:
return result + ffz(tmp);
}
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#endif
#endif
#endif /* _ASM_BITOPS_H_ */

View File

@ -7,6 +7,7 @@ config ARCH_EFI
select EFI_GUID
select EFI_DEVICEPATH
select PRINTF_UUID
select GENERIC_FIND_NEXT_BIT
config ARCH_TEXT_BASE
hex

View File

@ -12,4 +12,11 @@
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ops.h>
#define set_bit(x, y) __set_bit(x, y)
#define clear_bit(x, y) __clear_bit(x, y)
#define change_bit(x, y) __change_bit(x, y)
#define test_and_set_bit(x, y) __test_and_set_bit(x, y)
#define test_and_clear_bit(x, y) __test_and_clear_bit(x, y)
#define test_and_change_bit(x, y) __test_and_change_bit(x, y)
#endif

View File

@ -8,6 +8,11 @@
#size-cells = <1>;
ranges;
wdt: wdt@b0002000 {
compatible = "ingenic,jz4740-wdt";
reg = <0xb0002000 0x10>;
};
serial0: serial@b0030000 {
compatible = "ingenic,jz4740-uart";
reg = <0xb0030000 0x20>;

View File

@ -12,20 +12,24 @@
*
*/
/**
* @file
* @brief mips bit operations
*
* This file is required only to make all sources happy including
* 'linux/bitops.h'
*/
#ifndef _ASM_MIPS_BITOPS_H_
#define _ASM_MIPS_BITOPS_H_
#ifndef _ASM_BITOPS_H_
#define _ASM_BITOPS_H_
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ops.h>
#endif /* _ASM_MIPS_BITOPS_H_ */
#define set_bit(x, y) __set_bit(x, y)
#define clear_bit(x, y) __clear_bit(x, y)
#define change_bit(x, y) __change_bit(x, y)
#define test_and_set_bit(x, y) __test_and_set_bit(x, y)
#define test_and_clear_bit(x, y) __test_and_clear_bit(x, y)
#define test_and_change_bit(x, y) __test_and_change_bit(x, y)
#endif /* _ASM_BITOPS_H_ */

View File

@ -6,6 +6,8 @@ config ARCH_TEXT_BASE
config CPU_JZ4755
bool
select WATCHDOG
select WATCHDOG_JZ4740
choice
prompt "Board type"

View File

@ -58,28 +58,6 @@
#define TCU_OSTCSR_RTC_EN (1 << 1) /* select rtcclk as the timer clock input */
#define TCU_OSTCSR_PCK_EN (1 << 0) /* select pclk as the timer clock input */
/*************************************************************************
* WDT (WatchDog Timer)
*************************************************************************/
#define WDT_TDR (WDT_BASE + 0x00)
#define WDT_TCER (WDT_BASE + 0x04)
#define WDT_TCNT (WDT_BASE + 0x08)
#define WDT_TCSR (WDT_BASE + 0x0c)
#define WDT_TCSR_PRESCALE_BIT 3
#define WDT_TCSR_PRESCALE_MASK (0x7 << WDT_TCSR_PRESCALE_BIT)
#define WDT_TCSR_PRESCALE1 (0x0 << WDT_TCSR_PRESCALE_BIT)
#define WDT_TCSR_PRESCALE4 (0x1 << WDT_TCSR_PRESCALE_BIT)
#define WDT_TCSR_PRESCALE16 (0x2 << WDT_TCSR_PRESCALE_BIT)
#define WDT_TCSR_PRESCALE64 (0x3 << WDT_TCSR_PRESCALE_BIT)
#define WDT_TCSR_PRESCALE256 (0x4 << WDT_TCSR_PRESCALE_BIT)
#define WDT_TCSR_PRESCALE1024 (0x5 << WDT_TCSR_PRESCALE_BIT)
#define WDT_TCSR_EXT_EN (1 << 2)
#define WDT_TCSR_RTC_EN (1 << 1)
#define WDT_TCSR_PCK_EN (1 << 0)
#define WDT_TCER_TCEN (1 << 0)
/*************************************************************************
* RTC
*************************************************************************/

View File

@ -24,8 +24,6 @@
#include <io.h>
#include <mach/jz4750d_regs.h>
#define JZ_EXTAL 24000000
static void __noreturn jz4750d_halt(void)
{
while (1) {
@ -39,22 +37,6 @@ static void __noreturn jz4750d_halt(void)
unreachable();
}
void __noreturn reset_cpu(ulong addr)
{
__raw_writew(WDT_TCSR_PRESCALE4 | WDT_TCSR_EXT_EN, (u16 *)WDT_TCSR);
__raw_writew(0, (u16 *)WDT_TCNT);
/* reset after 4ms */
__raw_writew(JZ_EXTAL / 1000, (u16 *)WDT_TDR);
/* enable wdt clock */
__raw_writel(TCU_TSCR_WDTSC, (u32 *)TCU_TSCR);
/* start wdt */
__raw_writeb(WDT_TCER_TCEN, (u8 *)WDT_TCER);
unreachable();
}
EXPORT_SYMBOL(reset_cpu);
void __noreturn poweroff()
{
u32 ctrl;

View File

@ -2,7 +2,9 @@ config NIOS2
bool
select HAS_KALLSYMS
select HAS_MODULES
select HAS_CACHE
select HAVE_CONFIGURABLE_MEMORY_LAYOUT
select GENERIC_FIND_NEXT_BIT
default y
config ARCH_TEXT_BASE

View File

@ -1,9 +1,35 @@
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#ifndef _ASM_BITOPS_H_
#define _ASM_BITOPS_H_
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ops.h>
#endif /* _ASM_BITOPS_H */
#define set_bit(x, y) __set_bit(x, y)
#define clear_bit(x, y) __clear_bit(x, y)
#define change_bit(x, y) __change_bit(x, y)
#define test_and_set_bit(x, y) __test_and_set_bit(x, y)
#define test_and_clear_bit(x, y) __test_and_clear_bit(x, y)
#define test_and_change_bit(x, y) __test_and_change_bit(x, y)
#endif /* _ASM_BITOPS_H_ */

View File

@ -1,7 +1,9 @@
config OPENRISC
bool
select HAS_CACHE
select HAVE_CONFIGURABLE_MEMORY_LAYOUT
select HAVE_DEFAULT_ENVIRONMENT_NEW
select GENERIC_FIND_NEXT_BIT
default y
# not used

View File

@ -1,6 +1,4 @@
/*
* (C) Copyright 2011, Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
@ -11,14 +9,27 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#ifndef __ASM_OPENRISC_BITOPS_H
#define __ASM_OPENRISC_BITOPS_H
#ifndef _ASM_BITOPS_H_
#define _ASM_BITOPS_H_
#define PLATFORM_FLS
#include <asm/bitops/fls.h>
#define PLATFORM_FFS
#include <asm/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ops.h>
#endif /* __ASM_GENERIC_BITOPS_H */
#define set_bit(x, y) __set_bit(x, y)
#define clear_bit(x, y) __clear_bit(x, y)
#define change_bit(x, y) __change_bit(x, y)
#define test_and_set_bit(x, y) __test_and_set_bit(x, y)
#define test_and_clear_bit(x, y) __test_and_clear_bit(x, y)
#define test_and_change_bit(x, y) __test_and_change_bit(x, y)
#endif /* _ASM_BITOPS_H_ */

View File

@ -3,6 +3,8 @@ config PPC
select HAVE_CONFIGURABLE_TEXT_BASE
select HAS_KALLSYMS
select HAS_MODULES
select HAS_CACHE
select GENERIC_FIND_NEXT_BIT
select OFTREE
default y

View File

@ -153,6 +153,11 @@ extern __inline__ int ffz(unsigned int x)
return __ilog2(x & -x);
}
static __inline__ int __ffs(unsigned long x)
{
return __ilog2(x & -x);
}
/*
* fls: find last (most-significant) bit set.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
@ -182,49 +187,7 @@ extern __inline__ int ffs(int x)
#endif /* __KERNEL__ */
/*
* This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h.
*/
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
extern __inline__ unsigned long find_next_zero_bit(void * addr,
unsigned long size, unsigned long offset)
{
unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
unsigned int result = offset & ~31UL;
unsigned int tmp;
if (offset >= size)
return size;
size -= result;
offset &= 31UL;
if (offset) {
tmp = *p++;
tmp |= ~0UL >> (32-offset);
if (size < 32)
goto found_first;
if (tmp != ~0U)
goto found_middle;
size -= 32;
result += 32;
}
while (size >= 32) {
if ((tmp = *p++) != ~0U)
goto found_middle;
result += 32;
size -= 32;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp |= ~0UL << size;
found_middle:
return result + ffz(tmp);
}
#include <asm-generic/bitops/find.h>
#define _EXT2_HAVE_ASM_BITOPS_

View File

@ -1,15 +1,28 @@
#ifndef _SANDBOX_BITOPS_H
#define _SANDBOX_BITOPS_H
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#ifndef _ASM_BITOPS_H_
#define _ASM_BITOPS_H_
/* nothing but the defaults.. */
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ops.h>
#define set_bit(x, y) __set_bit(x, y)
@ -19,4 +32,4 @@
#define test_and_clear_bit(x, y) __test_and_clear_bit(x, y)
#define test_and_change_bit(x, y) __test_and_change_bit(x, y)
#endif
#endif /* _ASM_BITOPS_H_ */

View File

@ -20,6 +20,7 @@ config X86
select HAS_MODULES
select HAVE_CONFIGURABLE_MEMORY_LAYOUT
select HAVE_CONFIGURABLE_TEXT_BASE
select GENERIC_FIND_NEXT_BIT
default y
config X86_BOOTLOADER

View File

@ -12,22 +12,24 @@
*
*/
/**
* @file
* @brief x86 bit operations
*
* This file is required only to make all sources happy including
* 'linux/bitops.h'
*/
#ifndef _ASM_BITOPS_H_
#define _ASM_BITOPS_H_
#ifndef _ASM_X86_BITOPS_H_
#define _ASM_X86_BITOPS_H_
#define BITS_PER_LONG 32
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ops.h>
#endif /* _ASM_X86_BITOPS_H_ */
#define set_bit(x, y) __set_bit(x, y)
#define clear_bit(x, y) __clear_bit(x, y)
#define change_bit(x, y) __change_bit(x, y)
#define test_and_set_bit(x, y) __test_and_set_bit(x, y)
#define test_and_clear_bit(x, y) __test_and_clear_bit(x, y)
#define test_and_change_bit(x, y) __test_and_change_bit(x, y)
#endif /* _ASM_BITOPS_H_ */

View File

@ -41,6 +41,8 @@ typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
#define BITS_PER_LONG 32
#endif /* __ASSEMBLY__ */
#endif /* __ASM_X86_TYPES_H */

View File

@ -25,6 +25,22 @@ config ENV_HANDLING
select CRC32
bool
config HAS_CACHE
bool
help
This allows you to run "make ARCH=sandbox allyesconfig".
Drivers that depend on a cache implementation can depend on this
config, so that you don't get a compilation error.
config HAS_DMA
bool
help
This allows you to run "make ARCH=sandbox allyesconfig".
Drivers that depend on a DMA implementation can depend on this
config, so that you don't get a compilation error.
config GENERIC_GPIO
bool
@ -657,6 +673,7 @@ config BAREBOXENV_TARGET
config BAREBOXCRC32_TARGET
bool
prompt "build bareboxcrc32 tool for target"
depends on !SANDBOX
help
'bareboxcrc32' is a userspace tool to generate the crc32 checksums the same way
barebox does. Say yes here to build it for the target.
@ -691,6 +708,7 @@ config COMPILE_LOGLEVEL
5 normal but significant condition (notice)
6 informational (info)
7 debug-level messages (debug)
8 verbose debug messages (vdebug)
config DEFAULT_LOGLEVEL
int "default loglevel"
@ -707,6 +725,7 @@ config DEFAULT_LOGLEVEL
5 normal but significant condition (notice)
6 informational (info)
7 debug-level messages (debug)
8 verbose debug messages (vdebug)
config DEBUG_INFO
bool

View File

@ -1712,29 +1712,6 @@ void *memalign(size_t alignment, size_t bytes)
return chunk2mem(p);
}
#if 0
/*
* valloc just invokes memalign with alignment argument equal
* to the page size of the system (or as near to this as can
* be figured out from all the includes/defines above.)
*/
void *valloc(size_t bytes)
{
return memalign(malloc_getpagesize, bytes);
}
#endif
/*
* pvalloc just invokes valloc for the nearest pagesize
* that will accommodate request
*/
void *pvalloc (size_t bytes)
{
size_t pagesize = malloc_getpagesize;
return memalign(pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
/*
*
* calloc calls malloc, then zeroes out the allocated chunk.
@ -1774,115 +1751,6 @@ void *calloc(size_t n, size_t elem_size)
}
}
/*
*
* cfree just calls free. It is needed/defined on some systems
* that pair it with calloc, presumably for odd historical reasons.
*/
#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
void cfree(void *mem)
{
free(mem);
}
#endif
/*
Malloc_trim gives memory back to the system (via negative
arguments to sbrk) if there is unused memory at the `high' end of
the malloc pool. You can call this after freeing large blocks of
memory to potentially reduce the system-level memory requirements
of a program. However, it cannot guarantee to reduce memory. Under
some allocation patterns, some large free blocks of memory will be
locked between two used chunks, so they cannot be given back to
the system.
The `pad' argument to malloc_trim represents the amount of free
trailing space to leave untrimmed. If this argument is zero,
only the minimum amount of memory to maintain internal data
structures will be left (one page or less). Non-zero arguments
can be supplied to maintain enough trailing space to service
future expected allocations without having to re-obtain memory
from the system.
Malloc_trim returns 1 if it actually released any memory, else 0.
*/
#ifdef USE_MALLOC_TRIM
int malloc_trim(size_t pad)
{
long top_size; /* Amount of top-most memory */
long extra; /* Amount to release */
char *current_brk; /* address returned by pre-check sbrk call */
char *new_brk; /* address returned by negative sbrk call */
unsigned long pagesz = malloc_getpagesize;
top_size = chunksize(top);
extra = ((top_size - pad - MINSIZE + (pagesz - 1)) / pagesz -
1) * pagesz;
if (extra < (long)pagesz) /* Not enough memory to release */
return 0;
else {
/* Test to make sure no one else called sbrk */
current_brk = (char*)(sbrk(0));
if (current_brk != (char*)(top) + top_size)
return 0; /* Apparently we don't own memory; must fail */
else {
new_brk = (char *) (sbrk(-extra));
if (new_brk == (char*)(NULL)) { /* sbrk failed? */
/* Try to figure out what we have */
current_brk = (char*)(sbrk (0));
top_size = current_brk - (char*) top;
if (top_size >= (long)MINSIZE) { /* if not, we are very very dead! */
sbrked_mem = current_brk - sbrk_base;
set_head(top, top_size | PREV_INUSE);
}
return 0;
}
else {
/* Success. Adjust top accordingly. */
set_head(top, (top_size - extra) | PREV_INUSE);
sbrked_mem -= extra;
return 1;
}
}
}
}
#endif
/*
* malloc_usable_size:
*
* This routine tells you how many bytes you can actually use in an
* allocated chunk, which may be more than you requested (although
* often not). You can use this many bytes without worrying about
* overwriting other allocated objects. Not a particularly great
* programming practice, but still sometimes useful.
*/
size_t malloc_usable_size(void *mem)
{
mchunkptr p;
if (!mem)
return 0;
else {
p = mem2chunk(mem);
if (!chunk_is_mmapped(p)) {
if (!inuse(p))
return 0;
return chunksize(p) - SIZE_SZ;
}
return chunksize(p) - 2 * SIZE_SZ;
}
}
/* Utility to update current_mallinfo for malloc_stats and mallinfo() */
#ifdef CONFIG_CMD_MEMINFO
@ -1955,43 +1823,6 @@ void malloc_stats(void)
#endif /* CONFIG_CMD_MEMINFO */
/*
mallopt:
mallopt is the general SVID/XPG interface to tunable parameters.
The format is to provide a (parameter-number, parameter-value) pair.
mallopt then sets the corresponding parameter to the argument
value if it can (i.e., so long as the value is meaningful),
and returns 1 if successful else 0.
See descriptions of tunable parameters above.
*/
#ifndef __BAREBOX__
int mallopt(int param_number, int value)
{
switch (param_number) {
case M_TRIM_THRESHOLD:
trim_threshold = value;
return 1;
case M_TOP_PAD:
top_pad = value;
return 1;
case M_MMAP_THRESHOLD:
mmap_threshold = value;
return 1;
case M_MMAP_MAX:
if (value != 0)
return 0;
else
n_mmaps_max = value;
return 1;
default:
return 0;
}
}
#endif
/*
History:

View File

@ -1725,7 +1725,7 @@ static int parse_stream_outer(struct p_context *ctx, struct in_str *inp, int fla
b_free(&temp);
} while (rcode != -1 && !(flag & FLAG_EXIT_FROM_LOOP)); /* loop on syntax errors, return on EOF */
return (code != 0) ? 1 : 0;
return code;
}
static int parse_string_outer(struct p_context *ctx, const char *s, int flag)

View File

@ -71,7 +71,7 @@ struct resource *__request_region(struct resource *parent,
goto ok;
if (start > r->end)
continue;
pr_warn("%s: 0x%08llx:0x%08llx conflicts with 0x%08llx:0x%08llx\n",
debug("%s: 0x%08llx:0x%08llx conflicts with 0x%08llx:0x%08llx\n",
__func__,
(unsigned long long)start,
(unsigned long long)end,

View File

@ -34,6 +34,7 @@ config DISK_ATA
config DISK_AHCI
bool "AHCI support"
depends on HAS_DMA
select DISK_ATA
config DISK_AHCI_IMX

View File

@ -186,7 +186,7 @@ static int clk_divider_set_rate(struct clk *clk, unsigned long rate,
div = clk_divider_bestdiv(clk, rate, &best_parent_rate);
clk_set_rate(clk_get_parent(clk), best_parent_rate);
} else {
div = parent_rate / rate;
div = DIV_ROUND_UP(parent_rate, rate);
}
value = _get_val(divider, div);
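
Rounding the divider up rather than truncating it keeps the divided clock at or below the requested rate instead of overshooting it. A small standalone sketch of the arithmetic (the frequencies are invented for illustration, and DIV_ROUND_UP is redefined locally so the snippet compiles on its own; barebox provides its own definition):

/* With parent_rate = 66 MHz and a requested 25 MHz, truncating division
 * picks divider 2 (33 MHz, overshoots the request), while rounding up
 * picks divider 3 (22 MHz, stays at or below the request).
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long parent_rate = 66000000, rate = 25000000;
	unsigned long div_trunc = parent_rate / rate;               /* 2 */
	unsigned long div_round = DIV_ROUND_UP(parent_rate, rate);  /* 3 */

	printf("truncated:  div=%lu -> %lu Hz\n", div_trunc, parent_rate / div_trunc);
	printf("rounded up: div=%lu -> %lu Hz\n", div_round, parent_rate / div_round);
	return 0;
}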

View File

@ -1132,7 +1132,7 @@ static struct platform_device_id omap_i2c_ids[] = {
},
};
static __maybe_unused struct of_device_id omap_spi_dt_ids[] = {
static __maybe_unused struct of_device_id omap_i2c_dt_ids[] = {
{
.compatible = "ti,omap3-i2c",
.data = (unsigned long)&omap3_data,
@ -1148,7 +1148,7 @@ static struct driver_d omap_i2c_driver = {
.probe = i2c_omap_probe,
.name = DRIVER_NAME,
.id_table = omap_i2c_ids,
.of_compatible = DRV_OF_COMPAT(omap_spi_dt_ids),
.of_compatible = DRV_OF_COMPAT(omap_i2c_dt_ids),
};
device_platform_driver(omap_i2c_driver);

View File

@ -32,7 +32,7 @@ config KEYBOARD_IMX_KEYPAD
config KEYBOARD_QT1070
tristate "Atmel AT42QT1070 Touch Sensor Chip"
depends on I2C
depends on I2C && GENERIC_GPIO
select POLLER
help
Say Y here if you want to use Atmel AT42QT1070 QTouch

View File

@ -206,13 +206,17 @@ static int led_gpio_of_probe(struct device_d *dev)
struct gpio_led *gled;
enum of_gpio_flags flags;
int gpio;
const char *label;
gpio = of_get_named_gpio_flags(child, "gpios", 0, &flags);
if (gpio < 0)
continue;
gled = xzalloc(sizeof(*gled));
gled->led.name = xstrdup(child->name);
if (of_property_read_string(child, "label", &label))
label = child->name;
gled->led.name = xstrdup(label);
gled->gpio = gpio;
gled->active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;

View File

@ -616,6 +616,9 @@ retry_scr:
break;
}
if (mci->scr[0] & SD_DATA_4BIT)
mci->card_caps |= MMC_CAP_4_BIT_DATA;
/* Version 1.0 doesn't support switching */
if (mci->version == SD_VERSION_1_0)
return 0;
@ -634,9 +637,6 @@ retry_scr:
break;
}
if (mci->scr[0] & SD_DATA_4BIT)
mci->card_caps |= MMC_CAP_4_BIT_DATA;
/* If high-speed isn't supported, we return */
if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
return 0;

View File

@ -123,9 +123,15 @@ static int mc34704_probe(struct device_d *dev)
return 0;
}
static __maybe_unused struct of_device_id mc34704_dt_ids[] = {
{ .compatible = "fsl,mc34704", },
{ }
};
static struct driver_d mc34704_driver = {
.name = DRIVERNAME,
.probe = mc34704_probe,
.of_compatible = DRV_OF_COMPAT(mc34704_dt_ids),
};
static int mc34704_init(void)

View File

@ -796,7 +796,9 @@ static int __init atmel_pmecc_nand_init_params(struct device_d *dev,
switch (mtd->writesize) {
case 2048:
case 4096:
host->pmecc_degree = PMECC_GF_DIMENSION_13;
case 8192:
host->pmecc_degree = (sector_size == 512) ?
PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14;
host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
host->pmecc_sector_number = mtd->writesize / sector_size;
host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes(
@ -1162,7 +1164,12 @@ static int __init atmel_nand_probe(struct device_d *dev)
nand_chip->ecc.mode = NAND_ECC_HW;
}
nand_chip->chip_delay = 20; /* 20us command delay time */
nand_chip->chip_delay = 40; /* 40us command delay time */
if (IS_ENABLED(CONFIG_NAND_ECC_BCH) &&
pdata->ecc_mode == NAND_ECC_SOFT_BCH) {
nand_chip->ecc.mode = NAND_ECC_SOFT_BCH;
}
if (host->board->bus_width_16) { /* 16-bit bus width */
nand_chip->options |= NAND_BUSWIDTH_16;

View File

@ -34,6 +34,7 @@ config DRIVER_NET_AR231X
config DRIVER_NET_ARC_EMAC
bool "ARC Ethernet MAC driver"
depends on HAS_DMA
select PHYLIB
help
This option enables support for the ARC EMAC ethernet
@ -46,6 +47,7 @@ config DRIVER_NET_AT91_ETHER
config DRIVER_NET_CALXEDA_XGMAC
bool "Calxeda xgmac"
depends on HAS_DMA
config DRIVER_NET_CS8900
bool "cs8900 ethernet driver"
@ -63,6 +65,7 @@ config DRIVER_NET_DAVINCI_EMAC
config DRIVER_NET_DESIGNWARE
bool "Designware Universal MAC ethernet driver"
depends on HAS_DMA
select PHYLIB
help
This option enables support for the Synopsys
@ -94,6 +97,7 @@ config DRIVER_NET_EP93XX
config DRIVER_NET_ETHOC
bool "OpenCores ethernet MAC driver"
depends on HAS_CACHE
select PHYLIB
help
This option enables support for the OpenCores 10/100 Mbps

View File

@ -199,19 +199,19 @@ static int gfar_open(struct eth_device *edev)
/* Initialize the Rx Buffer descriptors */
for (ix = 0; ix < RX_BUF_CNT; ix++) {
priv->rxbd[ix].status = RXBD_EMPTY;
priv->rxbd[ix].length = 0;
priv->rxbd[ix].bufPtr = (uint) NetRxPackets[ix];
out_be16(&priv->rxbd[ix].status, RXBD_EMPTY);
out_be16(&priv->rxbd[ix].length, 0);
out_be32(&priv->rxbd[ix].bufPtr, (uint) NetRxPackets[ix]);
}
priv->rxbd[RX_BUF_CNT - 1].status |= RXBD_WRAP;
out_be16(&priv->rxbd[RX_BUF_CNT - 1].status, RXBD_EMPTY | RXBD_WRAP);
/* Initialize the TX Buffer Descriptors */
for (ix = 0; ix < TX_BUF_CNT; ix++) {
priv->txbd[ix].status = 0;
priv->txbd[ix].length = 0;
priv->txbd[ix].bufPtr = 0;
out_be16(&priv->txbd[ix].status, 0);
out_be16(&priv->txbd[ix].length, 0);
out_be32(&priv->txbd[ix].bufPtr, 0);
}
priv->txbd[TX_BUF_CNT - 1].status |= TXBD_WRAP;
out_be16(&priv->txbd[TX_BUF_CNT - 1].status, TXBD_WRAP);
/* Enable Transmit and Receive */
setbits_be32(regs + GFAR_MACCFG1_OFFSET, GFAR_MACCFG1_RX_EN |
@ -366,30 +366,32 @@ static int gfar_send(struct eth_device *edev, void *packet, int length)
struct device_d *dev = edev->parent;
uint64_t start;
uint tidx;
uint16_t status;
tidx = priv->txidx;
priv->txbd[tidx].bufPtr = (uint) packet;
priv->txbd[tidx].length = length;
priv->txbd[tidx].status |= (TXBD_READY | TXBD_LAST |
TXBD_CRC | TXBD_INTERRUPT);
out_be32(&priv->txbd[tidx].bufPtr, (u32) packet);
out_be16(&priv->txbd[tidx].length, length);
out_be16(&priv->txbd[tidx].status,
in_be16(&priv->txbd[tidx].status) |
(TXBD_READY | TXBD_LAST | TXBD_CRC | TXBD_INTERRUPT));
/* Tell the DMA to go */
out_be32(regs + GFAR_TSTAT_OFFSET, GFAR_TSTAT_CLEAR_THALT);
/* Wait for buffer to be transmitted */
start = get_time_ns();
while (priv->txbd[tidx].status & TXBD_READY) {
while (in_be16(&priv->txbd[tidx].status) & TXBD_READY) {
if (is_timeout(start, 5 * MSECOND)) {
break;
}
}
if (priv->txbd[tidx].status & TXBD_READY) {
dev_err(dev, "tx timeout: 0x%x\n", priv->txbd[tidx].status);
status = in_be16(&priv->txbd[tidx].status);
if (status & TXBD_READY) {
dev_err(dev, "tx timeout: 0x%x\n", status);
return -EBUSY;
}
else if (priv->txbd[tidx].status & TXBD_STATS) {
dev_err(dev, "TX error: 0x%x\n", priv->txbd[tidx].status);
} else if (status & TXBD_STATS) {
dev_err(dev, "TX error: 0x%x\n", status);
return -EIO;
}
@ -403,31 +405,28 @@ static int gfar_recv(struct eth_device *edev)
struct gfar_private *priv = edev->priv;
struct device_d *dev = edev->parent;
void __iomem *regs = priv->regs;
int length;
uint16_t status, length;
if (priv->rxbd[priv->rxidx].status & RXBD_EMPTY) {
return 0; /* no data */
}
if (in_be16(&priv->rxbd[priv->rxidx].status) & RXBD_EMPTY)
return 0;
length = priv->rxbd[priv->rxidx].length;
length = in_be16(&priv->rxbd[priv->rxidx].length);
/* Send the packet up if there were no errors */
if (!(priv->rxbd[priv->rxidx].status & RXBD_STATS)) {
status = in_be16(&priv->rxbd[priv->rxidx].status);
if (!(status & RXBD_STATS))
net_receive(edev, NetRxPackets[priv->rxidx], length - 4);
} else {
dev_err(dev, "Got error %x\n",
(priv->rxbd[priv->rxidx].status & RXBD_STATS));
}
else
dev_err(dev, "Got error %x\n", status & RXBD_STATS);
priv->rxbd[priv->rxidx].length = 0;
out_be16(&priv->rxbd[priv->rxidx].length, 0);
status = RXBD_EMPTY;
/* Set the wrap bit if this is the last element in the list */
if ((priv->rxidx + 1) == RX_BUF_CNT)
priv->rxbd[priv->rxidx].status = RXBD_WRAP;
else
priv->rxbd[priv->rxidx].status = 0;
status |= RXBD_WRAP;
priv->rxbd[priv->rxidx].status |= RXBD_EMPTY;
out_be16(&priv->rxbd[priv->rxidx].status, status);
priv->rxidx = (priv->rxidx + 1) % RX_BUF_CNT;
if (in_be32(regs + GFAR_IEVENT_OFFSET) & GFAR_IEVENT_BSY) {
@ -517,8 +516,9 @@ static int gfar_probe(struct device_d *dev)
size = ((TX_BUF_CNT * sizeof(struct txbd8)) +
(RX_BUF_CNT * sizeof(struct rxbd8))) + BUF_ALIGN;
p = (char *)xmemalign(BUF_ALIGN, size);
priv->txbd = (struct txbd8 *)p;
priv->rxbd = (struct rxbd8 *)(p + (TX_BUF_CNT * sizeof(struct txbd8)));
priv->txbd = (struct txbd8 __iomem *)p;
priv->rxbd = (struct rxbd8 __iomem *)(p +
(TX_BUF_CNT * sizeof(struct txbd8)));
edev->priv = priv;
edev->init = gfar_init;

View File

@ -188,15 +188,15 @@
#define RXBD_STATS 0x003f
struct txbd8 {
ushort status; /* Status Fields */
ushort length; /* Buffer length */
uint bufPtr; /* Buffer Pointer */
uint16_t status; /* Status Fields */
uint16_t length; /* Buffer length */
uint32_t bufPtr; /* Buffer Pointer */
};
struct rxbd8 {
ushort status; /* Status Fields */
ushort length; /* Buffer Length */
uint bufPtr; /* Buffer Pointer */
uint16_t status; /* Status Fields */
uint16_t length; /* Buffer Length */
uint32_t bufPtr; /* Buffer Pointer */
};
/* eTSEC general control and status registers */
@ -275,8 +275,8 @@ struct gfar_private {
struct gfar_phy *gfar_mdio;
struct gfar_phy *gfar_tbi;
struct phy_info *phyinfo;
volatile struct txbd8 *txbd;
volatile struct rxbd8 *rxbd;
struct txbd8 __iomem *txbd;
struct rxbd8 __iomem *rxbd;
uint txidx;
uint rxidx;
uint phyaddr;

View File

@ -20,6 +20,8 @@ static const char *nand_ecc_modes[] = {
[NAND_ECC_SOFT] = "soft",
[NAND_ECC_HW] = "hw",
[NAND_ECC_HW_SYNDROME] = "hw_syndrome",
[NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
[NAND_ECC_SOFT_BCH] = "soft_bch",
};
/**

View File

@ -6,6 +6,7 @@ if REGULATOR
config REGULATOR_FIXED
bool "fixed/gpio regulator"
depends on GENERIC_GPIO
help
This enables a simple fixed regulator. It is used for regulators
which are not software controllable or controllable via gpio.

View File

@ -27,6 +27,7 @@
struct regulator_fixed {
int gpio;
int active_low;
int always_on;
struct regulator_dev rdev;
};
@ -44,6 +45,9 @@ static int regulator_fixed_disable(struct regulator_dev *rdev)
{
struct regulator_fixed *fix = container_of(rdev, struct regulator_fixed, rdev);
if (fix->always_on)
return 0;
if (!gpio_is_valid(fix->gpio))
return 0;
@ -79,6 +83,11 @@ static int regulator_fixed_probe(struct device_d *dev)
fix->rdev.ops = &fixed_ops;
if (of_find_property(dev->device_node, "regulator-always-on", NULL)) {
fix->always_on = 1;
regulator_fixed_enable(&fix->rdev);
}
ret = of_regulator_register(&fix->rdev, dev->device_node);
if (ret)
return ret;

View File

@ -109,14 +109,6 @@ static unsigned n_ports;
#define GS_CLOSE_TIMEOUT 15 /* seconds */
#ifdef VERBOSE_DEBUG
#define pr_vdebug(fmt, arg...) \
pr_debug(fmt, ##arg)
#else
#define pr_vdebug(fmt, arg...) \
({ if (0) pr_debug(fmt, ##arg); })
#endif
static unsigned gs_start_rx(struct gs_port *port)
{
struct list_head *pool = &port->read_pool;

View File

@ -1,5 +1,6 @@
config USB_EHCI
bool "EHCI driver"
depends on HAS_DMA
config USB_EHCI_OMAP
depends on ARCH_OMAP3

View File

@ -1,5 +1,6 @@
config DRIVER_VIDEO_IMX_IPUV3
bool "i.MX IPUv3 driver"
depends on ARCH_IMX
help
Support the IPUv3 found on Freescale i.MX51/53/6 SoCs

View File

@ -21,4 +21,11 @@ config WATCHDOG_IMX
depends on ARCH_IMX
help
Add support for watchdog found on Freescale i.MX SoCs.
config WATCHDOG_JZ4740
bool "Ingenic jz4740 SoC hardware watchdog"
depends on MACH_MIPS_XBURST
help
Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
endif

View File

@ -1,3 +1,4 @@
obj-$(CONFIG_WATCHDOG) += wd_core.o
obj-$(CONFIG_WATCHDOG_MXS28) += im28wd.o
obj-$(CONFIG_WATCHDOG_JZ4740) += jz4740.o
obj-$(CONFIG_WATCHDOG_IMX_RESET_SOURCE) += imxwd.o

View File

@ -159,6 +159,10 @@ static int imx_wd_probe(struct device_d *dev)
priv = xzalloc(sizeof(struct imx_wd));
priv->base = dev_request_mem_region(dev, 0);
if (!priv->base) {
dev_err(dev, "could not get memory region\n");
return -ENODEV;
}
priv->set_timeout = fn;
priv->wd.set_timeout = imx_watchdog_set_timeout;
priv->dev = dev;

102
drivers/watchdog/jz4740.c Normal file
View File

@ -0,0 +1,102 @@
/*
* JZ4740 Watchdog driver
*
* Copyright (C) 2014 Antony Pavlov <antonynpavlov@gmail.com>
*
* Based on jz4740_wdt.c from linux-3.15.
*
* Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <common.h>
#include <init.h>
#include <io.h>
#define JZ_REG_WDT_TIMER_DATA 0x0
#define JZ_REG_WDT_COUNTER_ENABLE 0x4
#define JZ_REG_WDT_TIMER_COUNTER 0x8
#define JZ_REG_WDT_TIMER_CONTROL 0xC
#define JZ_WDT_CLOCK_PCLK 0x1
#define JZ_WDT_CLOCK_RTC 0x2
#define JZ_WDT_CLOCK_EXT 0x4
#define JZ_WDT_CLOCK_DIV_SHIFT 3
#define JZ_WDT_CLOCK_DIV_1 (0 << JZ_WDT_CLOCK_DIV_SHIFT)
#define JZ_WDT_CLOCK_DIV_4 (1 << JZ_WDT_CLOCK_DIV_SHIFT)
#define JZ_WDT_CLOCK_DIV_16 (2 << JZ_WDT_CLOCK_DIV_SHIFT)
#define JZ_WDT_CLOCK_DIV_64 (3 << JZ_WDT_CLOCK_DIV_SHIFT)
#define JZ_WDT_CLOCK_DIV_256 (4 << JZ_WDT_CLOCK_DIV_SHIFT)
#define JZ_WDT_CLOCK_DIV_1024 (5 << JZ_WDT_CLOCK_DIV_SHIFT)
#define JZ_EXTAL 24000000
struct jz4740_wdt_drvdata {
void __iomem *base;
};
static struct jz4740_wdt_drvdata *reset_wd;
void __noreturn reset_cpu(unsigned long addr)
{
if (reset_wd) {
void __iomem *base = reset_wd->base;
writew(JZ_WDT_CLOCK_DIV_4 | JZ_WDT_CLOCK_EXT,
base + JZ_REG_WDT_TIMER_CONTROL);
writew(0, base + JZ_REG_WDT_TIMER_COUNTER);
/* reset after 4ms */
writew(JZ_EXTAL / 1000, base + JZ_REG_WDT_TIMER_DATA);
/* start wdt */
writeb(0x1, base + JZ_REG_WDT_COUNTER_ENABLE);
mdelay(1000);
} else
pr_err("%s: can't reset cpu\n", __func__);
hang();
}
EXPORT_SYMBOL(reset_cpu);
static int jz4740_wdt_probe(struct device_d *dev)
{
struct jz4740_wdt_drvdata *priv;
priv = xzalloc(sizeof(struct jz4740_wdt_drvdata));
priv->base = dev_request_mem_region(dev, 0);
if (!priv->base) {
dev_err(dev, "could not get memory region\n");
return -ENODEV;
}
if (!reset_wd)
reset_wd = priv;
dev->priv = priv;
return 0;
}
static __maybe_unused struct of_device_id jz4740_wdt_dt_ids[] = {
{
.compatible = "ingenic,jz4740-wdt",
}, {
/* sentinel */
}
};
static struct driver_d jz4740_wdt_driver = {
.name = "jz4740-wdt",
.probe = jz4740_wdt_probe,
.of_compatible = DRV_OF_COMPAT(jz4740_wdt_dt_ids),
};
device_platform_driver(jz4740_wdt_driver);

View File

@ -32,4 +32,19 @@ static inline unsigned int hweight8(unsigned int w)
return (res & 0x0F) + ((res >> 4) & 0x0F);
}
static inline unsigned long hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
return hweight32((unsigned int)(w >> 32)) +
hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
res = res + (res >> 8);
res = res + (res >> 16);
return (res + (res >> 32)) & 0x00000000000000FFul;
#endif
}
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
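
The new hweight64() is a plain population count: on 32-bit configurations it sums two hweight32() results, on 64-bit ones it uses the usual parallel bit-summing trick. A small standalone check of the 64-bit branch (copied locally purely for illustration):

#include <assert.h>
#include <stdint.h>

/* Local copy of the 64-bit parallel popcount shown above. */
static unsigned long hweight64_sketch(uint64_t w)
{
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ull);

	res = (res & 0x3333333333333333ull) + ((res >> 2) & 0x3333333333333333ull);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Full;
	res = res + (res >> 8);
	res = res + (res >> 16);
	return (res + (res >> 32)) & 0xFFull;
}

int main(void)
{
	assert(hweight64_sketch(0) == 0);                        /* no bits set */
	assert(hweight64_sketch(0xF0F0F0F0F0F0F0F0ull) == 32);   /* half the bits */
	assert(hweight64_sketch(~0ull) == 64);                   /* all bits set */
	return 0;
}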

View File

@ -1,6 +1,8 @@
#ifndef __INCLUDE_BBU_H
#define __INCLUDE_BBU_H
#include <asm-generic/errno.h>
struct bbu_data {
#define BBU_FLAG_FORCE (1 << 0)
#define BBU_FLAG_YES (1 << 1)

285
include/linux/bitmap.h Normal file
View File

@ -0,0 +1,285 @@
#ifndef __LINUX_BITMAP_H
#define __LINUX_BITMAP_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/kernel.h>
/*
* bitmaps provide bit arrays that consume one or more unsigned
* longs. The bitmap interface and available operations are listed
* here, in bitmap.h
*
* Function implementations generic to all architectures are in
* lib/bitmap.c. Functions implementations that are architecture
* specific are in various include/asm-<arch>/bitops.h headers
* and other arch/<arch> specific files.
*
* See lib/bitmap.c for more details.
*/
/*
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
*
* bitmap_zero(dst, nbits) *dst = 0UL
* bitmap_fill(dst, nbits) *dst = ~0UL
* bitmap_copy(dst, src, nbits) *dst = *src
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
* bitmap_complement(dst, src, nbits) *dst = ~(*src)
* bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
* bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
* bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
*/
/*
* Also the following operations in asm/bitops.h apply to bitmaps.
*
* set_bit(bit, addr) *addr |= bit
* clear_bit(bit, addr) *addr &= ~bit
* change_bit(bit, addr) *addr ^= bit
* test_bit(bit, addr) Is bit set in *addr?
* test_and_set_bit(bit, addr) Set bit and return old value
* test_and_clear_bit(bit, addr) Clear bit and return old value
* test_and_change_bit(bit, addr) Change bit and return old value
* find_first_zero_bit(addr, nbits) Position first zero bit in *addr
* find_first_bit(addr, nbits) Position first set bit in *addr
* find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
* find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
/*
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
* to declare an array named 'name' of just enough unsigned longs to
* contain all bit positions from 0 to 'bits' - 1.
*/
/*
* lib/bitmap.c provides these functions:
*/
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
extern int __bitmap_full(const unsigned long *bitmap, int bits);
extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
int bits);
extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
extern void bitmap_set(unsigned long *map, int i, int len);
extern void bitmap_clear(unsigned long *map, int start, int nr);
extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
)
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
static inline void bitmap_zero(unsigned long *dst, int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
static inline void bitmap_fill(unsigned long *dst, int nbits)
{
size_t nlongs = BITS_TO_LONGS(nbits);
if (!small_const_nbits(nbits)) {
int len = (nlongs - 1) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
int nbits)
{
if (small_const_nbits(nbits))
*dst = *src;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
}
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
else
__bitmap_or(dst, src1, src2, nbits);
}
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
else
__bitmap_xor(dst, src1, src2, nbits);
}
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_complement(dst, src, nbits);
}
static inline int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_equal(src1, src2, nbits);
}
static inline int bitmap_intersects(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
else
return __bitmap_intersects(src1, src2, nbits);
}
static inline int bitmap_subset(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_subset(src1, src2, nbits);
}
static inline int bitmap_empty(const unsigned long *src, int nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_empty(src, nbits);
}
static inline int bitmap_full(const unsigned long *src, int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_full(src, nbits);
}
static inline int bitmap_weight(const unsigned long *src, int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
static inline void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (small_const_nbits(nbits))
*dst = *src >> n;
else
__bitmap_shift_right(dst, src, n, nbits);
}
static inline void bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_shift_left(dst, src, n, nbits);
}
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
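
Together with the DECLARE_BITMAP() macro added to linux/types.h later in this commit and the generic find_*_bit helpers, the new header can be used roughly as follows. This is an illustrative sketch, not part of the patch, and the include set is approximate:

#include <common.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>

static void bitmap_demo(void)
{
	DECLARE_BITMAP(mask, 64);	/* enough unsigned longs for 64 bits */
	unsigned long bit;

	bitmap_zero(mask, 64);
	bitmap_set(mask, 4, 8);		/* set bits 4..11 */
	set_bit(20, mask);

	pr_info("weight: %d\n", bitmap_weight(mask, 64));	/* prints 9 */

	for_each_set_bit(bit, mask, 64)
		pr_info("bit %lu is set\n", bit);
}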

View File

@ -1,15 +1,227 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
#ifdef __BAREBOX__
#ifdef __KERNEL__
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
/*
* Create a contiguous bitmask starting at bit position @l and ending at
* position @h. For example
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*/
#include <asm/bitops.h>
#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_first_bit((addr), (size)); \
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
for ((bit) = find_next_bit((addr), (size), (bit)); \
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
#define for_each_clear_bit(bit, addr, size) \
for ((bit) = find_first_zero_bit((addr), (size)); \
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
static __inline__ int get_bitmask_order(unsigned int count)
{
int order;
order = fls(count);
return order; /* We could be slightly more clever with -1 here... */
}
static __inline__ int get_count_order(unsigned int count)
{
int order;
order = fls(count) - 1;
if (count & (count - 1))
order++;
return order;
}
static inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
/**
* rol64 - rotate a 64-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u64 rol64(__u64 word, unsigned int shift)
{
return (word << shift) | (word >> (64 - shift));
}
/**
* ror64 - rotate a 64-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u64 ror64(__u64 word, unsigned int shift)
{
return (word >> shift) | (word << (64 - shift));
}
/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> (32 - shift));
}
/**
* ror32 - rotate a 32-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u32 ror32(__u32 word, unsigned int shift)
{
return (word >> shift) | (word << (32 - shift));
}
/**
* rol16 - rotate a 16-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u16 rol16(__u16 word, unsigned int shift)
{
return (word << shift) | (word >> (16 - shift));
}
/**
* ror16 - rotate a 16-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u16 ror16(__u16 word, unsigned int shift)
{
return (word >> shift) | (word << (16 - shift));
}
/**
* rol8 - rotate an 8-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u8 rol8(__u8 word, unsigned int shift)
{
return (word << shift) | (word >> (8 - shift));
}
/**
* ror8 - rotate an 8-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u8 ror8(__u8 word, unsigned int shift)
{
return (word >> shift) | (word << (8 - shift));
}
/**
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<32) to sign bit
*/
static inline __s32 sign_extend32(__u32 value, int index)
{
__u8 shift = 31 - index;
return (__s32)(value << shift) >> shift;
}
static inline unsigned fls_long(unsigned long l)
{
if (sizeof(l) == 4)
return fls(l);
return fls64(l);
}
/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
* On 64 bit arches this is a synonym for __ffs
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
if (((u32)word) == 0UL)
return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
return __ffs((unsigned long)word);
}
#ifdef __KERNEL__
#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits) \
({ \
const typeof(*ptr) mask = (_mask), bits = (_bits); \
typeof(*ptr) old, new; \
\
do { \
old = ACCESS_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
\
new; \
})
#endif
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum size to search
*
* Returns the bit number of the first set bit, or size.
*/
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
#endif
#endif /* __KERNEL__ */
#endif
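
A few of the helpers added above in action; the expected results are spelled out in the comments (illustrative sketch only):

#include <common.h>
#include <linux/bitops.h>

static void bitops_demo(void)
{
	/* GENMASK(h, l): contiguous mask from bit l up to and including bit h */
	u32 field_mask = GENMASK(7, 4);			/* 0x000000f0 */

	/* rol32()/ror32(): plain 32-bit rotations */
	u32 rotated = rol32(0x80000001, 1);		/* 0x00000003 */

	/* sign_extend32(): treat bit 11 as the sign bit of a 12-bit field */
	s32 extended = sign_extend32(0xfff, 11);	/* -1 */

	pr_debug("mask=%08x rot=%08x ext=%d\n", field_mask, rotated, extended);
}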

View File

@ -175,5 +175,34 @@ extern unsigned short int htons(unsigned short int);
#endif /* OPTIMIZE */
static inline void le16_add_cpu(__le16 *var, u16 val)
{
*var = cpu_to_le16(le16_to_cpu(*var) + val);
}
static inline void le32_add_cpu(__le32 *var, u32 val)
{
*var = cpu_to_le32(le32_to_cpu(*var) + val);
}
static inline void le64_add_cpu(__le64 *var, u64 val)
{
*var = cpu_to_le64(le64_to_cpu(*var) + val);
}
static inline void be16_add_cpu(__be16 *var, u16 val)
{
*var = cpu_to_be16(be16_to_cpu(*var) + val);
}
static inline void be32_add_cpu(__be32 *var, u32 val)
{
*var = cpu_to_be32(be32_to_cpu(*var) + val);
}
static inline void be64_add_cpu(__be64 *var, u64 val)
{
*var = cpu_to_be64(be64_to_cpu(*var) + val);
}
#endif /* _LINUX_BYTEORDER_GENERIC_H */
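As a small illustration of the new *_add_cpu() helpers, the sketch below bumps a big-endian on-wire counter in place; the struct is hypothetical and only stands in for any field stored in fixed endianness:

#include <linux/byteorder/generic.h>

/* Hypothetical on-wire header with a big-endian sequence counter. */
struct wire_hdr {
	__be32 seq;
};

static void bump_seq(struct wire_hdr *hdr)
{
	/* Same as: hdr->seq = cpu_to_be32(be32_to_cpu(hdr->seq) + 1); */
	be32_add_cpu(&hdr->seq, 1);
}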

View File

@ -4,6 +4,19 @@
#include <linux/compiler.h>
#include <linux/barebox-wrapper.h>
/*
* This looks more complex than it should be. But we need to
* get the type for the ~ right in round_down (it needs to be
* as wide as the result!), and we want to evaluate the macro
* arguments just once each.
*
* NOTE these functions only round to power-of-2 arguments. Use
* roundup/rounddown for non power-of-2-arguments.
*/
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
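For example (power-of-two arguments only, as the comment says), aligning an address range to 4 KiB with these macros looks like the sketch below; the constants are hand-checked: round_down(0x1234, 0x1000) == 0x1000 and round_up(0x5678, 0x1000) == 0x6000.

#include <linux/kernel.h>	/* round_up(), round_down() */

#define ALIGN_4K	0x1000UL

static unsigned long aligned_span(unsigned long start, unsigned long end)
{
	unsigned long a_start = round_down(start, ALIGN_4K);
	unsigned long a_end   = round_up(end, ALIGN_4K);

	/* e.g. start = 0x1234, end = 0x5678 -> covers 0x1000..0x6000 */
	return a_end - a_start;
}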
/*
* min()/max()/clamp() macros that also do
* strict type-checking.. See the

View File

@ -4,6 +4,9 @@
#include <linux/posix_types.h>
#include <asm/types.h>
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
#ifndef __KERNEL_STRICT_NAMES
typedef __u32 __kernel_dev_t;

View File

@ -3,22 +3,12 @@
#include <types.h>
/* Public routines */
void* malloc(size_t);
void free(void*);
void* realloc(void*, size_t);
void* memalign(size_t, size_t);
void* valloc(size_t);
void* pvalloc(size_t);
void* calloc(size_t, size_t);
void cfree(void*);
int malloc_trim(size_t);
size_t malloc_usable_size(void*);
void *malloc(size_t);
void free(void *);
void *realloc(void *, size_t);
void *memalign(size_t, size_t);
void *calloc(size_t, size_t);
void malloc_stats(void);
int mallopt(int, int);
struct mallinfo mallinfo(void);
void *sbrk(ptrdiff_t increment);
#endif
#endif /* __MALLOC_H */

View File

@ -9,8 +9,11 @@
#define MSG_NOTICE 5 /* normal but significant condition */
#define MSG_INFO 6 /* informational */
#define MSG_DEBUG 7 /* debug-level messages */
#define MSG_VDEBUG 8 /* verbose debug messages */
#ifdef DEBUG
#ifdef VERBOSE_DEBUG
#define LOGLEVEL MSG_VDEBUG
#elif defined DEBUG
#define LOGLEVEL MSG_DEBUG
#else
#define LOGLEVEL CONFIG_COMPILE_LOGLEVEL
@ -46,6 +49,8 @@ int dev_printf(int level, const struct device_d *dev, const char *format, ...)
__dev_printf(6, (dev) , format , ## arg)
#define dev_dbg(dev, format, arg...) \
__dev_printf(7, (dev) , format , ## arg)
#define dev_vdbg(dev, format, arg...) \
__dev_printf(8, (dev) , format , ## arg)
#define __pr_printk(level, format, args...) \
({ \
@ -65,5 +70,6 @@ int dev_printf(int level, const struct device_d *dev, const char *format, ...)
#define pr_info(fmt, arg...) __pr_printk(6, pr_fmt(fmt), ##arg)
#define pr_debug(fmt, arg...) __pr_printk(7, pr_fmt(fmt), ##arg)
#define debug(fmt, arg...) __pr_printk(7, pr_fmt(fmt), ##arg)
#define pr_vdebug(fmt, arg...) __pr_printk(8, pr_fmt(fmt), ##arg)
#endif
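A short usage sketch for the new MSG_VDEBUG level (the function and names below are hypothetical): dev_vdbg()/pr_vdebug() only produce output when the translation unit defines VERBOSE_DEBUG (or the compile-time loglevel is raised), so they suit per-byte tracing that would be too chatty even for dev_dbg(). This assumes the usual barebox include chain pulls this header in via <common.h> and that <driver.h> provides struct device_d.

/* Define before including headers to compile in verbose debug output. */
#define VERBOSE_DEBUG

#include <common.h>
#include <driver.h>

static void demo_transfer(struct device_d *dev, const void *buf, int len)
{
	dev_dbg(dev, "transferring %d bytes\n", len);
	/* Emitted only because VERBOSE_DEBUG is defined above. */
	dev_vdbg(dev, "first byte: 0x%02x\n", *(const unsigned char *)buf);
	pr_vdebug("transfer done\n");
}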

View File

@ -45,3 +45,4 @@ obj-y += unlink-recursive.o
obj-$(CONFIG_STMP_DEVICE) += stmp-device.o
obj-y += wchar.o
obj-y += libfile.o
obj-y += bitmap.o

839
lib/bitmap.c Normal file
View File

@ -0,0 +1,839 @@
/*
* lib/bitmap.c
* Helper functions for bitmap.h.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <common.h>
#include <linux/ctype.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
/*
* bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
*
* The possible unused bits in the last, partially used word
* of a bitmap are 'don't care'. The implementation makes
* no particular effort to keep them zero. It ensures that
* their value will not affect the results of any operation.
* The bitmap operations that return Boolean (bitmap_empty,
* for example) or scalar (bitmap_weight, for example) results
* carefully filter out these unused bits from impacting their
* results.
*
* These operations actually hold to a slightly stronger rule:
* if you don't input any bitmaps to these ops that have some
* unused bits set, then they won't output any set unused bits
* in output bitmaps.
*
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
* for the best explanations of this ordering.
*/
int __bitmap_empty(const unsigned long *bitmap, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap[k])
return 0;
if (bits % BITS_PER_LONG)
if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_empty);
int __bitmap_full(const unsigned long *bitmap, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (~bitmap[k])
return 0;
if (bits % BITS_PER_LONG)
if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_full);
int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
return 0;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_equal);
void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
if (bits % BITS_PER_LONG)
dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
}
EXPORT_SYMBOL(__bitmap_complement);
/**
* __bitmap_shift_right - logical right shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @bits : bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
* LS bits shifted off the bottom are lost.
*/
void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
unsigned long mask = (1UL << left) - 1;
for (k = 0; off + k < lim; ++k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take lower rem bits of
* word above and make them the top rem bits of result.
*/
if (!rem || off + k + 1 >= lim)
upper = 0;
else {
upper = src[off + k + 1];
if (off + k + 1 == lim - 1 && left)
upper &= mask;
}
lower = src[off + k];
if (left && off + k == lim - 1)
lower &= mask;
dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
if (left && k == lim - 1)
dst[k] &= mask;
}
if (off)
memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_right);
/**
* __bitmap_shift_left - logical left shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @bits : bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
* and those MS bits shifted off the top are lost.
*/
void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
for (k = lim - off - 1; k >= 0; --k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take upper rem bits of
* word below and make them the bottom rem bits of result.
*/
if (rem && k > 0)
lower = src[k - 1];
else
lower = 0;
upper = src[k];
if (left && k == lim - 1)
upper &= (1UL << left) - 1;
dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
if (left && k + off == lim - 1)
dst[k + off] &= (1UL << left) - 1;
}
if (off)
memset(dst, 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_left);
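Worked example for the two shift helpers above; the positions are hand-checked and only functions defined in this file plus the generic bit helpers are used:

static int shift_demo(void)
{
	DECLARE_BITMAP(src, 16) = { 0 };
	DECLARE_BITMAP(dst, 16) = { 0 };

	__set_bit(4, src);
	__set_bit(9, src);

	/* Right shift moves bits toward bit 0: {4, 9} >> 3 -> {1, 6}. */
	__bitmap_shift_right(dst, src, 3, 16);
	if (!test_bit(1, dst) || !test_bit(6, dst))
		return 0;

	/* Left shift moves bits toward the MSB: {4, 9} << 5 -> {9, 14}. */
	__bitmap_shift_left(dst, src, 5, 16);
	return test_bit(9, dst) && test_bit(14, dst);
}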
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
unsigned long result = 0;
for (k = 0; k < nr; k++)
result |= (dst[k] = bitmap1[k] & bitmap2[k]);
return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_or);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] ^ bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_xor);
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
unsigned long result = 0;
for (k = 0; k < nr; k++)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);
int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
return 1;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 1;
return 0;
}
EXPORT_SYMBOL(__bitmap_intersects);
int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
return 0;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_subset);
int __bitmap_weight(const unsigned long *bitmap, int bits)
{
int k, w = 0, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
if (bits % BITS_PER_LONG)
w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
return w;
}
EXPORT_SYMBOL(__bitmap_weight);
void bitmap_set(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_set >= 0) {
*p |= mask_to_set;
nr -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
if (nr) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
EXPORT_SYMBOL(bitmap_set);
void bitmap_clear(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
nr -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (nr) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
}
EXPORT_SYMBOL(bitmap_clear);
/*
* bitmap_find_next_zero_area - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
*
* The @align_mask should be one less than a power of 2; the effect is that
* the bit offset of all zero areas this function finds is a multiple of that
* power of 2. An @align_mask of 0 means no alignment is required.
*/
unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
index = __ALIGN_MASK(index, align_mask);
end = index + nr;
if (end > size)
return end;
i = find_next_bit(map, end, index);
if (i < end) {
start = i + 1;
goto again;
}
return index;
}
EXPORT_SYMBOL(bitmap_find_next_zero_area);
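Typical allocator-style usage of bitmap_find_next_zero_area(); the sketch below asks for four consecutive free bits at a 4-bit-aligned offset (align_mask == 3) and claims them with bitmap_set(). The function name is made up for illustration.

static int claim_four_aligned(unsigned long *map, unsigned long size)
{
	unsigned long pos;

	/* Four consecutive zero bits, starting at a multiple of four. */
	pos = bitmap_find_next_zero_area(map, size, 0, 4, 3);
	if (pos >= size)
		return -ENOSPC;		/* no suitable gap left */

	bitmap_set(map, pos, 4);	/* mark the region as used */
	return pos;
}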
/*
* Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
* second version by Paul Jackson, third by Joe Korty.
*/
#define CHUNKSZ 32
#define nbits_to_hold_value(val) fls(val)
#define BASEDEC 10 /* fancier cpuset lists input in decimal */
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
* @pos: a bit position in @buf (0 <= @pos < @bits)
* @bits: number of valid bit positions in @buf
*
* Map the bit at position @pos in @buf (of length @bits) to the
* ordinal of which set bit it is. If it is not set or if @pos
* is not a valid bit position, map to -1.
*
* If, for example, just bits 4 through 7 are set in @buf, then @pos
* values 4 through 7 will get mapped to 0 through 3, respectively,
* and other @pos values will get mapped to -1. When @pos value 7
* gets mapped to (returns) @ord value 3 in this example, that means
* that bit 7 is the 3rd (starting with 0th) set bit in @buf.
*
* The bit positions 0 through @bits - 1 are valid positions in @buf.
*/
static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
{
int i, ord;
if (pos < 0 || pos >= bits || !test_bit(pos, buf))
return -1;
i = find_first_bit(buf, bits);
ord = 0;
while (i < pos) {
i = find_next_bit(buf, bits, i + 1);
ord++;
}
BUG_ON(i != pos);
return ord;
}
/**
* bitmap_ord_to_pos - find position of n-th set bit in bitmap
* @buf: pointer to bitmap
* @ord: ordinal bit position (n-th set bit, n >= 0)
* @bits: number of valid bit positions in @buf
*
* Map the ordinal offset of bit @ord in @buf to its position in @buf.
* Value of @ord should be in range 0 <= @ord < weight(buf), else
* results are undefined.
*
* If, for example, just bits 4 through 7 are set in @buf, then @ord
* values 0 through 3 will get mapped to 4 through 7, respectively,
* and all other @ord values return undefined values. When @ord value 3
* gets mapped to (returns) @pos value 7 in this example, that means
* that the 3rd set bit (starting with 0th) is at position 7 in @buf.
*
* The bit positions 0 through @bits - 1 are valid positions in @buf.
*/
int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
{
int pos = 0;
if (ord >= 0 && ord < bits) {
int i;
for (i = find_first_bit(buf, bits);
i < bits && ord > 0;
i = find_next_bit(buf, bits, i + 1))
ord--;
if (i < bits && ord == 0)
pos = i;
}
return pos;
}
/**
* bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
* @dst: remapped result
* @src: subset to be remapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* If either of the @old and @new bitmaps are empty, or if @src and
* @dst point to the same location, then this routine copies @src
* to @dst.
*
* The positions of unset bits in @old are mapped to themselves
* (the identity map).
*
* Apply the above specified mapping to @src, placing the result in
* @dst, clearing any bits previously set in @dst.
*
* For example, let's say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
* bit positions unchanged. So if, say, @src comes into this routine
* with bits 1, 5 and 7 set, then @dst should leave with bits 1,
* 13 and 15 set.
*/
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new,
int bits)
{
int oldbit, w;
if (dst == src) /* following doesn't handle inplace remaps */
return;
bitmap_zero(dst, bits);
w = bitmap_weight(new, bits);
for_each_set_bit(oldbit, src, bits) {
int n = bitmap_pos_to_ord(old, oldbit, bits);
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
}
}
EXPORT_SYMBOL(bitmap_remap);
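The mapping described in the comment above (old = bits 4..7, new = bits 12..15), spelled out as code; the expected positions are hand-checked and only helpers already used in this file are called:

static int remap_demo(void)
{
	DECLARE_BITMAP(old, 32) = { 0 };
	DECLARE_BITMAP(new, 32) = { 0 };
	DECLARE_BITMAP(src, 32) = { 0 };
	DECLARE_BITMAP(dst, 32) = { 0 };

	bitmap_set(old, 4, 4);		/* domain: bits 4..7   */
	bitmap_set(new, 12, 4);		/* range:  bits 12..15 */
	__set_bit(1, src);
	__set_bit(5, src);
	__set_bit(7, src);

	bitmap_remap(dst, src, old, new, 32);
	/* Bit 1 is outside @old, so it maps to itself; 5 -> 13, 7 -> 15. */
	return test_bit(1, dst) && test_bit(13, dst) && test_bit(15, dst);
}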
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
* @oldbit: bit position to be mapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* The positions of unset bits in @old are mapped to themselves
* (the identity map).
*
* Apply the above specified mapping to bit position @oldbit, returning
* the new bit position.
*
* For example, let's say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
* bit positions unchanged. So if, say, @oldbit is 5, then this routine
* returns 13.
*/
int bitmap_bitremap(int oldbit, const unsigned long *old,
const unsigned long *new, int bits)
{
int w = bitmap_weight(new, bits);
int n = bitmap_pos_to_ord(old, oldbit, bits);
if (n < 0 || w == 0)
return oldbit;
else
return bitmap_ord_to_pos(new, n % w, bits);
}
EXPORT_SYMBOL(bitmap_bitremap);
/**
* bitmap_onto - translate one bitmap relative to another
* @dst: resulting translated bitmap
* @orig: original untranslated bitmap
* @relmap: bitmap relative to which translated
* @bits: number of bits in each of these bitmaps
*
* Set the n-th bit of @dst iff there exists some m such that the
* n-th bit of @relmap is set, the m-th bit of @orig is set, and
* the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
* (If you understood the previous sentence the first time you
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
* using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
* weight of (number of set bits in) @relmap, are mapped nowhere.
* In particular, if for all bits m set in @orig, m >= W, then
* @dst will end up empty. In situations where the possibility
* of such an empty result is not desired, one way to avoid it is
* to use the bitmap_fold() operator, below, to first fold the
* @orig bitmap over itself so that all its set bits x are in the
* range 0 <= x < W. The bitmap_fold() operator does this by
* setting the bit (m % W) in @dst, for each bit (m) set in @orig.
*
* Example [1] for bitmap_onto():
* Let's say @relmap has bits 30-39 set, and @orig has bits
* 1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
* @dst will have bits 31, 33, 35, 37 and 39 set.
*
* When bit 0 is set in @orig, it means turn on the bit in
* @dst corresponding to whatever is the first bit (if any)
* that is turned on in @relmap. Since bit 0 was off in the
* above example, we leave off that bit (bit 30) in @dst.
*
* When bit 1 is set in @orig (as in the above example), it
* means turn on the bit in @dst corresponding to whatever
* is the second bit that is turned on in @relmap. The second
* bit in @relmap that was turned on in the above example was
* bit 31, so we turned on bit 31 in @dst.
*
* Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
* because they were the 4th, 6th, 8th and 10th set bits
* set in @relmap, and the 4th, 6th, 8th and 10th bits of
* @orig (i.e. bits 3, 5, 7 and 9) were also set.
*
* When bit 11 is set in @orig, it means turn on the bit in
* @dst corresponding to whatever is the twelfth bit that is
* turned on in @relmap. In the above example, there were
* only ten bits turned on in @relmap (30..39), so the fact that bit
* 11 was set in @orig had no effect on @dst.
*
* Example [2] for bitmap_fold() + bitmap_onto():
* Let's say @relmap has these ten bits set:
* 40 41 42 43 45 48 53 61 74 95
* (for the curious, that's 40 plus the first ten terms of the
* Fibonacci sequence.)
*
* Further, let's say we use the following code, invoking
* bitmap_fold() then bitmap_onto(), as suggested above to
* avoid the possibility of an empty @dst result:
*
* unsigned long *tmp; // a temporary bitmap's bits
*
* bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
* bitmap_onto(dst, tmp, relmap, bits);
*
* Then this table shows what various values of @dst would be, for
* various @orig's. I list the zero-based positions of each set bit.
* The tmp column shows the intermediate result, as computed by
* using bitmap_fold() to fold the @orig bitmap modulo ten
* (the weight of @relmap).
*
* @orig tmp @dst
* 0 0 40
* 1 1 41
* 9 9 95
* 10 0 40 (*)
* 1 3 5 7 1 3 5 7 41 43 48 61
* 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45
* 0 9 18 27 0 9 8 7 40 61 74 95
* 0 10 20 30 0 40
* 0 11 22 33 0 1 2 3 40 41 42 43
* 0 12 24 36 0 2 4 6 40 42 45 53
* 78 102 211 1 2 8 41 42 74 (*)
*
* (*) For these marked lines, if we hadn't first done bitmap_fold()
* into tmp, then the @dst result would have been empty.
*
* If either of @orig or @relmap is empty (no set bits), then @dst
* will be returned empty.
*
* If (as explained above) the only set bits in @orig are in positions
* m where m >= W, (where W is the weight of @relmap) then @dst will
* once again be returned empty.
*
* All bits in @dst not set by the above rule are cleared.
*/
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits)
{
int n, m; /* same meaning as in above comment */
if (dst == orig) /* following doesn't handle inplace mappings */
return;
bitmap_zero(dst, bits);
/*
* The following code is a more efficient, but less
* obvious, equivalent to the loop:
* for (m = 0; m < bitmap_weight(relmap, bits); m++) {
* n = bitmap_ord_to_pos(relmap, m, bits);
* if (test_bit(m, orig))
* set_bit(n, dst);
* }
*/
m = 0;
for_each_set_bit(n, relmap, bits) {
/* m == bitmap_pos_to_ord(relmap, n, bits) */
if (test_bit(m, orig))
set_bit(n, dst);
m++;
}
}
EXPORT_SYMBOL(bitmap_onto);
/**
* bitmap_fold - fold larger bitmap into smaller, modulo specified size
* @dst: resulting smaller bitmap
* @orig: original larger bitmap
* @sz: specified size
* @bits: number of bits in each of these bitmaps
*
* For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
* Clear all other bits in @dst. See further the comment and
* Example [2] for bitmap_onto() for why and how to use this.
*/
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits)
{
int oldbit;
if (dst == orig) /* following doesn't handle inplace mappings */
return;
bitmap_zero(dst, bits);
for_each_set_bit(oldbit, orig, bits)
set_bit(oldbit % sz, dst);
}
EXPORT_SYMBOL(bitmap_fold);
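Example [1] and the fold-then-onto idiom from Example [2] above, as code; a sketch only, with the expected positions hand-checked against the comments (relmap holds bits 30..39):

static int onto_fold_demo(void)
{
	DECLARE_BITMAP(relmap, 64) = { 0 };
	DECLARE_BITMAP(orig, 64) = { 0 };
	DECLARE_BITMAP(tmp, 64) = { 0 };
	DECLARE_BITMAP(dst, 64) = { 0 };
	int w;

	bitmap_set(relmap, 30, 10);	/* bits 30..39, as in Example [1] */
	__set_bit(1, orig);		/* -> second set bit of relmap: 31 */
	__set_bit(3, orig);		/* -> fourth set bit of relmap: 33 */
	__set_bit(11, orig);		/* >= weight(relmap), maps nowhere */

	bitmap_onto(dst, orig, relmap, 64);
	if (!test_bit(31, dst) || !test_bit(33, dst))
		return 0;

	/* Folding first wraps bit 11 to 11 % 10 == 1 instead of dropping it. */
	w = bitmap_weight(relmap, 64);
	bitmap_fold(tmp, orig, w, 64);
	bitmap_onto(dst, tmp, relmap, 64);
	return test_bit(31, dst) && test_bit(33, dst);
}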
/*
* Common code for bitmap_*_region() routines.
* bitmap: array of unsigned longs corresponding to the bitmap
* pos: the beginning of the region
* order: region size (log base 2 of number of bits)
* reg_op: operation(s) to perform on that region of bitmap
*
* Can set, verify and/or release a region of bits in a bitmap,
* depending on which combination of REG_OP_* flag bits is set.
*
* A region of a bitmap is a sequence of bits in the bitmap, of
* some size '1 << order' (a power of two), aligned to that same
* '1 << order' power of two.
*
* Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
* Returns 0 for all other reg_ops and in all other cases.
*/
enum {
REG_OP_ISFREE, /* true if region is all zero bits */
REG_OP_ALLOC, /* set all bits in region */
REG_OP_RELEASE, /* clear all bits in region */
};
static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
int nbits_reg; /* number of bits in region */
int index; /* index first long of region in bitmap */
int offset; /* bit offset region in bitmap[index] */
int nlongs_reg; /* num longs spanned by region in bitmap */
int nbitsinlong; /* num bits of region in each spanned long */
unsigned long mask; /* bitmask for one long of region */
int i; /* scans bitmap by longs */
int ret = 0; /* return value */
/*
* Either nlongs_reg == 1 (for small orders that fit in one long)
* or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
*/
nbits_reg = 1 << order;
index = pos / BITS_PER_LONG;
offset = pos - (index * BITS_PER_LONG);
nlongs_reg = BITS_TO_LONGS(nbits_reg);
nbitsinlong = min(nbits_reg, BITS_PER_LONG);
/*
* Can't do "mask = (1UL << nbitsinlong) - 1", as that
* overflows if nbitsinlong == BITS_PER_LONG.
*/
mask = (1UL << (nbitsinlong - 1));
mask += mask - 1;
mask <<= offset;
switch (reg_op) {
case REG_OP_ISFREE:
for (i = 0; i < nlongs_reg; i++) {
if (bitmap[index + i] & mask)
goto done;
}
ret = 1; /* all bits in region free (zero) */
break;
case REG_OP_ALLOC:
for (i = 0; i < nlongs_reg; i++)
bitmap[index + i] |= mask;
break;
case REG_OP_RELEASE:
for (i = 0; i < nlongs_reg; i++)
bitmap[index + i] &= ~mask;
break;
}
done:
return ret;
}
/**
* bitmap_find_free_region - find a contiguous aligned mem region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @bits: number of bits in the bitmap
* @order: region size (log base 2 of number of bits) to find
*
* Find a region of free (zero) bits in a @bitmap of @bits bits and
* allocate them (set them to one). Only consider regions of length
* a power (@order) of two, aligned to that power of two, which
* makes the search algorithm much faster.
*
* Return the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
{
int pos, end; /* scans bitmap by regions of size order */
for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
continue;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
return pos;
}
return -ENOMEM;
}
EXPORT_SYMBOL(bitmap_find_free_region);
/**
* bitmap_release_region - release allocated bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to release
* @order: region size (log base 2 of number of bits) to release
*
* This is the complement to bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*
* No return value.
*/
void bitmap_release_region(unsigned long *bitmap, int pos, int order)
{
__reg_op(bitmap, pos, order, REG_OP_RELEASE);
}
EXPORT_SYMBOL(bitmap_release_region);
/**
* bitmap_allocate_region - allocate bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to allocate
* @order: region size (log base 2 of number of bits) to allocate
*
* Allocate (set bits in) a specified region of a bitmap.
*
* Return 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
{
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
return -EBUSY;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
return 0;
}
EXPORT_SYMBOL(bitmap_allocate_region);
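Typical use of the region API above as a fixed pool allocator; a sketch only, with an arbitrary pool size and order (8 == 1 << 3 contiguous, aligned slots per allocation), using only functions defined in this file:

#define POOL_BITS	128

static DECLARE_BITMAP(demo_pool, POOL_BITS);

/* Grab eight aligned, contiguous slots; returns the first slot or -errno. */
static int pool_alloc8(void)
{
	return bitmap_find_free_region(demo_pool, POOL_BITS, 3);
}

static void pool_free8(int pos)
{
	bitmap_release_region(demo_pool, pos, 3);
}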
/**
* bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
* @dst: destination buffer
* @src: bitmap to copy
* @nbits: number of bits in the bitmap
*
* Require nbits % BITS_PER_LONG == 0.
*/
void bitmap_copy_le(void *dst, const unsigned long *src, int nbits)
{
unsigned long *d = dst;
int i;
for (i = 0; i < nbits/BITS_PER_LONG; i++) {
if (BITS_PER_LONG == 64)
d[i] = cpu_to_le64(src[i]);
else
d[i] = cpu_to_le32(src[i]);
}
}
EXPORT_SYMBOL(bitmap_copy_le);
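bitmap_copy_le() is meant for handing a bitmap to hardware or an external format that expects little-endian words regardless of host endianness; a minimal sketch with a hypothetical 64-bit mask (note the multiple-of-BITS_PER_LONG requirement):

#define MASK_BITS	64	/* a multiple of BITS_PER_LONG on 32- and 64-bit */

/* Hypothetical: serialize an interrupt-mask bitmap into a LE buffer. */
static void pack_irq_mask(void *le_buf, const unsigned long *mask)
{
	bitmap_copy_le(le_buf, mask, MASK_BITS);
}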

View File

@ -14,8 +14,6 @@
printf ("%.*s", n, str); \
} while (0)
#define MAX_CMDBUF_SIZE 256
#define CTL_BACKSPACE ('\b')
#define DEL 255
#define DEL7 127
@ -25,90 +23,82 @@
#define getcmd_getch() getc()
#define getcmd_cbeep() getcmd_putch('\a')
#define HIST_MAX 20
#define HIST_SIZE MAX_CMDBUF_SIZE
struct history {
char *line;
struct list_head list;
};
static int hist_max = 0;
static int hist_add_idx = 0;
static int hist_cur = -1;
static unsigned hist_num = 0;
static LIST_HEAD(history_list);
static char* hist_list[HIST_MAX];
static char hist_lines[HIST_MAX][HIST_SIZE];
#define add_idx_minus_one() ((hist_add_idx == 0) ? hist_max : hist_add_idx-1)
static int hist_init(void)
{
int i;
hist_max = 0;
hist_add_idx = 0;
hist_cur = -1;
hist_num = 0;
for (i = 0; i < HIST_MAX; i++) {
hist_list[i] = hist_lines[i];
hist_list[i][0] = '\0';
}
return 0;
}
postcore_initcall(hist_init);
static struct list_head *history_current;
static int history_num_entries;
static void cread_add_to_hist(char *line)
{
strcpy(hist_list[hist_add_idx], line);
struct history *history;
char *newline;
if (++hist_add_idx >= HIST_MAX)
hist_add_idx = 0;
if (!list_empty(&history_list)) {
history = list_last_entry(&history_list, struct history, list);
if (hist_add_idx > hist_max)
hist_max = hist_add_idx;
if (!strcmp(line, history->line))
return;
}
hist_num++;
newline = strdup(line);
if (!newline)
return;
if (history_num_entries < 32) {
history = xzalloc(sizeof(*history));
history_num_entries++;
} else {
history = list_first_entry(&history_list, struct history, list);
free(history->line);
list_del(&history->list);
}
history->line = newline;
list_add_tail(&history->list, &history_list);
}
static char* hist_prev(void)
static const char *hist_prev(void)
{
char *ret;
int old_cur;
struct history *history;
if (hist_cur < 0)
return NULL;
if (history_current->prev == &history_list) {
history = list_entry(history_current, struct history, list);
getcmd_cbeep();
return history->line;
}
old_cur = hist_cur;
if (--hist_cur < 0)
hist_cur = hist_max;
history = list_entry(history_current->prev, struct history, list);
if (hist_cur == hist_add_idx) {
hist_cur = old_cur;
ret = NULL;
} else
ret = hist_list[hist_cur];
history_current = &history->list;
return (ret);
return history->line;
}
static char* hist_next(void)
static const char *hist_next(void)
{
char *ret;
struct history *history;
if (hist_cur < 0)
return NULL;
if (history_current->next == &history_list) {
history_current = &history_list;
return "";
}
if (hist_cur == hist_add_idx)
return NULL;
if (history_current == &history_list) {
getcmd_cbeep();
return "";
}
if (++hist_cur > hist_max)
hist_cur = 0;
history = list_entry(history_current->next, struct history, list);
if (hist_cur == hist_add_idx) {
ret = "";
} else
ret = hist_list[hist_cur];
history_current = &history->list;
return (ret);
return history->line;
}
#define BEGINNING_OF_LINE() { \
@ -198,6 +188,8 @@ int readline(const char *prompt, char *buf, int len)
complete_reset();
#endif
history_current = &history_list;
puts (prompt);
while (1) {
@ -302,18 +294,13 @@ int readline(const char *prompt, char *buf, int len)
case BB_KEY_UP:
case BB_KEY_DOWN:
{
char * hline;
const char *hline;
if (ichar == BB_KEY_UP)
hline = hist_prev();
else
hline = hist_next();
if (!hline) {
getcmd_cbeep();
continue;
}
/* nuke the current line */
/* first, go home */
BEGINNING_OF_LINE();
@ -346,9 +333,8 @@ int readline(const char *prompt, char *buf, int len)
len = eol_num;
buf[eol_num] = '\0'; /* lose the newline */
if (buf[0] && buf[0] != CREAD_HIST_CHAR)
if (buf[0])
cread_add_to_hist(buf);
hist_cur = hist_add_idx;
return len;
}