First implementation

2019-12-14 11:56:22 +01:00
parent a13838dbc5
commit be6a9c6f73
1060 changed files with 326870 additions and 0 deletions

vendor/periph.io/x/periph/host/pmem/alloc.go generated vendored Normal file

@@ -0,0 +1,182 @@
// Copyright 2016 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
package pmem
import (
"bytes"
"io"
"io/ioutil"
"reflect"
"sync"
"unsafe"
)
const pageSize = 4096
// Mem represents a section of memory that is usable by the DMA controller.
//
// Since this is physically allocated memory that may have been obtained
// without the OS being fully aware of it, for example by asking the GPU
// directly, it is important to call Close() before process exit.
type Mem interface {
io.Closer
// Bytes returns the user space memory mapped buffer address as a slice of
// bytes.
//
// It is the raw view of the memory from this process.
Bytes() []byte
// AsPOD initializes a pointer to a POD (plain old data) to point to the
// memory mapped region.
//
// pp must be a pointer to:
//
// - pointer to a base size type (uint8, int64, float32, etc)
// - struct
// - array of the above
// - slice of the above
//
// and the value must be nil. Returns an error otherwise.
//
// If a pointer to a slice is passed in, its length and capacity are set to
// the maximum number of elements this slice can represent.
//
// The pointer initialized points to the same address as Bytes().
AsPOD(pp interface{}) error
// PhysAddr is the physical address. It can be either 32 bits or 64 bits,
// depending on the bitness of the OS kernel, not on the user mode build;
// e.g. the code could have been compiled with a 32 bits Go toolchain yet run
// on a 64 bits kernel.
PhysAddr() uint64
}
// MemAlloc represents contiguous physically locked memory that was allocated.
//
// The memory is mapped in user space.
//
// MemAlloc implements Mem.
type MemAlloc struct {
View
}
// Close unmaps the physical memory allocation.
func (m *MemAlloc) Close() error {
if err := munlock(m.orig); err != nil {
return err
}
return munmap(m.orig)
}
// Alloc allocates a contiguous chunk of physical memory.
//
// Size must be rounded to 4Kb. Allocations of 4Kb will normally succeed.
// Allocations larger than 64Kb will likely fail due to kernel memory
// fragmentation; rebooting the host or reducing the number of running programs
// may help.
//
// The allocated memory is uncached.
func Alloc(size int) (*MemAlloc, error) {
if size == 0 || size&(pageSize-1) != 0 {
return nil, wrapf("allocated memory must be rounded to %d bytes", pageSize)
}
if isLinux && !isWSL() {
return allocLinux(size)
}
return nil, wrapf("memory allocation is not supported on this platform")
}
//
var (
wslOnce sync.Once
isWSLValue bool
)
// uallocMemLocked allocates user space memory and requests the OS to lock
// the chunk into physical memory.
func uallocMemLocked(size int) ([]byte, error) {
// It is important to write to the memory so it is forced to be present.
b, err := uallocMem(size)
if err == nil {
for i := range b {
b[i] = 0
}
if err := mlock(b); err != nil {
// Ignore the unmap error.
_ = munmap(b)
return nil, wrapf("locking %d bytes failed: %v", size, err)
}
}
return b, err
}
// allocLinux allocates physical memory and returns a user view to it.
func allocLinux(size int) (*MemAlloc, error) {
// TODO(maruel): Implement the "shotgun approach". Allocate a ton of 4Kb
// pages and lock them. Then look at their physical pages and only keep the
// useful ones. Then create a linear mapping in memory to simplify the user
// mode with a single linear user space virtual address, but keep the
// individual pages alive with their initial allocation. When done, release
// each individual page.
if size > pageSize {
return nil, wrapf("large allocation is not yet implemented")
}
// First allocate a chunk of user space memory.
b, err := uallocMemLocked(size)
if err != nil {
return nil, err
}
pages := make([]uint64, (size+pageSize-1)/pageSize)
// Figure out the physical memory addresses.
for i := range pages {
pages[i], err = virtToPhys(toRaw(b[pageSize*i:]))
if err != nil {
return nil, err
}
if pages[i] == 0 {
return nil, wrapf("failed to read page %d", i)
}
}
for i := 1; i < len(pages); i++ {
// Fail if the memory is not contiguous.
if pages[i] != pages[i-1]+pageSize {
return nil, wrapf("failed to allocate %d bytes of continugous physical memory; page %d =0x%x; page %d=0x%x", size, i, pages[i], i-1, pages[i-1])
}
}
return &MemAlloc{View{Slice: b, phys: pages[0], orig: b}}, nil
}
// virtToPhys returns the physical memory address backing a virtual
// memory address.
func virtToPhys(virt uintptr) (uint64, error) {
physPage, err := ReadPageMap(virt)
if err != nil {
return 0, err
}
if physPage&(1<<63) == 0 {
// If the high bit is not set, the page doesn't exist.
return 0, wrapf("0x%08x has no physical address", virt)
}
// Strip the flags. See the Linux documentation on kernel.org for more details.
physPage &^= 0x1FF << 55
return physPage * pageSize, nil
}
func toRaw(b []byte) uintptr {
header := *(*reflect.SliceHeader)(unsafe.Pointer(&b))
return header.Data
}
// isWSL returns true if running under Windows Subsystem for Linux.
func isWSL() bool {
wslOnce.Do(func() {
if c, err := ioutil.ReadFile("/proc/sys/kernel/osrelease"); err == nil {
isWSLValue = bytes.Contains(c, []byte("Microsoft"))
}
})
return isWSLValue
}
var _ Mem = &MemAlloc{}

vendor/periph.io/x/periph/host/pmem/doc.go generated vendored Normal file

@@ -0,0 +1,69 @@
// Copyright 2016 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// Package pmem implements handling of physical memory for user space programs.
//
// To make things confusing, a modern computer has many views of the memory
// (address spaces):
//
// User
//
// User mode address space is the virtual address space that an application
// runs in. It is generally a tad less than half the addressable memory, so on
// a 32 bits system, the addressable range is 1.9Gb. On a 64 bits OS, it
// depends, but it is usually at least 3.5Gb. The memory is virtual and can be
// flushed to disk in the swap file unless individual pages are locked.
//
// Kernel
//
// Kernel address space is the virtual address space the kernel sees. It can
// often see the currently active user space program on the current CPU core,
// in addition to all the memory the kernel itself manages. The kernel memory
// pages that are not mlock()'ed are 'virtual' and can be flushed to disk in
// the swap file when there's not enough RAM available. On Linux systems, the
// kernel addressed memory can be mapped in user space via `/dev/kmem`.
//
// Physical
//
// Physical memory address space is the actual address of each page in the DRAM
// chip and anything connected to the memory controller. The mapping may be
// different depending on which controller looks at the bus, like with an
// IOMMU. So a peripheral (GPU, DMA controller) may have a different view of
// the physical memory than the host CPU. On Linux systems, this memory can be
// mapped in user space via `/dev/mem`.
//
// CPU
//
// The CPU or its subsystems may memory map registers (for example, to control
// GPIO pins, clock speed, etc). This is not "real" memory; it is a view of
// registers, but it still mostly follows the same semantics as DRAM backed
// physical memory.
//
// Some CPU memory may have very special semantics where the mere act of
// reading has side effects. For example, reading a specific register may
// latch another.
//
// CPU memory accesses are layered with multiple caches, usually named L1, L2
// and optionally L3. Some controllers (DMA) can see some cache levels (L2) but
// not others (L1) on some CPU architectures (bcm283x). This means that a user
// space program writing data to a memory page and immediately asking the DMA
// controller to read it may cause stale data to be read!
//
// Hypervisor
//
// A hypervisor can change the complete memory mapping as seen by the kernel.
// This is outside the scope of this project. :)
//
// Summary
//
// In practice, the semantics change between CPU manufacturers (Broadcom vs
// Allwinner) and between architectures (ARM vs x86). The trickiest part is
// understanding cached memory and how it affects coherence and performance.
// Uncached memory is extremely slow, so it must only be used when necessary.
//
// References
//
// Overview of IOMMU:
// https://en.wikipedia.org/wiki/Input-output_memory_management_unit
package pmem

vendor/periph.io/x/periph/host/pmem/mem_linux.go generated vendored Normal file

@@ -0,0 +1,56 @@
// Copyright 2016 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
package pmem
import "syscall"
const isLinux = true
func mmap(fd uintptr, offset int64, length int) ([]byte, error) {
v, err := syscall.Mmap(int(fd), offset, length, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
if err != nil {
return nil, wrapf("failed to memory map: %v", err)
}
return v, nil
}
func munmap(b []byte) error {
if err := syscall.Munmap(b); err != nil {
return wrapf("failed to unmap memory: %v", err)
}
return nil
}
func mlock(b []byte) error {
if err := syscall.Mlock(b); err != nil {
return wrapf("failed to lock memory: %v", err)
}
return nil
}
func munlock(b []byte) error {
if err := syscall.Munlock(b); err != nil {
return wrapf("failed to unlock memory: %v", err)
}
return nil
}
// uallocMem allocates user space memory.
func uallocMem(size int) ([]byte, error) {
b, err := syscall.Mmap(
0,
0,
size,
syscall.PROT_READ|syscall.PROT_WRITE,
syscall.MAP_ANONYMOUS|syscall.MAP_LOCKED|syscall.MAP_NORESERVE|syscall.MAP_SHARED)
// syscall.MAP_HUGETLB / MAP_HUGE_2MB
// See /sys/kernel/mm/hugepages but both C.H.I.P. running Jessie and Raspbian
// Jessie do not expose huge pages. :(
if err != nil {
return nil, wrapf("allocating %d bytes failed: %v", size, err)
}
return b, err
}
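
As an aside to the MAP_HUGETLB note above, here is a small illustrative probe (not part of this package) that lists the huge page sizes the kernel exposes under /sys/kernel/mm/hugepages:

package main

import (
	"fmt"
	"io/ioutil"
)

func main() {
	// Each entry is named after a page size, e.g. "hugepages-2048kB".
	entries, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
	if err != nil || len(entries) == 0 {
		fmt.Println("no huge page sizes exposed")
		return
	}
	for _, e := range entries {
		fmt.Println(e.Name())
	}
}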

vendor/periph.io/x/periph/host/pmem/mem_other.go generated vendored Normal file

@@ -0,0 +1,30 @@
// Copyright 2016 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// +build !linux
package pmem
const isLinux = false
func mmap(fd uintptr, offset int64, length int) ([]byte, error) {
return nil, wrapf("syscall.Mmap() not implemented on this OS")
}
func munmap(b []byte) error {
return wrapf("syscall.Munmap() not implemented on this OS")
}
func mlock(b []byte) error {
return wrapf("syscall.Mlock() not implemented on this OS")
}
func munlock(b []byte) error {
return wrapf("syscall.Munlock() not implemented on this OS")
}
// uallocMem allocates user space memory.
func uallocMem(size int) ([]byte, error) {
return make([]byte, size), nil
}

vendor/periph.io/x/periph/host/pmem/pagemap.go generated vendored Normal file

@@ -0,0 +1,64 @@
// Copyright 2016 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
package pmem
import (
"encoding/binary"
"errors"
"fmt"
"os"
)
// ReadPageMap reads a physical address mapping for a virtual page address from
// /proc/self/pagemap.
//
// It returns the physical address that corresponds to the start of the virtual
// page within which the virtual address virtAddr is located.
//
// The meaning of the return value is documented at
// https://www.kernel.org/doc/Documentation/vm/pagemap.txt
func ReadPageMap(virtAddr uintptr) (uint64, error) {
if !isLinux || isWSL() {
return 0, errors.New("pmem: pagemap is not supported on this platform")
}
return readPageMapLinux(virtAddr)
}
//
var (
pageMap fileIO
pageMapErr error
)
func readPageMapLinux(virtAddr uintptr) (uint64, error) {
var b [8]byte
mu.Lock()
defer mu.Unlock()
if pageMap == nil && pageMapErr == nil {
// Open /proc/self/pagemap.
//
// It is a uint64 array where the index represents the virtual 4Kb page
// number and the value represents the physical page properties backing
// this virtual page.
pageMap, pageMapErr = openFile("/proc/self/pagemap", os.O_RDONLY|os.O_SYNC)
}
if pageMapErr != nil {
return 0, pageMapErr
}
// Convert address to page number, then index in uint64 array.
offset := int64(virtAddr / pageSize * 8)
if _, err := pageMap.Seek(offset, os.SEEK_SET); err != nil {
return 0, fmt.Errorf("pmem: failed to seek at 0x%x for 0x%x: %v", offset, virtAddr, err)
}
n, err := pageMap.Read(b[:])
if err != nil {
return 0, fmt.Errorf("pmem: failed to read at 0x%x for 0x%x: %v", offset, virtAddr, err)
}
if n != len(b) {
return 0, fmt.Errorf("pmem: failed to read the amount of data %d", len(b))
}
return binary.LittleEndian.Uint64(b[:]), nil
}
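
A sketch of how a pagemap entry returned by ReadPageMap decodes, mirroring the logic of virtToPhys() in alloc.go (the helper and the sample entry below are illustrative only, not part of the package):

package main

import "fmt"

const pageSize = 4096

// decodePageMapEntry extracts the physical address from a pagemap entry:
// bit 63 marks the page as present; bits 0-54 hold the physical frame number.
func decodePageMapEntry(entry uint64, virt uintptr) (uint64, error) {
	if entry&(1<<63) == 0 {
		return 0, fmt.Errorf("0x%08x has no physical address", virt)
	}
	entry &^= 0x1FF << 55 // strip the flag bits 55..63
	// The page base address, plus the offset of the address within the page.
	return entry*pageSize + uint64(virt&(pageSize-1)), nil
}

func main() {
	// Made-up entry: present bit set, physical frame number 0x2AB3.
	entry := uint64(1<<63 | 0x2AB3)
	phys, _ := decodePageMapEntry(entry, 0x12345678)
	fmt.Printf("%#x\n", phys) // 0x2ab3000 + 0x678 = 0x2ab3678
}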

vendor/periph.io/x/periph/host/pmem/smoketest.go generated vendored Normal file

@@ -0,0 +1,89 @@
// Copyright 2016 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
package pmem
import (
"bytes"
"math/rand"
)
// TestCopy is used by CPU drivers to verify that the DMA engine works
// correctly.
//
// It is not meant to be used by end users.
//
// TestCopy allocates two buffers via `alloc`, one as the source and one as the
// destination. It fills the source with random data and the destination with
// 0x11.
//
// `copyMem` is expected to copy `size-2*holeSize` bytes from the start of pSrc
// into pDst at offset `holeSize`.
//
// The function `copyMem` being tested is only given the buffers' physical
// addresses and must copy the data without other help. It is expected to
// leverage the host's DMA engine.
//
// This confirms that misaligned DMA copying works.
func TestCopy(size, holeSize int, alloc func(size int) (Mem, error), copyMem func(pDst, pSrc uint64) error) error {
pSrc, err2 := alloc(size)
if err2 != nil {
return err2
}
defer pSrc.Close()
pDst, err2 := alloc(size)
if err2 != nil {
return err2
}
defer pDst.Close()
dst := pDst.Bytes()
for i := range dst {
dst[i] = 0x11
}
src := make([]byte, size)
for i := range src {
src[i] = byte(rand.Int31())
}
copy(pSrc.Bytes(), src[:])
// Run the driver supplied memory copying code.
if err := copyMem(pDst.PhysAddr(), pSrc.PhysAddr()); err != nil {
return err
}
// Verifications.
for i := 0; i < holeSize; i++ {
if dst[i] != 0x11 {
return wrapf("DMA corrupted the buffer header: %x", dst[:holeSize])
}
if dst[size-1-i] != 0x11 {
return wrapf("DMA corrupted the buffer footer: %x", dst[size-1-holeSize:])
}
}
// Headers and footers were not corrupted in the destination. Verify that the
// inner view matches.
x := src[:size-2*holeSize]
y := dst[holeSize : size-holeSize]
if !bytes.Equal(x, y) {
offset := 0
for len(x) != 0 && x[0] == y[0] {
x = x[1:]
y = y[1:]
offset++
}
for len(x) != 0 && x[len(x)-1] == y[len(y)-1] {
x = x[:len(x)-1]
y = y[:len(y)-1]
}
if len(x) > 32 {
x = x[:32]
}
if len(y) > 32 {
y = y[:32]
}
return wrapf("DMA corrupted the buffer at offset %d:\n%x\n%x", offset, x, y)
}
return nil
}
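
A sketch of how a driver might call TestCopy (illustrative only: fakeMem and the map-based copyMem below stand in for real DMA-able memory and a real DMA engine):

package main

import (
	"log"

	"periph.io/x/periph/host/pmem"
)

// fakeMem implements pmem.Mem over a plain in-process buffer with a made-up
// "physical" address.
type fakeMem struct {
	pmem.Slice        // provides Bytes() and AsPOD()
	phys       uint64
}

func (f *fakeMem) Close() error     { return nil }
func (f *fakeMem) PhysAddr() uint64 { return f.phys }

func main() {
	const size, hole = 4096, 16
	buffers := map[uint64][]byte{} // fake physical address -> buffer
	next := uint64(0x1000)
	alloc := func(n int) (pmem.Mem, error) {
		b := make([]byte, n)
		buffers[next] = b
		m := &fakeMem{Slice: pmem.Slice(b), phys: next}
		next += uint64(n)
		return m, nil
	}
	// Stand-in for the DMA engine: copy size-2*hole bytes from the start of
	// the source into the destination at offset hole.
	copyMem := func(pDst, pSrc uint64) error {
		copy(buffers[pDst][hole:size-hole], buffers[pSrc][:size-2*hole])
		return nil
	}
	if err := pmem.TestCopy(size, hole, alloc, copyMem); err != nil {
		log.Fatal(err)
	}
	log.Print("TestCopy passed")
}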

vendor/periph.io/x/periph/host/pmem/view.go generated vendored Normal file

@@ -0,0 +1,283 @@
// Copyright 2016 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
package pmem
import (
"fmt"
"io"
"os"
"reflect"
"sync"
"unsafe"
"periph.io/x/periph/host/fs"
)
// Slice can be transparently viewed as []byte, []uint32 or a struct.
type Slice []byte
// Uint32 returns a view of the byte slice as a []uint32.
func (s *Slice) Uint32() []uint32 {
header := *(*reflect.SliceHeader)(unsafe.Pointer(s))
header.Len /= 4
header.Cap /= 4
return *(*[]uint32)(unsafe.Pointer(&header))
}
// Bytes implements Mem.
func (s *Slice) Bytes() []byte {
return *s
}
// AsPOD implements Mem.
func (s *Slice) AsPOD(pp interface{}) error {
if pp == nil {
return wrapf("require Ptr, got nil")
}
vpp := reflect.ValueOf(pp)
if elemSize, err := isPS(len(*s), vpp); err == nil {
p := vpp.Elem()
t := p.Type().Elem()
if elemSize > len(*s) {
return wrapf("can't map slice of struct %s (size %d) on [%d]byte", t, elemSize, len(*s))
}
nbElems := len(*s) / elemSize
// Use casting black magic to set the internal slice headers.
hdr := (*reflect.SliceHeader)(unsafe.Pointer(p.UnsafeAddr()))
hdr.Data = ((*reflect.SliceHeader)(unsafe.Pointer(s))).Data
hdr.Len = nbElems
hdr.Cap = nbElems
return nil
}
size, err := isPP(vpp)
if err != nil {
return err
}
p := vpp.Elem()
t := p.Type().Elem()
if size > len(*s) {
return wrapf("can't map struct %s (size %d) on [%d]byte", t, size, len(*s))
}
// Use casting black magic to read the internal slice headers.
dest := unsafe.Pointer(((*reflect.SliceHeader)(unsafe.Pointer(s))).Data)
// Use reflection black magic to write to the original pointer.
p.Set(reflect.NewAt(t, dest))
return nil
}
// View represents a view of physical memory that is memory mapped into user
// space.
//
// It is usually used to map CPU registers into user space, e.g. I/O registers
// and the like.
//
// It is not required to call Close(); the kernel will clean up on process
// shutdown.
type View struct {
Slice
orig []uint8 // Reference rounded to the lowest 4Kb page containing Slice.
phys uint64 // physical address of the base of Slice.
}
// Close unmaps the memory from the user address space.
//
// This is done naturally by the OS on process teardown (when the process
// exits), so calling this function is not a hard requirement.
func (v *View) Close() error {
return munmap(v.orig)
}
// PhysAddr implements Mem.
func (v *View) PhysAddr() uint64 {
return v.phys
}
// MapGPIO returns a CPU specific memory mapping of the CPU I/O registers using
// /dev/gpiomem.
//
// At the moment, /dev/gpiomem is only supported on Raspbian Jessie via a
// specific kernel driver.
func MapGPIO() (*View, error) {
if isLinux {
return mapGPIOLinux()
}
return nil, wrapf("/dev/gpiomem is not supported on this platform")
}
// Map returns a memory mapped view of arbitrary physical memory range using OS
// provided functionality.
//
// It maps `size` bytes of memory, with the mapping rounded to 4Kb pages.
//
// This function is dangerous and should be used wisely. It normally requires
// super privileges (root). On Linux, it leverages /dev/mem.
func Map(base uint64, size int) (*View, error) {
if isLinux {
return mapLinux(base, size)
}
return nil, wrapf("physical memory mapping is not supported on this platform")
}
// MapAsPOD is a leaky shorthand for calling Map(base, sizeof(v)) then
// AsPOD(v).
//
// There is no way to reclaim the memory map.
//
// A slice cannot be used, as it does not have an inherent size. Use an array
// instead.
func MapAsPOD(base uint64, i interface{}) error {
// Automatically determine the necessary size. Because of this, a slice of
// unspecified length cannot be used here.
if i == nil {
return wrapf("require Ptr, got nil")
}
v := reflect.ValueOf(i)
size, err := isPP(v)
if err != nil {
return err
}
m, err := Map(base, size)
if err != nil {
return err
}
return m.AsPOD(i)
}
//
// Keep a cache of open file handles instead of opening and closing repeatedly.
var (
mu sync.Mutex
gpioMemErr error
gpioMemView *View
devMem fileIO
devMemErr error
openFile = openFileOrig
)
type fileIO interface {
io.Closer
io.Seeker
io.Reader
Fd() uintptr
}
func openFileOrig(path string, flag int) (fileIO, error) {
f, err := fs.Open(path, flag)
if err != nil {
return nil, err
}
return f, nil
}
// mapGPIOLinux is purely Raspbian specific.
func mapGPIOLinux() (*View, error) {
mu.Lock()
defer mu.Unlock()
if gpioMemView == nil && gpioMemErr == nil {
if f, err := openFile("/dev/gpiomem", os.O_RDWR|os.O_SYNC); err == nil {
defer f.Close()
if i, err := mmap(f.Fd(), 0, pageSize); err == nil {
gpioMemView = &View{Slice: i, orig: i, phys: 0}
} else {
gpioMemErr = wrapf("failed to memory map in user space GPIO memory: %v", err)
}
} else {
gpioMemErr = wrapf("failed to open GPIO memory: %v", err)
}
}
return gpioMemView, gpioMemErr
}
// mapLinux leverages /dev/mem to map a view of physical memory.
func mapLinux(base uint64, size int) (*View, error) {
f, err := openDevMemLinux()
if err != nil {
return nil, err
}
// Align base and size at 4Kb.
offset := int(base & 0xFFF)
i, err := mmap(f.Fd(), int64(base&^0xFFF), (size+offset+0xFFF)&^0xFFF)
if err != nil {
return nil, wrapf("mapping at 0x%x failed: %v", base, err)
}
return &View{Slice: i[offset : offset+size], orig: i, phys: base + uint64(offset)}, nil
}
func openDevMemLinux() (fileIO, error) {
mu.Lock()
defer mu.Unlock()
if devMem == nil && devMemErr == nil {
if devMem, devMemErr = openFile("/dev/mem", os.O_RDWR|os.O_SYNC); devMemErr != nil {
devMemErr = wrapf("failed to open physical memory: %v", devMemErr)
}
}
return devMem, devMemErr
}
func isAcceptableInner(t reflect.Type) error {
switch k := t.Kind(); k {
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Float32, reflect.Float64:
return nil
case reflect.Array:
return isAcceptableInner(t.Elem())
case reflect.Struct:
for i := 0; i < t.NumField(); i++ {
if err := isAcceptableInner(t.Field(i).Type); err != nil {
return err
}
}
return nil
default:
return wrapf("require Ptr to Ptr to a POD type, got Ptr to Ptr to %s", k)
}
}
// isPP makes sure it is a pointer to a nil-pointer to something. It does
// sanity checks to reduce the likelihood of a panic().
func isPP(pp reflect.Value) (int, error) {
if k := pp.Kind(); k != reflect.Ptr {
return 0, wrapf("require Ptr, got %s of %s", k, pp.Type().Name())
}
p := pp.Elem()
if k := p.Kind(); k != reflect.Ptr {
return 0, wrapf("require Ptr to Ptr, got %s", k)
}
if !p.IsNil() {
return 0, wrapf("require Ptr to Ptr to be nil")
}
// p.Elem() can't be used since it's a nil pointer. Use the type instead.
t := p.Type().Elem()
if err := isAcceptableInner(t); err != nil {
return 0, err
}
return int(t.Size()), nil
}
// isPS makes sure it is a pointer to a nil-slice of something. It does
// sanity checks to reduce the likelihood of a panic().
func isPS(bufSize int, ps reflect.Value) (int, error) {
if k := ps.Kind(); k != reflect.Ptr {
return 0, wrapf("require Ptr, got %s of %s", k, ps.Type().Name())
}
s := ps.Elem()
if k := s.Kind(); k != reflect.Slice {
return 0, wrapf("require Ptr to Slice, got %s", k)
}
if !s.IsNil() {
return 0, wrapf("require Ptr to Slice to be nil")
}
// s.Elem() can't be used since it's a nil slice. Use the type instead.
t := s.Type().Elem()
if err := isAcceptableInner(t); err != nil {
return 0, err
}
return int(t.Size()), nil
}
func wrapf(format string, a ...interface{}) error {
return fmt.Errorf("pmem: "+format, a...)
}
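
Finally, a sketch of mapping CPU registers via MapAsPOD (illustrative only: the base address 0x3F200000 and the register layout below correspond to the bcm283x GPIO bank on a Raspberry Pi 3, and root is required since the mapping goes through /dev/mem):

package main

import (
	"log"

	"periph.io/x/periph/host/pmem"
)

// gpioRegs mimics the start of the bcm283x GPIO register bank. Only
// fixed-size POD fields pass isAcceptableInner().
type gpioRegs struct {
	FSEL [6]uint32 // GPFSEL0..GPFSEL5: pin function selection
}

func main() {
	var regs *gpioRegs // must be a nil pointer, per isPP()
	if err := pmem.MapAsPOD(0x3F200000, &regs); err != nil {
		log.Fatal(err)
	}
	// The mapping is leaky; there is no way to unmap it.
	log.Printf("GPFSEL0 = %#08x", regs.FSEL[0])
}

Alternatively, pmem.Map(0x3F200000, 4096) followed by the View's Uint32() method yields an untyped []uint32 window over the same registers.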