Skip to content
Snippets Groups Projects
Commit 538dce54 authored by John Hodge's avatar John Hodge
Browse files

Kernel Core - A bunch of changes from fiddling with USB

parent 78119c7a
Branches
No related merge requests found
......@@ -4,6 +4,7 @@
// Core/async-v3/buffer.rs
//! Asynchronous buffer handles
/*
/// Buffer providing a location for read data (incoming / mutable)
pub struct ReadBuffer<'a> {
}
......@@ -15,9 +16,18 @@ pub struct ReadBufferHandle<'a> {
// Needs to hold a borrow on the buffer
buf: &'a ReadBuffer<'a>,
}
pub struct WriteBufferHandle<'a> {
*/
/// Handle to an output (write) buffer used by an async operation.
///
/// The `'async: 'local` bound declares that the long-lived buffer lifetime
/// outlives the stack-local one.
pub enum WriteBufferHandle<'async: 'local,'local> {
/// Buffer that outlives the async stack
Long(&'async mut [u8]),
/// A buffer that doesn't outlive the async stack (and will have to be buffered by something)
Short(&'local mut [u8]),
///// A buffer with ownership being passed in.
//Owned(Vec<u8>),
}
/*
impl ReadBuffer<'a>
{
// UNSAFE: If this is leaked while borrowed, the borrow will access invalidated memory
......@@ -39,4 +49,5 @@ impl ReadBuffer<'a>
todo!("ReadBuffer::borrow");
}
}
*/
......@@ -6,12 +6,13 @@
mod waiter;
pub mod mutex;
//pub mod buffer;
pub mod buffer;
pub use self::waiter::{Layer, ObjectHandle, StackPush};
pub use self::waiter::{Object, Waiter, WaitResult};
pub use self::mutex::Mutex;
//pub use self::buffer::{WriteBuffer, ReadBuffer, WriteBufferHandle, ReadBufferHandle};
pub use self::buffer::WriteBufferHandle;
//{WriteBuffer, ReadBuffer, WriteBufferHandle, ReadBufferHandle};
......@@ -56,6 +56,7 @@ struct ObjectInner
result: Spinlock<Option<usize>>,
}
/// A handle to an Object (as passed to state updates)
#[derive(Clone)]
pub struct ObjectHandle
{
ptr: ::core::ptr::NonNull<ObjectInner>,
......
......@@ -75,7 +75,7 @@ impl ::device_manager::Driver for PCIChildBusDriver
let bridge_type = (read_word(addr, 3) >> 16) & 0x7F;
// 0x00 == Normal device, 0x01 = PCI-PCI Bridge
// -> There should only be one PCI bridge handler, but bind low just in case
if bridge_type == 0x01 { 1 } else {0 }
if bridge_type == 0x01 { 1 } else { 0 }
}
fn bind(&self, bus_dev: &mut ::device_manager::BusDevice) -> Box<::device_manager::DriverInstance+'static>
{
......@@ -151,7 +151,10 @@ impl ::device_manager::BusDevice for PCIDev
}
match parse_bar(self.addr, 4+block_id as u8)
{
BAR::None => ::device_manager::IOBinding::IO(0,0),
BAR::None => {
log_error!("PCI bind_io - Request for BAR{} of {:#x} which isn't populated", block_id, self.addr);
::device_manager::IOBinding::IO(0,0)
},
BAR::IO(b,s) => ::device_manager::IOBinding::IO(b,s),
BAR::Mem(base, size, _prefetchable) => {
// TODO: Ensure safety by preventing multiple bindings to a BAR
......@@ -241,6 +244,7 @@ fn parse_bar(addr: u16, word: u8) -> BAR
let value = read_word(addr, word);
if value == 0
{
log_debug!("parse_bar: None");
BAR::None
}
else if value & 1 == 0
......@@ -249,6 +253,7 @@ fn parse_bar(addr: u16, word: u8) -> BAR
let one_value = read_word(addr, word);
let size = !(one_value & 0xFFFF_FFF0) + 1;
write_word(addr, word, value);
log_debug!("parse_bar: (memory) one_value={:#x}, size={:#x}, value={:#x}", one_value, size, value);
// memory BAR
let pf = (value >> 3) & 1;
let ty = (value >> 1) & 3;
......@@ -276,7 +281,7 @@ fn parse_bar(addr: u16, word: u8) -> BAR
write_word(addr, word, 0xFFFF);
let one_value = read_word(addr, word);
let size = ( !(one_value & 0xFFFC) + 1 ) & 0xFFFF;
log_debug!("one_value = {:#x}, size={:#x}, value={:#x}", one_value, size, value);
log_debug!("parse_bar: (IO) one_value = {:#x}, size={:#x}, value={:#x}", one_value, size, value);
write_word(addr, word, value);
BAR::IO( (value & 0xFFFC) as u16, size as u16 )
}
......
......@@ -55,6 +55,15 @@ impl<T: ?Sized> Aref<T>
pub fn borrow(&self) -> ArefBorrow<T> {
self.__inner.borrow()
}
/// Obtain a mutable reference to the inner (if unique)
///
/// Returns `None` when the reference count is non-zero.
/// NOTE(review): assumes `count` tracks live `ArefBorrow` handles, with zero
/// meaning this `Aref` is the sole owner — confirm against the inner type's
/// borrow/drop logic before relying on this for soundness.
pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if this.__inner.count.load(Ordering::SeqCst) == 0 {
Some(&mut this.__inner.data)
}
else {
None
}
}
}
impl<T: ?Sized> ops::Deref for Aref<T>
{
......
......@@ -13,6 +13,7 @@ pub use self::vec::Vec;
pub use self::sparse_vec::SparseVec;
pub use self::string::String;
pub use self::lazy_static::LazyStatic;
pub use self::vec_deque::VecDeque;
pub use self::pod::POD;
pub use self::pod::{as_byte_slice, as_byte_slice_mut};
......@@ -35,6 +36,7 @@ pub mod queue;
#[macro_use]
pub mod vec;
pub mod sparse_vec;
pub mod vec_deque;
#[macro_use]
pub mod string;
......
......@@ -14,6 +14,7 @@ impl<T> !POD for *const T {}
impl<T> !POD for *mut T {}
impl<'a, T> !POD for &'a T {}
impl<'a, T> !POD for &'a mut T {}
// TODO: Can there be an impl for the atomics?
pub fn as_byte_slice<T: ?Sized + POD>(s: &T) -> &[u8] {
// SAFE: Plain-old-data
......
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/lib/vec_deque.rs
//! Dynamic array backed dequeue
use memory::heap::ArrayAlloc;
/// Growable ring-buffer backed double-ended queue.
pub struct VecDeque<T>
{
// Backing storage; capacity (`data.count()`) may exceed `len`
data: ArrayAlloc<T>,
// Index of the first (oldest) element within `data`
ofs: usize,
// Number of initialised entries, starting at `ofs` and wrapping at `data.count()`
len: usize,
}
impl<T> VecDeque<T>
{
	/// Construct an empty deque (usable in `const`/`static` initialisers)
	pub const fn new_const() -> VecDeque<T> {
		VecDeque {
			data: ArrayAlloc::empty(),
			ofs: 0,
			len: 0,
		}
	}
	/// Ensure the backing allocation can hold at least `size` entries,
	/// preserving the logical order of the stored items.
	fn reserve_cap(&mut self, size: usize) {
		let usize_bits: u32 = (::core::mem::size_of::<usize>() * 8) as u32;
		// Round the requested size up to a power-of-two-sized granule
		let newcap = ::lib::num::round_up(size, 1 << (usize_bits - size.leading_zeros()));
		if newcap > self.data.count()
		{
			let orig_cap = self.data.count();
			if self.data.expand(newcap)
			{
				// In-place expansion succeeded: any entries that had wrapped to the
				// front of the old allocation must be moved into the new tail space.
				let n_ents_before_end = orig_cap - self.ofs;
				let space_before_end = self.data.count() - self.ofs;
				if n_ents_before_end < self.len
				{
					let n_ents_to_move = self.len - n_ents_before_end;
					// Move this many entries from the start of the allocation to the end
					if space_before_end < self.len {
						// Insufficient space in the newly allocated space to fit all of the tail, partial copy
						// BUGFIX: the number of entries that fit in the tail is the
						// newly-gained space (`count - orig_cap`); the original
						// `space_before_end - orig_cap` underflows in this branch.
						let to_tail_count = self.data.count() - orig_cap;
						let shift_back_count = self.len - space_before_end;
						// SAFE: Ranges lie within the allocation and are sized from live entry counts;
						// `to_tail_count <= len <= orig_cap` so the first copy cannot overlap
						unsafe {
							::core::ptr::copy_nonoverlapping(self.data.get_ptr(0), self.data.get_ptr_mut(orig_cap), to_tail_count);
							::core::ptr::copy(self.data.get_ptr(to_tail_count), self.data.get_ptr_mut(0), shift_back_count);
						}
					}
					else {
						// Contiguous copy: all wrapped entries fit after the old tail
						// SAFE: `n_ents_to_move <= orig_cap`, so src [0,n) and dst [orig_cap, orig_cap+n) are disjoint and in-bounds
						unsafe {
							::core::ptr::copy_nonoverlapping(self.data.get_ptr(0), self.data.get_ptr_mut(orig_cap), n_ents_to_move);
						}
					}
				}
			}
			else
			{
				// Could not expand in place: allocate a new buffer and linearise into it
				let mut new_alloc = ArrayAlloc::new(newcap);
				if self.len > 0
				{
					// BUGFIX: the original condition was inverted — the stored data is
					// contiguous when it does NOT wrap, i.e. when `count - ofs >= len`.
					// (The old else-branch computed `len - seg1_len`, underflowing usize.)
					if self.data.count() - self.ofs >= self.len {
						// Data is contiguous
						// SAFE: Copying valid data within bounds
						unsafe {
							::core::ptr::copy(self.data.get_ptr(self.ofs), new_alloc.get_ptr_mut(0), self.len);
						}
					}
					else {
						// Data wraps: copy the tail segment, then the head segment
						let seg1_len = self.data.count() - self.ofs;
						let seg2_len = self.len - seg1_len;
						// SAFE: Copying valid data; both segments fit in the new allocation
						unsafe {
							::core::ptr::copy(self.data.get_ptr(self.ofs), new_alloc.get_ptr_mut(0), seg1_len);
							::core::ptr::copy(self.data.get_ptr(0), new_alloc.get_ptr_mut(seg1_len), seg2_len);
						}
					}
				}
				// New allocation: Offset is now zero
				self.ofs = 0;
				//log_debug!("self.data={:?}, new_alloc = {:?}", self.data, new_alloc);
				self.data = new_alloc;
			}
		}
	}
	/// Append a value at the back of the deque (growing if needed)
	pub fn push_back(&mut self, v: T) {
		let new_len = self.len + 1;
		self.reserve_cap(new_len);
		// Slot just past the last entry, wrapping at the capacity
		let pos = (self.ofs + self.len) % self.data.count();
		// SAFE: `pos` is an unused slot within the allocation; write takes ownership of `v`
		unsafe {
			::core::ptr::write(self.data.get_ptr_mut(pos), v);
		}
		self.len = new_len;
	}
	/// Remove and return the value at the front, or `None` when empty
	pub fn pop_front(&mut self) -> Option<T> {
		if self.len == 0 {
			None
		}
		else {
			let pos = self.ofs;
			self.len -= 1;
			self.ofs = (self.ofs + 1) % self.data.count();
			// SAFE: `pos` held an initialised entry, and it is no longer reachable after this read
			unsafe {
				Some( ::core::ptr::read(self.data.get_ptr(pos)) )
			}
		}
	}
}
......@@ -10,6 +10,7 @@ pub use sync::mutex::Mutex;
pub use sync::semaphore::Semaphore;
pub use sync::rwlock::RwLock;
pub use sync::event_channel::EventChannel;
pub use self::queue::Queue;
pub use self::atomic::AtomicU32;
#[macro_use]
......@@ -20,6 +21,7 @@ pub mod semaphore;
pub mod rwlock;
pub mod event_channel;
pub mod queue;
pub mod atomic;
......
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/sync/queue.rs
//! Thread-safe (optionally unbounded) generic queue
#[allow(unused_imports)]
use prelude::*;
use sync::Spinlock;
use core::cell::UnsafeCell;
use sync::Mutex;
use lib::VecDeque;
/// Thread-safe FIFO queue: producers call `push`, consumers block in `wait_pop`.
pub struct Queue<T: Send>
{
// Wakeup-pending flag; also guards access to `queue` (see `push`/`wait_pop`)
lock: Spinlock<bool>,
// Separate from the lock because WaitQueue::wait() takes a bool lock
queue: UnsafeCell< ::threads::WaitQueue >,
// The queued items themselves
data: Mutex<VecDeque<T>>,
}
// SAFE: Shared state is guarded — `data` by its Mutex, the `WaitQueue` in the
// UnsafeCell by `lock` (accessed only in `push`/`wait_pop` under that lock)
unsafe impl<T: Send> Sync for Queue<T> {
}
// SAFE: All fields are themselves sendable when `T: Send`
unsafe impl<T: Send> Send for Queue<T> {
}
impl<T: Send> Queue<T>
{
/// Construct an empty queue (usable in `const`/`static` initialisers)
pub const fn new_const() -> Self
{
Queue {
lock: Spinlock::new(false),
queue: UnsafeCell::new(::threads::WaitQueue::new()),
data: Mutex::new(VecDeque::new_const()),
}
}
/// Append a value and wake one waiting consumer (if any)
pub fn push(&self, v: T) {
// 1. Push the value.
self.data.lock().push_back(v);
// 2. Set and signal
if let Some(mut lh) = self.lock.try_lock_cpu() {
*lh = true;
// SAFE: Locked by the above lock
unsafe {
(*self.queue.get()).wake_one();
}
}
else {
// TODO: How can this be handled? What will wake the CPU?
// NOTE(review): if `try_lock_cpu` fails, the flag is never set and the
// wakeup is dropped — a sleeping consumer may not see this item until
// the next push. Confirm `try_lock_cpu` semantics before relying on this.
}
}
/// Block the current thread until a value is available, then pop and return it
pub fn wait_pop(&self) -> T {
loop
{
// 1. Check for pending (in the queue)
if let Some(v) = self.data.lock().pop_front() {
return v;
}
// 2. if nothing, lock spinlock and check if the inner flag is set
let mut lh = self.lock.lock();
// - If set, loop
//   (a push happened between the failed pop above and taking the lock)
if *lh {
*lh = false;
continue ;
}
// - Else, sleep
else {
// SAFE: This lock controls this waiter.
// Note: `wait` consumes the spinlock guard `lh`.
unsafe {
(*self.queue.get()).wait(lh);
}
}
}
}
}
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment