mail archive of the barebox mailing list
 help / color / mirror / Atom feed
* [PATCH 00/11] ramfs: Use dynamically sized chunks
@ 2020-06-15  6:02 Sascha Hauer
  2020-06-15  6:02 ` [PATCH 01/11] update list.h from Linux-5.7 Sascha Hauer
                   ` (10 more replies)
  0 siblings, 11 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

So far ramfs uses equally sized chunks. This series changes it to use
dynamically sized chunks instead. When making a file bigger ramfs now
always tries to put the additional data in a single chunk. With this
we get fewer chunks and with it better performance as we have fewer list
iterations to do.
Also this allows us to provide a memmap implementation for ramfs which
works for a good bunch of cases. When a file is created, truncated to
its final size and then filled with data, the data will end up in one
contiguous memory region. In this case we can provide a pointer to this
data which is important for usecases where we would otherwise duplicate
big image files in memory.

Sascha

Sascha Hauer (11):
  update list.h from Linux-5.7
  fs: Add destroy_inode callbacks to filesystems
  fs: Make iput() accept NULL pointers
  fs: free inodes we no longer need
  digest: Drop usage of memmap
  fs: ramfs: Return -ENOSPC
  fs: ramfs: Drop dead code
  fs: ramfs: Use dynamically sized chunks
  fs: ramfs: Implement memmap
  libfile: copy_file: Fix calling discard_range
  libfile: copy_file: explicitly truncate to final size

 crypto/digest.c          |  27 +-
 fs/cramfs/cramfs.c       |  10 +
 fs/devfs.c               |   8 +
 fs/fs.c                  |   7 +-
 fs/nfs.c                 |   8 +
 fs/ramfs.c               | 349 ++++++++++---------
 fs/squashfs/squashfs.c   |  10 +-
 include/linux/compiler.h |   1 -
 include/linux/list.h     | 700 ++++++++++++++++++++++++++++++---------
 include/linux/poison.h   |  26 ++
 include/linux/types.h    |  12 +
 lib/libfile.c            |  25 +-
 12 files changed, 826 insertions(+), 357 deletions(-)
 create mode 100644 include/linux/poison.h

-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 01/11] update list.h from Linux-5.7
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-06-15  6:02 ` [PATCH 02/11] fs: Add destroy_inode callbacks to filesystems Sascha Hauer
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

This updates include/linux/list.h from Linux-5.7.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 include/linux/compiler.h |   1 -
 include/linux/list.h     | 700 ++++++++++++++++++++++++++++++---------
 include/linux/poison.h   |  26 ++
 include/linux/types.h    |  12 +
 4 files changed, 581 insertions(+), 158 deletions(-)
 create mode 100644 include/linux/poison.h

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f597c7abae..f61a458414 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -250,7 +250,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 		__read_once_size(&(x), __u.__c, sizeof(x));		\
 	else								\
 		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
-	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
 	__u.__val;							\
 })
 #define READ_ONCE(x) __READ_ONCE(x, 1)
diff --git a/include/linux/list.h b/include/linux/list.h
index af5edc9a76..1341806b59 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -1,7 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_LIST_H
 #define _LINUX_LIST_H
 
-#include <linux/stddef.h> /* for NULL */
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/poison.h>
+#include <linux/const.h>
 #include <linux/kernel.h>
 
 /*
@@ -14,46 +18,60 @@
  * using the generic single-entry routines.
  */
 
-#define LIST_POISON1  ((void *) 0x00100100)
-#define LIST_POISON2  ((void *) 0x00200200)
-static inline void prefetch(const void *x) {;}
-
-struct list_head {
-	struct list_head *next, *prev;
-};
-
 #define LIST_HEAD_INIT(name) { &(name), &(name) }
 
 #define LIST_HEAD(name) \
 	struct list_head name = LIST_HEAD_INIT(name)
 
+/**
+ * INIT_LIST_HEAD - Initialize a list_head structure
+ * @list: list_head structure to be initialized.
+ *
+ * Initializes the list_head to point to itself.  If it is a list header,
+ * the result is an empty list.
+ */
 static inline void INIT_LIST_HEAD(struct list_head *list)
 {
-	list->next = list;
+	WRITE_ONCE(list->next, list);
 	list->prev = list;
 }
 
+#ifdef CONFIG_DEBUG_LIST
+extern bool __list_add_valid(struct list_head *new,
+			      struct list_head *prev,
+			      struct list_head *next);
+extern bool __list_del_entry_valid(struct list_head *entry);
+#else
+static inline bool __list_add_valid(struct list_head *new,
+				struct list_head *prev,
+				struct list_head *next)
+{
+	return true;
+}
+static inline bool __list_del_entry_valid(struct list_head *entry)
+{
+	return true;
+}
+#endif
+
 /*
  * Insert a new entry between two known consecutive entries.
  *
  * This is only for internal list manipulation where we know
  * the prev/next entries already!
  */
-#ifndef CONFIG_DEBUG_LIST
 static inline void __list_add(struct list_head *new,
 			      struct list_head *prev,
 			      struct list_head *next)
 {
+	if (!__list_add_valid(new, prev, next))
+		return;
+
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
-	prev->next = new;
+	WRITE_ONCE(prev->next, new);
 }
-#else
-extern void __list_add(struct list_head *new,
-			      struct list_head *prev,
-			      struct list_head *next);
-#endif
 
 /**
  * list_add - add a new entry
@@ -63,14 +81,10 @@ extern void __list_add(struct list_head *new,
  * Insert a new entry after the specified head.
  * This is good for implementing stacks.
  */
-#ifndef CONFIG_DEBUG_LIST
 static inline void list_add(struct list_head *new, struct list_head *head)
 {
 	__list_add(new, head, head->next);
 }
-#else
-extern void list_add(struct list_head *new, struct list_head *head);
-#endif
 
 
 /**
@@ -96,7 +110,29 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
 static inline void __list_del(struct list_head * prev, struct list_head * next)
 {
 	next->prev = prev;
-	prev->next = next;
+	WRITE_ONCE(prev->next, next);
+}
+
+/*
+ * Delete a list entry and clear the 'prev' pointer.
+ *
+ * This is a special-purpose list clearing method used in the networking code
+ * for lists allocated as per-cpu, where we don't want to incur the extra
+ * WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this
+ * needs to check the node 'prev' pointer instead of calling list_empty().
+ */
+static inline void __list_del_clearprev(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	entry->prev = NULL;
+}
+
+static inline void __list_del_entry(struct list_head *entry)
+{
+	if (!__list_del_entry_valid(entry))
+		return;
+
+	__list_del(entry->prev, entry->next);
 }
 
 /**
@@ -105,16 +141,12 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
  * Note: list_empty() on entry does not return true after this, the entry is
  * in an undefined state.
  */
-#ifndef CONFIG_DEBUG_LIST
 static inline void list_del(struct list_head *entry)
 {
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
 }
-#else
-extern void list_del(struct list_head *entry);
-#endif
 
 /**
  * list_replace - replace old entry by new one
@@ -132,20 +164,44 @@ static inline void list_replace(struct list_head *old,
 	new->prev->next = new;
 }
 
+/**
+ * list_replace_init - replace old entry by new one and initialize the old one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * If @old was empty, it will be overwritten.
+ */
 static inline void list_replace_init(struct list_head *old,
-					struct list_head *new)
+				     struct list_head *new)
 {
 	list_replace(old, new);
 	INIT_LIST_HEAD(old);
 }
 
+/**
+ * list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position
+ * @entry1: the location to place entry2
+ * @entry2: the location to place entry1
+ */
+static inline void list_swap(struct list_head *entry1,
+			     struct list_head *entry2)
+{
+	struct list_head *pos = entry2->prev;
+
+	list_del(entry2);
+	list_replace(entry1, entry2);
+	if (pos == entry1)
+		pos = entry2;
+	list_add(entry1, pos);
+}
+
 /**
  * list_del_init - deletes entry from list and reinitialize it.
  * @entry: the element to delete from the list.
  */
 static inline void list_del_init(struct list_head *entry)
 {
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	INIT_LIST_HEAD(entry);
 }
 
@@ -156,7 +212,7 @@ static inline void list_del_init(struct list_head *entry)
  */
 static inline void list_move(struct list_head *list, struct list_head *head)
 {
-	__list_del(list->prev, list->next);
+	__list_del_entry(list);
 	list_add(list, head);
 }
 
@@ -168,10 +224,44 @@ static inline void list_move(struct list_head *list, struct list_head *head)
 static inline void list_move_tail(struct list_head *list,
 				  struct list_head *head)
 {
-	__list_del(list->prev, list->next);
+	__list_del_entry(list);
 	list_add_tail(list, head);
 }
 
+/**
+ * list_bulk_move_tail - move a subsection of a list to its tail
+ * @head: the head that will follow our entry
+ * @first: first entry to move
+ * @last: last entry to move, can be the same as first
+ *
+ * Move all entries between @first and including @last before @head.
+ * All three entries must belong to the same linked list.
+ */
+static inline void list_bulk_move_tail(struct list_head *head,
+				       struct list_head *first,
+				       struct list_head *last)
+{
+	first->prev->next = last->next;
+	last->next->prev = first->prev;
+
+	head->prev->next = first;
+	first->prev = head->prev;
+
+	last->next = head;
+	head->prev = last;
+}
+
+/**
+ * list_is_first -- tests whether @list is the first entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_first(const struct list_head *list,
+					const struct list_head *head)
+{
+	return list->prev == head;
+}
+
 /**
  * list_is_last - tests whether @list is the last entry in list @head
  * @list: the entry to test
@@ -189,7 +279,7 @@ static inline int list_is_last(const struct list_head *list,
  */
 static inline int list_empty(const struct list_head *head)
 {
-	return head->next == head;
+	return READ_ONCE(head->next) == head;
 }
 
 /**
@@ -211,6 +301,38 @@ static inline int list_empty_careful(const struct list_head *head)
 	return (next == head) && (next == head->prev);
 }
 
+/**
+ * list_rotate_left - rotate the list to the left
+ * @head: the head of the list
+ */
+static inline void list_rotate_left(struct list_head *head)
+{
+	struct list_head *first;
+
+	if (!list_empty(head)) {
+		first = head->next;
+		list_move_tail(first, head);
+	}
+}
+
+/**
+ * list_rotate_to_front() - Rotate list to specific item.
+ * @list: The desired new front of the list.
+ * @head: The head of the list.
+ *
+ * Rotates list so that @list becomes the new front of the list.
+ */
+static inline void list_rotate_to_front(struct list_head *list,
+					struct list_head *head)
+{
+	/*
+	 * Deletes the list head from the list denoted by @head and
+	 * places it as the tail of @list, this effectively rotates the
+	 * list so that @list is at the front.
+	 */
+	list_move_tail(head, list);
+}
+
 /**
  * list_is_singular - tests whether a list has just one entry.
  * @head: the list to test.
@@ -220,29 +342,112 @@ static inline int list_is_singular(const struct list_head *head)
 	return !list_empty(head) && (head->next == head->prev);
 }
 
-static inline void __list_splice(struct list_head *list,
-				 struct list_head *head)
+static inline void __list_cut_position(struct list_head *list,
+		struct list_head *head, struct list_head *entry)
+{
+	struct list_head *new_first = entry->next;
+	list->next = head->next;
+	list->next->prev = list;
+	list->prev = entry;
+	entry->next = list;
+	head->next = new_first;
+	new_first->prev = head;
+}
+
+/**
+ * list_cut_position - cut a list into two
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *	and if so we won't cut the list
+ *
+ * This helper moves the initial part of @head, up to and
+ * including @entry, from @head to @list. You should
+ * pass on @entry an element you know is on @head. @list
+ * should be an empty list or a list you do not care about
+ * losing its data.
+ *
+ */
+static inline void list_cut_position(struct list_head *list,
+		struct list_head *head, struct list_head *entry)
+{
+	if (list_empty(head))
+		return;
+	if (list_is_singular(head) &&
+		(head->next != entry && head != entry))
+		return;
+	if (entry == head)
+		INIT_LIST_HEAD(list);
+	else
+		__list_cut_position(list, head, entry);
+}
+
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list.  You should pass
+ * in @entry an element you know is on @head.  @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+				   struct list_head *head,
+				   struct list_head *entry)
+{
+	if (head->next == entry) {
+		INIT_LIST_HEAD(list);
+		return;
+	}
+	list->next = head->next;
+	list->next->prev = list;
+	list->prev = entry->prev;
+	list->prev->next = list;
+	head->next = entry;
+	entry->prev = head;
+}
+
+static inline void __list_splice(const struct list_head *list,
+				 struct list_head *prev,
+				 struct list_head *next)
 {
 	struct list_head *first = list->next;
 	struct list_head *last = list->prev;
-	struct list_head *at = head->next;
 
-	first->prev = head;
-	head->next = first;
+	first->prev = prev;
+	prev->next = first;
 
-	last->next = at;
-	at->prev = last;
+	last->next = next;
+	next->prev = last;
+}
+
+/**
+ * list_splice - join two lists, this is designed for stacks
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(const struct list_head *list,
+				struct list_head *head)
+{
+	if (!list_empty(list))
+		__list_splice(list, head, head->next);
 }
 
 /**
- * list_splice - join two lists
+ * list_splice_tail - join two lists, each list being a queue
  * @list: the new list to add.
  * @head: the place to add it in the first list.
  */
-static inline void list_splice(struct list_head *list, struct list_head *head)
+static inline void list_splice_tail(struct list_head *list,
+				struct list_head *head)
 {
 	if (!list_empty(list))
-		__list_splice(list, head);
+		__list_splice(list, head->prev, head);
 }
 
 /**
@@ -256,7 +461,24 @@ static inline void list_splice_init(struct list_head *list,
 				    struct list_head *head)
 {
 	if (!list_empty(list)) {
-		__list_splice(list, head);
+		__list_splice(list, head, head->next);
+		INIT_LIST_HEAD(list);
+	}
+}
+
+/**
+ * list_splice_tail_init - join two lists and reinitialise the emptied list
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * Each of the lists is a queue.
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_tail_init(struct list_head *list,
+					 struct list_head *head)
+{
+	if (!list_empty(list)) {
+		__list_splice(list, head->prev, head);
 		INIT_LIST_HEAD(list);
 	}
 }
@@ -265,7 +487,7 @@ static inline void list_splice_init(struct list_head *list,
  * list_entry - get the struct for this entry
  * @ptr:	the &struct list_head pointer.
  * @type:	the type of the struct this is embedded in.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  */
 #define list_entry(ptr, type, member) \
 	container_of(ptr, type, member)
@@ -274,7 +496,7 @@ static inline void list_splice_init(struct list_head *list,
  * list_first_entry - get the first element from a list
  * @ptr:	the list head to take the element from.
  * @type:	the type of the struct this is embedded in.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  *
  * Note, that list is expected to be not empty.
  */
@@ -283,25 +505,44 @@ static inline void list_splice_init(struct list_head *list,
 
 /**
  * list_last_entry - get the last element from a list
- * @head: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @ptr:	the list head to take the element from.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_head within the struct.
  *
  * Note, that list is expected to be not empty.
  */
-#define list_last_entry(head, type, member) \
-	list_entry((head)->prev, type, member)
+#define list_last_entry(ptr, type, member) \
+	list_entry((ptr)->prev, type, member)
 
 /**
  * list_first_entry_or_null - get the first element from a list
  * @ptr:	the list head to take the element from.
  * @type:	the type of the struct this is embedded in.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  *
  * Note that if the list is empty, it returns NULL.
  */
-#define list_first_entry_or_null(ptr, type, member) \
-	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#define list_first_entry_or_null(ptr, type, member) ({ \
+	struct list_head *head__ = (ptr); \
+	struct list_head *pos__ = READ_ONCE(head__->next); \
+	pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
+})
+
+/**
+ * list_next_entry - get the next element in list
+ * @pos:	the type * to cursor
+ * @member:	the name of the list_head within the struct.
+ */
+#define list_next_entry(pos, member) \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * list_prev_entry - get the prev element in list
+ * @pos:	the type * to cursor
+ * @member:	the name of the list_head within the struct.
+ */
+#define list_prev_entry(pos, member) \
+	list_entry((pos)->member.prev, typeof(*(pos)), member)
 
 /**
  * list_for_each	-	iterate over a list
@@ -309,21 +550,17 @@ static inline void list_splice_init(struct list_head *list,
  * @head:	the head for your list.
  */
 #define list_for_each(pos, head) \
-	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
-		pos = pos->next)
+	for (pos = (head)->next; pos != (head); pos = pos->next)
 
 /**
- * __list_for_each	-	iterate over a list
+ * list_for_each_continue - continue iteration over a list
  * @pos:	the &struct list_head to use as a loop cursor.
  * @head:	the head for your list.
  *
- * This variant differs from list_for_each() in that it's the
- * simplest possible list iteration code, no prefetching is done.
- * Use this for code that knows the list to be very short (empty
- * or 1 entry) most of the time.
+ * Continue to iterate over a list, continuing after the current position.
  */
-#define __list_for_each(pos, head) \
-	for (pos = (head)->next; pos != (head); pos = pos->next)
+#define list_for_each_continue(pos, head) \
+	for (pos = pos->next; pos != (head); pos = pos->next)
 
 /**
  * list_for_each_prev	-	iterate over a list backwards
@@ -331,8 +568,7 @@ static inline void list_splice_init(struct list_head *list,
  * @head:	the head for your list.
  */
 #define list_for_each_prev(pos, head) \
-	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
-		pos = pos->prev)
+	for (pos = (head)->prev; pos != (head); pos = pos->prev)
 
 /**
  * list_for_each_safe - iterate over a list safe against removal of list entry
@@ -344,33 +580,44 @@ static inline void list_splice_init(struct list_head *list,
 	for (pos = (head)->next, n = pos->next; pos != (head); \
 		pos = n, n = pos->next)
 
+/**
+ * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
+ * @pos:	the &struct list_head to use as a loop cursor.
+ * @n:		another &struct list_head to use as temporary storage
+ * @head:	the head for your list.
+ */
+#define list_for_each_prev_safe(pos, n, head) \
+	for (pos = (head)->prev, n = pos->prev; \
+	     pos != (head); \
+	     pos = n, n = pos->prev)
+
 /**
  * list_for_each_entry	-	iterate over list of given type
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  */
 #define list_for_each_entry(pos, head, member)				\
-	for (pos = list_entry((head)->next, typeof(*pos), member);	\
-	     prefetch(pos->member.next), &pos->member != (head); 	\
-	     pos = list_entry(pos->member.next, typeof(*pos), member))
+	for (pos = list_first_entry(head, typeof(*pos), member);	\
+	     &pos->member != (head);					\
+	     pos = list_next_entry(pos, member))
 
 /**
  * list_for_each_entry_reverse - iterate backwards over list of given type.
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  */
 #define list_for_each_entry_reverse(pos, head, member)			\
-	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
-	     prefetch(pos->member.prev), &pos->member != (head); 	\
-	     pos = list_entry(pos->member.prev, typeof(*pos), member))
+	for (pos = list_last_entry(head, typeof(*pos), member);		\
+	     &pos->member != (head); 					\
+	     pos = list_prev_entry(pos, member))
 
 /**
  * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
  * @pos:	the type * to use as a start point
  * @head:	the head of the list
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  *
  * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
  */
@@ -381,87 +628,129 @@ static inline void list_splice_init(struct list_head *list,
  * list_for_each_entry_continue - continue iteration over list of given type
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  *
  * Continue to iterate over list of given type, continuing after
  * the current position.
  */
 #define list_for_each_entry_continue(pos, head, member) 		\
-	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
-	     prefetch(pos->member.next), &pos->member != (head);	\
-	     pos = list_entry(pos->member.next, typeof(*pos), member))
+	for (pos = list_next_entry(pos, member);			\
+	     &pos->member != (head);					\
+	     pos = list_next_entry(pos, member))
+
+/**
+ * list_for_each_entry_continue_reverse - iterate backwards from the given point
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the list_head within the struct.
+ *
+ * Start to iterate over list of given type backwards, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue_reverse(pos, head, member)		\
+	for (pos = list_prev_entry(pos, member);			\
+	     &pos->member != (head);					\
+	     pos = list_prev_entry(pos, member))
 
 /**
  * list_for_each_entry_from - iterate over list of given type from the current point
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  *
  * Iterate over list of given type, continuing from current position.
  */
 #define list_for_each_entry_from(pos, head, member) 			\
-	for (; prefetch(pos->member.next), &pos->member != (head);	\
-	     pos = list_entry(pos->member.next, typeof(*pos), member))
+	for (; &pos->member != (head);					\
+	     pos = list_next_entry(pos, member))
+
+/**
+ * list_for_each_entry_from_reverse - iterate backwards over list of given type
+ *                                    from the current point
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, continuing from current position.
+ */
+#define list_for_each_entry_from_reverse(pos, head, member)		\
+	for (; &pos->member != (head);					\
+	     pos = list_prev_entry(pos, member))
 
 /**
  * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
  * @pos:	the type * to use as a loop cursor.
  * @n:		another type * to use as temporary storage
  * @head:	the head for your list.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  */
 #define list_for_each_entry_safe(pos, n, head, member)			\
-	for (pos = list_entry((head)->next, typeof(*pos), member),	\
-		n = list_entry(pos->member.next, typeof(*pos), member);	\
+	for (pos = list_first_entry(head, typeof(*pos), member),	\
+		n = list_next_entry(pos, member);			\
 	     &pos->member != (head); 					\
-	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+	     pos = n, n = list_next_entry(n, member))
 
 /**
- * list_for_each_entry_safe_continue
+ * list_for_each_entry_safe_continue - continue list iteration safe against removal
  * @pos:	the type * to use as a loop cursor.
  * @n:		another type * to use as temporary storage
  * @head:	the head for your list.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  *
  * Iterate over list of given type, continuing after current point,
  * safe against removal of list entry.
  */
 #define list_for_each_entry_safe_continue(pos, n, head, member) 		\
-	for (pos = list_entry(pos->member.next, typeof(*pos), member), 		\
-		n = list_entry(pos->member.next, typeof(*pos), member);		\
+	for (pos = list_next_entry(pos, member), 				\
+		n = list_next_entry(pos, member);				\
 	     &pos->member != (head);						\
-	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+	     pos = n, n = list_next_entry(n, member))
 
 /**
- * list_for_each_entry_safe_from
+ * list_for_each_entry_safe_from - iterate over list from current point safe against removal
  * @pos:	the type * to use as a loop cursor.
  * @n:		another type * to use as temporary storage
  * @head:	the head for your list.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  *
  * Iterate over list of given type from current point, safe against
  * removal of list entry.
  */
 #define list_for_each_entry_safe_from(pos, n, head, member) 			\
-	for (n = list_entry(pos->member.next, typeof(*pos), member);		\
+	for (n = list_next_entry(pos, member);					\
 	     &pos->member != (head);						\
-	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+	     pos = n, n = list_next_entry(n, member))
 
 /**
- * list_for_each_entry_safe_reverse
+ * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
  * @pos:	the type * to use as a loop cursor.
  * @n:		another type * to use as temporary storage
  * @head:	the head for your list.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  *
  * Iterate backwards over list of given type, safe against removal
  * of list entry.
  */
 #define list_for_each_entry_safe_reverse(pos, n, head, member)		\
-	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
-		n = list_entry(pos->member.prev, typeof(*pos), member);	\
+	for (pos = list_last_entry(head, typeof(*pos), member),		\
+		n = list_prev_entry(pos, member);			\
 	     &pos->member != (head); 					\
-	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+	     pos = n, n = list_prev_entry(n, member))
+
+/**
+ * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
+ * @pos:	the loop cursor used in the list_for_each_entry_safe loop
+ * @n:		temporary storage used in list_for_each_entry_safe
+ * @member:	the name of the list_head within the struct.
+ *
+ * list_safe_reset_next is not safe to use in general if the list may be
+ * modified concurrently (eg. the lock is dropped in the loop body). An
+ * exception to this is if the cursor element (pos) is pinned in the list,
+ * and list_safe_reset_next is called after re-taking the lock and before
+ * completing the current iteration of the loop body.
+ */
+#define list_safe_reset_next(pos, n, member)				\
+	n = list_next_entry(pos, member)
 
 /**
  * list_add_sort - add a new entry to a sorted list
@@ -473,7 +762,7 @@ static inline void list_splice_init(struct list_head *list,
  * This is useful for implementing queues.
  */
 static inline void list_add_sort(struct list_head *new, struct list_head *head,
-		int (*compare)(struct list_head *a, struct list_head *b))
+               int (*compare)(struct list_head *a, struct list_head *b))
 {
 	struct list_head *pos, *insert = head;
 
@@ -494,14 +783,6 @@ static inline void list_add_sort(struct list_head *new, struct list_head *head,
  * You lose the ability to access the tail in O(1).
  */
 
-struct hlist_head {
-	struct hlist_node *first;
-};
-
-struct hlist_node {
-	struct hlist_node *next, **pprev;
-};
-
 #define HLIST_HEAD_INIT { .first = NULL }
 #define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
@@ -511,25 +792,58 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h)
 	h->pprev = NULL;
 }
 
+/**
+ * hlist_unhashed - Has node been removed from list and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed
+ * state.  For example, hlist_nulls_del_init_rcu() does leave the
+ * node in unhashed state, but hlist_nulls_del() does not.
+ */
 static inline int hlist_unhashed(const struct hlist_node *h)
 {
 	return !h->pprev;
 }
 
+/**
+ * hlist_unhashed_lockless - Version of hlist_unhashed for lockless use
+ * @h: Node to be checked
+ *
+ * This variant of hlist_unhashed() must be used in lockless contexts
+ * to avoid potential load-tearing.  The READ_ONCE() is paired with the
+ * various WRITE_ONCE() in hlist helpers that are defined below.
+ */
+static inline int hlist_unhashed_lockless(const struct hlist_node *h)
+{
+	return !READ_ONCE(h->pprev);
+}
+
+/**
+ * hlist_empty - Is the specified hlist_head structure an empty hlist?
+ * @h: Structure to check.
+ */
 static inline int hlist_empty(const struct hlist_head *h)
 {
-	return !h->first;
+	return !READ_ONCE(h->first);
 }
 
 static inline void __hlist_del(struct hlist_node *n)
 {
 	struct hlist_node *next = n->next;
 	struct hlist_node **pprev = n->pprev;
-	*pprev = next;
+
+	WRITE_ONCE(*pprev, next);
 	if (next)
-		next->pprev = pprev;
+		WRITE_ONCE(next->pprev, pprev);
 }
 
+/**
+ * hlist_del - Delete the specified hlist_node from its list
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in hashed state.  Use
+ * hlist_del_init() or similar instead to unhash @n.
+ */
 static inline void hlist_del(struct hlist_node *n)
 {
 	__hlist_del(n);
@@ -537,6 +851,12 @@ static inline void hlist_del(struct hlist_node *n)
 	n->pprev = LIST_POISON2;
 }
 
+/**
+ * hlist_del_init - Delete the specified hlist_node from its list and initialize
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in unhashed state.
+ */
 static inline void hlist_del_init(struct hlist_node *n)
 {
 	if (!hlist_unhashed(n)) {
@@ -545,95 +865,161 @@ static inline void hlist_del_init(struct hlist_node *n)
 	}
 }
 
+/**
+ * hlist_add_head - add a new entry at the beginning of the hlist
+ * @n: new entry to be added
+ * @h: hlist head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 {
 	struct hlist_node *first = h->first;
-	n->next = first;
+	WRITE_ONCE(n->next, first);
 	if (first)
-		first->pprev = &n->next;
-	h->first = n;
-	n->pprev = &h->first;
+		WRITE_ONCE(first->pprev, &n->next);
+	WRITE_ONCE(h->first, n);
+	WRITE_ONCE(n->pprev, &h->first);
 }
 
-/* next must be != NULL */
+/**
+ * hlist_add_before - add a new entry before the one specified
+ * @n: new entry to be added
+ * @next: hlist node to add it before, which must be non-NULL
+ */
 static inline void hlist_add_before(struct hlist_node *n,
-					struct hlist_node *next)
+				    struct hlist_node *next)
+{
+	WRITE_ONCE(n->pprev, next->pprev);
+	WRITE_ONCE(n->next, next);
+	WRITE_ONCE(next->pprev, &n->next);
+	WRITE_ONCE(*(n->pprev), n);
+}
+
+/**
+ * hlist_add_behind - add a new entry after the one specified
+ * @n: new entry to be added
+ * @prev: hlist node to add it after, which must be non-NULL
+ */
+static inline void hlist_add_behind(struct hlist_node *n,
+				    struct hlist_node *prev)
 {
-	n->pprev = next->pprev;
-	n->next = next;
-	next->pprev = &n->next;
-	*(n->pprev) = n;
+	WRITE_ONCE(n->next, prev->next);
+	WRITE_ONCE(prev->next, n);
+	WRITE_ONCE(n->pprev, &prev->next);
+
+	if (n->next)
+		WRITE_ONCE(n->next->pprev, &n->next);
+}
+
+/**
+ * hlist_add_fake - create a fake hlist consisting of a single headless node
+ * @n: Node to make a fake list out of
+ *
+ * This makes @n appear to be its own predecessor on a headless hlist.
+ * The point of this is to allow things like hlist_del() to work correctly
+ * in cases where there is no list.
+ */
+static inline void hlist_add_fake(struct hlist_node *n)
+{
+	n->pprev = &n->next;
 }
 
-static inline void hlist_add_after(struct hlist_node *n,
-					struct hlist_node *next)
+/**
+ * hlist_fake: Is this node a fake hlist?
+ * @h: Node to check for being a self-referential fake hlist.
+ */
+static inline bool hlist_fake(struct hlist_node *h)
+{
+	return h->pprev == &h->next;
+}
+
+/**
+ * hlist_is_singular_node - is node the only element of the specified hlist?
+ * @n: Node to check for singularity.
+ * @h: Header for potentially singular list.
+ *
+ * Check whether the node is the only node of the head without
+ * accessing head, thus avoiding unnecessary cache misses.
+ */
+static inline bool
+hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
 {
-	next->next = n->next;
-	n->next = next;
-	next->pprev = &n->next;
+	return !n->next && n->pprev == &h->first;
+}
 
-	if(next->next)
-		next->next->pprev  = &next->next;
+/**
+ * hlist_move_list - Move an hlist
+ * @old: hlist_head for old list.
+ * @new: hlist_head for new list.
+ *
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+				   struct hlist_head *new)
+{
+	new->first = old->first;
+	if (new->first)
+		new->first->pprev = &new->first;
+	old->first = NULL;
 }
 
 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \
-	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
-	     pos = pos->next)
+	for (pos = (head)->first; pos ; pos = pos->next)
 
 #define hlist_for_each_safe(pos, n, head) \
 	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
 	     pos = n)
 
+#define hlist_entry_safe(ptr, type, member) \
+	({ typeof(ptr) ____ptr = (ptr); \
+	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+	})
+
 /**
  * hlist_for_each_entry	- iterate over list of given type
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry(tpos, pos, head, member)			 \
-	for (pos = (head)->first;					 \
-	     pos && ({ prefetch(pos->next); 1;}) &&			 \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = pos->next)
+#define hlist_for_each_entry(pos, head, member)				\
+	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+	     pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @member:	the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_continue(tpos, pos, member)		 \
-	for (pos = (pos)->next;						 \
-	     pos && ({ prefetch(pos->next); 1;}) &&			 \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = pos->next)
+#define hlist_for_each_entry_continue(pos, member)			\
+	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+	     pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_from - iterate over a hlist continuing from current point
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @pos:	the type * to use as a loop cursor.
  * @member:	the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_from(tpos, pos, member)			 \
-	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = pos->next)
+#define hlist_for_each_entry_from(pos, member)				\
+	for (; pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
- * @n:		another &struct hlist_node to use as temporary storage
+ * @pos:	the type * to use as a loop cursor.
+ * @n:		a &struct hlist_node to use as temporary storage
  * @head:	the head for your list.
  * @member:	the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member) 		 \
-	for (pos = (head)->first;					 \
-	     pos && ({ n = pos->next; 1; }) && 				 \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = n)
+#define hlist_for_each_entry_safe(pos, n, head, member) 		\
+	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+	     pos && ({ n = pos->member.next; 1; });			\
+	     pos = hlist_entry_safe(n, typeof(*pos), member))
 
 #endif
diff --git a/include/linux/poison.h b/include/linux/poison.h
new file mode 100644
index 0000000000..b92eb2f95b
--- /dev/null
+++ b/include/linux/poison.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_POISON_H
+#define _LINUX_POISON_H
+
+/********** include/linux/list.h **********/
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2  ((void *) 0x122 + POISON_POINTER_DELTA)
+
+#endif
diff --git a/include/linux/types.h b/include/linux/types.h
index c19d1dc053..fa7684e31e 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -206,4 +206,16 @@ struct ustat {
 	char			f_fpack[6];
 };
 
+struct list_head {
+	struct list_head *next, *prev;
+};
+
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
 #endif /* _LINUX_TYPES_H */
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 02/11] fs: Add destroy_inode callbacks to filesystems
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
  2020-06-15  6:02 ` [PATCH 01/11] update list.h from Linux-5.7 Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-06-15  6:02 ` [PATCH 03/11] fs: Make iput() accept NULL pointers Sascha Hauer
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

Several filesystems rely on the default function which frees
the struct inode * rather than the filesystem specific inode
which the inode is embedded in. This works because the inode
is the first element in the filesystem specific inode. Let's
not depend on this behaviour and for clarity add the destroy_inode
callbacks to all filesystems.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 fs/cramfs/cramfs.c     | 10 ++++++++++
 fs/devfs.c             |  8 ++++++++
 fs/nfs.c               |  8 ++++++++
 fs/ramfs.c             |  8 ++++++++
 fs/squashfs/squashfs.c | 10 +++++++++-
 5 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/fs/cramfs/cramfs.c b/fs/cramfs/cramfs.c
index 99cbdb920c..3ea6bd437e 100644
--- a/fs/cramfs/cramfs.c
+++ b/fs/cramfs/cramfs.c
@@ -333,6 +333,15 @@ static struct inode *cramfs_alloc_inode(struct super_block *sb)
 	return &info->i_inode;
 }
 
+static void cramfs_destroy_inode(struct inode *inode)
+{
+	struct cramfs_inode_info *info;
+
+	info = to_cramfs_inode_info(inode);
+
+	free(info);
+}
+
 static int cramfs_iterate(struct file *file, struct dir_context *ctx)
 {
 	struct dentry *dentry = file->f_path.dentry;
@@ -427,6 +436,7 @@ static const struct inode_operations cramfs_symlink_inode_operations =
 
 static const struct super_operations cramfs_ops = {
 	.alloc_inode = cramfs_alloc_inode,
+	.destroy_inode = cramfs_destroy_inode,
 };
 
 static int cramfs_probe(struct device_d *dev)
diff --git a/fs/devfs.c b/fs/devfs.c
index b503f277ac..df229cca48 100644
--- a/fs/devfs.c
+++ b/fs/devfs.c
@@ -200,6 +200,13 @@ static struct inode *devfs_alloc_inode(struct super_block *sb)
 	return &node->inode;
 }
 
+static void devfs_destroy_inode(struct inode *inode)
+{
+	struct devfs_inode *node = container_of(inode, struct devfs_inode, inode);
+
+	free(node);
+}
+
 static int devfs_iterate(struct file *file, struct dir_context *ctx)
 {
 	struct cdev *cdev;
@@ -314,6 +321,7 @@ static const struct inode_operations devfs_dir_inode_operations =
 
 static const struct super_operations devfs_ops = {
 	.alloc_inode = devfs_alloc_inode,
+	.destroy_inode = devfs_destroy_inode,
 };
 
 static int devfs_probe(struct device_d *dev)
diff --git a/fs/nfs.c b/fs/nfs.c
index 15ddab7915..6c4637281d 100644
--- a/fs/nfs.c
+++ b/fs/nfs.c
@@ -1202,6 +1202,13 @@ static struct inode *nfs_alloc_inode(struct super_block *sb)
 	return &node->inode;
 }
 
+static void nfs_destroy_inode(struct inode *inode)
+{
+	struct nfs_inode *node = nfsi(inode);
+
+	free(node);
+}
+
 static const struct inode_operations nfs_file_inode_operations;
 static const struct file_operations nfs_dir_operations;
 static const struct inode_operations nfs_dir_inode_operations;
@@ -1273,6 +1280,7 @@ static const struct inode_operations nfs_dir_inode_operations =
 
 static const struct super_operations nfs_ops = {
 	.alloc_inode = nfs_alloc_inode,
+	.destroy_inode = nfs_destroy_inode,
 };
 
 static char *rootnfsopts;
diff --git a/fs/ramfs.c b/fs/ramfs.c
index 5328775ee0..341b6130de 100644
--- a/fs/ramfs.c
+++ b/fs/ramfs.c
@@ -396,8 +396,16 @@ static struct inode *ramfs_alloc_inode(struct super_block *sb)
 	return &node->inode;
 }
 
+static void ramfs_destroy_inode(struct inode *inode)
+{
+	struct ramfs_inode *node = to_ramfs_inode(inode);
+
+	free(node);
+}
+
 static const struct super_operations ramfs_ops = {
 	.alloc_inode = ramfs_alloc_inode,
+	.destroy_inode = ramfs_destroy_inode,
 };
 
 static int ramfs_probe(struct device_d *dev)
diff --git a/fs/squashfs/squashfs.c b/fs/squashfs/squashfs.c
index 38aff6d5b8..cb2d936ea4 100644
--- a/fs/squashfs/squashfs.c
+++ b/fs/squashfs/squashfs.c
@@ -76,8 +76,16 @@ static struct inode *squashfs_alloc_inode(struct super_block *sb)
 	return &node->vfs_inode;
 }
 
+static void squashfs_destroy_inode(struct inode *inode)
+{
+	struct squashfs_inode_info *node = squashfs_i(inode);
+
+	free(inode);
+}
+
 static const struct super_operations squashfs_super_ops = {
-        .alloc_inode = squashfs_alloc_inode,
+	.alloc_inode = squashfs_alloc_inode,
+	.destroy_inode = squashfs_destroy_inode,
 };
 
 static int squashfs_probe(struct device_d *dev)
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 03/11] fs: Make iput() accept NULL pointers
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
  2020-06-15  6:02 ` [PATCH 01/11] update list.h from Linux-5.7 Sascha Hauer
  2020-06-15  6:02 ` [PATCH 02/11] fs: Add destroy_inode callbacks to filesystems Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-06-15  6:02 ` [PATCH 04/11] fs: free inodes we no longer need Sascha Hauer
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

Let iput() accept NULL pointers so that users do not have to test for
it.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 fs/fs.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/fs/fs.c b/fs/fs.c
index d8389323aa..cecb3d70e0 100644
--- a/fs/fs.c
+++ b/fs/fs.c
@@ -1087,6 +1087,9 @@ void iget_failed(struct inode *inode)
 
 void iput(struct inode *inode)
 {
+	if (!inode)
+		return;
+
 	if (!inode->i_count)
 		return;
 
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 04/11] fs: free inodes we no longer need
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
                   ` (2 preceding siblings ...)
  2020-06-15  6:02 ` [PATCH 03/11] fs: Make iput() accept NULL pointers Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-08-03 22:02   ` Ahmad Fatoum
  2020-06-15  6:02 ` [PATCH 05/11] digest: Drop usage of memmap Sascha Hauer
                   ` (6 subsequent siblings)
  10 siblings, 1 reply; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

So far we freed the no longer needed inodes only at unmount time.
Let's trust our reference counting a bit more and free them once
the reference counter hits zero.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 fs/fs.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/fs/fs.c b/fs/fs.c
index cecb3d70e0..e04cadfe5d 100644
--- a/fs/fs.c
+++ b/fs/fs.c
@@ -1090,10 +1090,12 @@ void iput(struct inode *inode)
 	if (!inode)
 		return;
 
-	if (!inode->i_count)
-		return;
-
 	inode->i_count--;
+
+	if (!inode->i_count) {
+		list_del(&inode->i_sb_list);
+		destroy_inode(inode);
+	}
 }
 
 struct inode *iget(struct inode *inode)
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 05/11] digest: Drop usage of memmap
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
                   ` (3 preceding siblings ...)
  2020-06-15  6:02 ` [PATCH 04/11] fs: free inodes we no longer need Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-06-15  6:02 ` [PATCH 06/11] fs: ramfs: Return -ENOSPC Sascha Hauer
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

digest_file_window() first tries to memmap the file before it falls back
to reading it. This is quite unnecessary, we can just always read.

Moreover, memmapping a file has problems with the current code. A
"md5sum foo" result in the filesize argument being MAX_LFS_FILESIZE.
This is fine for files where the file is just read up to the end in
this case, but for memmapped buffers this results in digesting
MAX_LFS_FILESIZE bytes which is wrong. This problem is not apparent
at the moment as there are only a few files which are memmappable,
and on these (/dev/mem, /dev/ram0) digest commands are normally
called with an explicit size argument. This changes once ramfs starts
supporting memmap, so better drop memmapping in the digest code now.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 crypto/digest.c | 27 +--------------------------
 1 file changed, 1 insertion(+), 26 deletions(-)

diff --git a/crypto/digest.c b/crypto/digest.c
index e67e8fba0d..d23245e15f 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -253,32 +253,12 @@ out_free:
 	return ret;
 }
 
-static int digest_update_from_memory(struct digest *d,
-				     const unsigned char *buf,
-				     loff_t size)
-{
-	while (size) {
-		unsigned long now = min_t(typeof(size), PAGE_SIZE, size);
-		int ret;
-
-		ret = digest_update_interruptible(d, buf, now);
-		if (ret)
-			return ret;
-
-		size -= now;
-		buf  += now;
-	}
-
-	return 0;
-}
-
 int digest_file_window(struct digest *d, const char *filename,
 		       unsigned char *hash,
 		       const unsigned char *sig,
 		       loff_t start, loff_t size)
 {
 	int fd, ret;
-	unsigned char *buf;
 
 	ret = digest_init(d);
 	if (ret)
@@ -290,12 +270,7 @@ int digest_file_window(struct digest *d, const char *filename,
 		return -errno;
 	}
 
-	buf = memmap(fd, PROT_READ);
-	if (buf == MAP_FAILED)
-		ret = digest_update_from_fd(d, fd, start, size);
-	else
-		ret = digest_update_from_memory(d, buf + start, size);
-
+	ret = digest_update_from_fd(d, fd, start, size);
 	if (ret)
 		goto out;
 
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 06/11] fs: ramfs: Return -ENOSPC
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
                   ` (4 preceding siblings ...)
  2020-06-15  6:02 ` [PATCH 05/11] digest: Drop usage of memmap Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-06-15  6:02 ` [PATCH 07/11] fs: ramfs: Drop dead code Sascha Hauer
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

When ramfs fails to allocate more memory, returning -ENOSPC is
more appropriate than -ENOMEM.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 fs/ramfs.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/ramfs.c b/fs/ramfs.c
index 341b6130de..800b03af29 100644
--- a/fs/ramfs.c
+++ b/fs/ramfs.c
@@ -367,7 +367,7 @@ static int ramfs_truncate(struct device_d *dev, FILE *f, loff_t size)
 		} else {
 			node->data = ramfs_get_chunk();
 			if (!node->data)
-				return -ENOMEM;
+				return -ENOSPC;
 			data = node->data;
 			oldchunks = 1;
 		}
@@ -378,7 +378,7 @@ static int ramfs_truncate(struct device_d *dev, FILE *f, loff_t size)
 		while (newchunks > oldchunks) {
 			data->next = ramfs_get_chunk();
 			if (!data->next)
-				return -ENOMEM;
+				return -ENOSPC;
 			data = data->next;
 			oldchunks++;
 		}
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 07/11] fs: ramfs: Drop dead code
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
                   ` (5 preceding siblings ...)
  2020-06-15  6:02 ` [PATCH 06/11] fs: ramfs: Return -ENOSPC Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-06-15  6:02 ` [PATCH 08/11] fs: ramfs: Use dynamically sized chunks Sascha Hauer
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 fs/ramfs.c | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/fs/ramfs.c b/fs/ramfs.c
index 800b03af29..2b6df07996 100644
--- a/fs/ramfs.c
+++ b/fs/ramfs.c
@@ -34,9 +34,6 @@ struct ramfs_chunk {
 struct ramfs_inode {
 	struct inode inode;
 	char *name;
-	struct ramfs_inode *parent;
-	struct ramfs_inode *next;
-	struct ramfs_inode *child;
 	char *symlink;
 	ulong mode;
 
@@ -53,10 +50,6 @@ static inline struct ramfs_inode *to_ramfs_inode(struct inode *inode)
 	return container_of(inode, struct ramfs_inode, inode);
 }
 
-struct ramfs_priv {
-	struct ramfs_inode root;
-};
-
 /* ---------------------------------------------------------------*/
 
 static const struct super_operations ramfs_ops;
@@ -411,16 +404,9 @@ static const struct super_operations ramfs_ops = {
 static int ramfs_probe(struct device_d *dev)
 {
 	struct inode *inode;
-	struct ramfs_priv *priv = xzalloc(sizeof(struct ramfs_priv));
 	struct fs_device_d *fsdev = dev_to_fs_device(dev);
 	struct super_block *sb = &fsdev->sb;
 
-	dev->priv = priv;
-
-	priv->root.name = "/";
-	priv->root.mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO;
-	priv->root.parent = &priv->root;
-
 	sb->s_op = &ramfs_ops;
 
 	inode = ramfs_get_inode(sb, NULL, S_IFDIR);
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 08/11] fs: ramfs: Use dynamically sized chunks
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
                   ` (6 preceding siblings ...)
  2020-06-15  6:02 ` [PATCH 07/11] fs: ramfs: Drop dead code Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-07-02 14:28   ` Ahmad Fatoum
  2020-06-15  6:02 ` [PATCH 09/11] fs: ramfs: Implement memmap Sascha Hauer
                   ` (2 subsequent siblings)
  10 siblings, 1 reply; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

This changes the way ramfs stores its data. So far we used equally sized
chunks, this patch changes it to use chunks in a size that fits our
needs. The chunks are always allocated in the size they are needed for
the current truncation. Only if we fail to allocate all desired memory
at once we fall back to allocating smaller chunks. Together with using
the generic list implementation this results in smaller code and has
the advantage that many image files end up being contiguously in memory
and thus we can provide a memmap for them. Files will end up
contiguously in memory when they are first created, then truncated to
the final size and then filled up with data. This is something which
is normally easily achievable when desired.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 fs/ramfs.c | 307 +++++++++++++++++++++++++++--------------------------
 1 file changed, 159 insertions(+), 148 deletions(-)

diff --git a/fs/ramfs.c b/fs/ramfs.c
index 2b6df07996..ebe03de736 100644
--- a/fs/ramfs.c
+++ b/fs/ramfs.c
@@ -23,12 +23,15 @@
 #include <errno.h>
 #include <linux/stat.h>
 #include <xfuncs.h>
+#include <linux/sizes.h>
 
 #define CHUNK_SIZE	(4096 * 2)
 
 struct ramfs_chunk {
 	char *data;
-	struct ramfs_chunk *next;
+	unsigned long ofs;
+	int size;
+	struct list_head list;
 };
 
 struct ramfs_inode {
@@ -37,12 +40,14 @@ struct ramfs_inode {
 	char *symlink;
 	ulong mode;
 
-	ulong size;
-	struct ramfs_chunk *data;
+	/* bytes used in this inode */
+	unsigned long size;
+	/* bytes currently allocated for this inode */
+	unsigned long alloc_size;
+
+	struct list_head data;
 
-	/* Points to recently used chunk */
-	int recent_chunk;
-	struct ramfs_chunk *recent_chunkp;
+	struct ramfs_chunk *current_chunk;
 };
 
 static inline struct ramfs_inode *to_ramfs_inode(struct inode *inode)
@@ -89,18 +94,25 @@ static struct inode *ramfs_get_inode(struct super_block *sb, const struct inode
 	return inode;
 }
 
-static struct ramfs_chunk *ramfs_get_chunk(void)
+#define MIN_SIZE SZ_8K
+
+static struct ramfs_chunk *ramfs_get_chunk(unsigned long size)
 {
 	struct ramfs_chunk *data = malloc(sizeof(struct ramfs_chunk));
+
 	if (!data)
 		return NULL;
 
-	data->data = calloc(CHUNK_SIZE, 1);
+	if (size < MIN_SIZE)
+		size = MIN_SIZE;
+
+	data->data = calloc(size, 1);
 	if (!data->data) {
 		free(data);
 		return NULL;
 	}
-	data->next = NULL;
+
+	data->size = size;
 
 	return data;
 }
@@ -160,23 +172,6 @@ static int ramfs_symlink(struct inode *dir, struct dentry *dentry,
 
 static int ramfs_unlink(struct inode *dir, struct dentry *dentry)
 {
-	struct inode *inode = d_inode(dentry);
-
-	if (inode) {
-		struct ramfs_inode *node = to_ramfs_inode(inode);
-		struct ramfs_chunk *chunk = node->data;
-
-		node->data = NULL;
-
-		while (chunk) {
-			struct ramfs_chunk *tmp = chunk;
-
-			chunk = chunk->next;
-
-			ramfs_put_chunk(tmp);
-		}
-	}
-
 	return simple_unlink(dir, dentry);
 }
 
@@ -200,80 +195,57 @@ static const struct inode_operations ramfs_dir_inode_operations =
 	.create = ramfs_create,
 };
 
-static struct ramfs_chunk *ramfs_find_chunk(struct ramfs_inode *node, int chunk)
+static struct ramfs_chunk *ramfs_find_chunk(struct ramfs_inode *node,
+					    unsigned long pos, int *ofs, int *len)
 {
-	struct ramfs_chunk *data;
-	int left = chunk;
+	struct ramfs_chunk *data, *cur = node->current_chunk;
 
-	if (chunk == 0)
-		return node->data;
+	if (cur && pos >= cur->ofs)
+		data = cur;
+	else
+		data = list_first_entry(&node->data, struct ramfs_chunk, list);
 
-	if (node->recent_chunk == chunk)
-		return node->recent_chunkp;
+	list_for_each_entry_from(data, &node->data, list) {
+		if (data->ofs + data->size > pos) {
+			*ofs = pos - data->ofs;
+			*len = data->ofs + data->size - pos;
 
-	if (node->recent_chunk < chunk && node->recent_chunk != 0) {
-		/* Start at last known chunk */
-		data = node->recent_chunkp;
-		left -= node->recent_chunk;
-	} else {
-		/* Start at first chunk */
-		data = node->data;
-	}
+			node->current_chunk = data;
 
-	while (left--)
-		data = data->next;
+			return data;
+		}
+	}
 
-	node->recent_chunkp = data;
-	node->recent_chunk = chunk;
+	pr_err("%s: no chunk for pos %ld found\n", __func__, pos);
 
-	return data;
+	return NULL;
 }
 
 static int ramfs_read(struct device_d *_dev, FILE *f, void *buf, size_t insize)
 {
 	struct inode *inode = f->f_inode;
 	struct ramfs_inode *node = to_ramfs_inode(inode);
-	int chunk;
 	struct ramfs_chunk *data;
-	int ofs;
-	int now;
-	int pos = f->pos;
+	int ofs, len, now;
+	unsigned long pos = f->pos;
 	int size = insize;
 
-	chunk = pos / CHUNK_SIZE;
-	debug("%s: reading from chunk %d\n", __FUNCTION__, chunk);
+	debug("%s: %p %d @ %lld\n", __func__, node, insize, f->pos);
+
+	while (size) {
+		data = ramfs_find_chunk(node, pos, &ofs, &len);
+		if (!data)
+			return -EINVAL;
 
-	/* Position ourself in stream */
-	data = ramfs_find_chunk(node, chunk);
-	ofs = pos % CHUNK_SIZE;
+		debug("%s: pos: %ld ofs: %d len: %d\n", __func__, pos, ofs, len);
+
+		now = min(size, len);
 
-	/* Read till end of current chunk */
-	if (ofs) {
-		now = min(size, CHUNK_SIZE - ofs);
-		debug("Reading till end of node. size: %d\n", size);
 		memcpy(buf, data->data + ofs, now);
+
 		size -= now;
-		pos += now;
 		buf += now;
-		if (pos > node->size)
-			node->size = now;
-		data = data->next;
-	}
-
-	/* Do full chunks */
-	while (size >= CHUNK_SIZE) {
-		debug("do full chunk. size: %d\n", size);
-		memcpy(buf, data->data, CHUNK_SIZE);
-		data = data->next;
-		size -= CHUNK_SIZE;
-		pos += CHUNK_SIZE;
-		buf += CHUNK_SIZE;
-	}
-
-	/* And the rest */
-	if (size) {
-		debug("do rest. size: %d\n", size);
-		memcpy(buf, data->data, size);
+		pos += now;
 	}
 
 	return insize;
@@ -283,100 +255,135 @@ static int ramfs_write(struct device_d *_dev, FILE *f, const void *buf, size_t i
 {
 	struct inode *inode = f->f_inode;
 	struct ramfs_inode *node = to_ramfs_inode(inode);
-	int chunk;
 	struct ramfs_chunk *data;
-	int ofs;
-	int now;
-	int pos = f->pos;
+	int ofs, len, now;
+	unsigned long pos = f->pos;
 	int size = insize;
 
-	chunk = f->pos / CHUNK_SIZE;
-	debug("%s: writing to chunk %d\n", __FUNCTION__, chunk);
+	debug("%s: %p %d @ %lld\n", __func__, node, insize, f->pos);
+
+	while (size) {
+		data = ramfs_find_chunk(node, pos, &ofs, &len);
+		if (!data)
+			return -EINVAL;
+
+		debug("%s: pos: %ld ofs: %d len: %d\n", __func__, pos, ofs, len);
 
-	/* Position ourself in stream */
-	data = ramfs_find_chunk(node, chunk);
-	ofs = f->pos % CHUNK_SIZE;
+		now = min(size, len);
 
-	/* Write till end of current chunk */
-	if (ofs) {
-		now = min(size, CHUNK_SIZE - ofs);
-		debug("writing till end of node. size: %d\n", size);
 		memcpy(data->data + ofs, buf, now);
+
 		size -= now;
-		pos += now;
 		buf += now;
-		if (pos > node->size)
-			node->size = now;
-		data = data->next;
+		pos += now;
+	}
+
+	return insize;
+}
+
+static void ramfs_truncate_down(struct ramfs_inode *node, unsigned long size)
+{
+	struct ramfs_chunk *data, *tmp;
+
+	list_for_each_entry_safe(data, tmp, &node->data, list) {
+		if (data->ofs >= size) {
+			list_del(&data->list);
+			node->alloc_size -= data->size;
+			ramfs_put_chunk(data);
+		}
 	}
 
-	/* Do full chunks */
-	while (size >= CHUNK_SIZE) {
-		debug("do full chunk. size: %d\n", size);
-		memcpy(data->data, buf, CHUNK_SIZE);
-		data = data->next;
-		size -= CHUNK_SIZE;
-		pos += CHUNK_SIZE;
-		buf += CHUNK_SIZE;
+	node->current_chunk = NULL;
+}
+
+static int ramfs_truncate_up(struct ramfs_inode *node, unsigned long size)
+{
+	struct ramfs_chunk *data, *tmp;
+	LIST_HEAD(list);
+	unsigned long add = size - node->alloc_size;
+	unsigned long chunksize = add;
+	unsigned long alloc_size = 0;
+
+	if (node->alloc_size >= size)
+		return 0;
+
+	/*
+	 * We first try to allocate all space we need in a single chunk.
+	 * This may fail because of fragmented memory, so in case we cannot
+	 * allocate memory we successively decrease the chunk size until
+	 * we have enough allocations made.
+	 */
+	while (1) {
+		unsigned long now = min(chunksize, add);
+
+		data = ramfs_get_chunk(now);
+		if (!data) {
+			/* No luck, try with smaller chunk size */
+			chunksize >>= 1;
+
+			/* If we do not have even 128KiB then go out */
+			if (chunksize < SZ_128K)
+				goto out;
+
+			continue;
+		}
+
+		data->ofs = node->alloc_size + alloc_size;
+
+		alloc_size += data->size;
+
+		list_add_tail(&data->list, &list);
+
+		if (add <= data->size)
+			break;
+
+		add -= data->size;
 	}
 
-	/* And the rest */
-	if (size) {
-		debug("do rest. size: %d\n", size);
-		memcpy(data->data, buf, size);
+	list_splice_tail(&list, &node->data);
+
+	node->alloc_size += alloc_size;
+
+	return 0;
+
+out:
+	list_for_each_entry_safe(data, tmp, &list, list) {
+		list_del(&data->list);
+		ramfs_put_chunk(data);
 	}
 
-	return insize;
+	return -ENOSPC;
 }
 
 static int ramfs_truncate(struct device_d *dev, FILE *f, loff_t size)
 {
 	struct inode *inode = f->f_inode;
 	struct ramfs_inode *node = to_ramfs_inode(inode);
-	int oldchunks, newchunks;
-	struct ramfs_chunk *data = node->data;
-
-	newchunks = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
-	oldchunks = (node->size + CHUNK_SIZE - 1) / CHUNK_SIZE;
-
-	if (newchunks < oldchunks) {
-		if (!newchunks)
-			node->data = NULL;
-		while (newchunks--)
-			data = data->next;
-		while (data) {
-			struct ramfs_chunk *tmp;
-			tmp = data->next;
-			ramfs_put_chunk(data);
-			data = tmp;
-		}
-		if (node->recent_chunk > newchunks)
-			node->recent_chunk = 0;
-	}
+	int ret;
 
-	if (newchunks > oldchunks) {
-		if (data) {
-			data = ramfs_find_chunk(node, oldchunks - 1);
-		} else {
-			node->data = ramfs_get_chunk();
-			if (!node->data)
-				return -ENOSPC;
-			data = node->data;
-			oldchunks = 1;
-		}
+	/*
+	 * This is a malloc based filesystem, no need to support more
+	 * memory than fits into unsigned long.
+	 */
+	if (size > ULONG_MAX)
+		return -ENOSPC;
 
-		while (data->next)
-			data = data->next;
+	debug("%s: %p cur: %ld new: %lld alloc: %ld\n", __func__, node,
+	       node->size, size, node->alloc_size);
 
-		while (newchunks > oldchunks) {
-			data->next = ramfs_get_chunk();
-			if (!data->next)
-				return -ENOSPC;
-			data = data->next;
-			oldchunks++;
-		}
+	if (size == node->size)
+		return 0;
+
+	if (size < node->size) {
+		ramfs_truncate_down(node, size);
+	} else {
+		ret = ramfs_truncate_up(node, size);
+		if (ret)
+			return ret;
 	}
+
 	node->size = size;
+
 	return 0;
 }
 
@@ -386,6 +393,8 @@ static struct inode *ramfs_alloc_inode(struct super_block *sb)
 
 	node = xzalloc(sizeof(*node));
 
+	INIT_LIST_HEAD(&node->data);
+
 	return &node->inode;
 }
 
@@ -393,6 +402,8 @@ static void ramfs_destroy_inode(struct inode *inode)
 {
 	struct ramfs_inode *node = to_ramfs_inode(inode);
 
+	ramfs_truncate_down(node, 0);
+
 	free(node);
 }
 
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 09/11] fs: ramfs: Implement memmap
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
                   ` (7 preceding siblings ...)
  2020-06-15  6:02 ` [PATCH 08/11] fs: ramfs: Use dynamically sized chunks Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-06-15  6:02 ` [PATCH 10/11] libfile: copy_file: Fix calling discard_range Sascha Hauer
  2020-06-15  6:02 ` [PATCH 11/11] libfile: copy_file: explicitly truncate to final size Sascha Hauer
  10 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

When an inode only has a single chunk then we can support memmap for
it.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 fs/ramfs.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/fs/ramfs.c b/fs/ramfs.c
index ebe03de736..84aa56f83e 100644
--- a/fs/ramfs.c
+++ b/fs/ramfs.c
@@ -387,6 +387,25 @@ static int ramfs_truncate(struct device_d *dev, FILE *f, loff_t size)
 	return 0;
 }
 
+static int ramfs_memmap(struct device_d *_dev, FILE *f, void **map, int flags)
+{
+	struct inode *inode = f->f_inode;
+	struct ramfs_inode *node = to_ramfs_inode(inode);
+	struct ramfs_chunk *data;
+
+	if (list_empty(&node->data))
+		return -EINVAL;
+
+	if (!list_is_singular(&node->data))
+		return -EINVAL;
+
+	data = list_first_entry(&node->data, struct ramfs_chunk, list);
+
+	*map = data->data;
+
+	return 0;
+}
+
 static struct inode *ramfs_alloc_inode(struct super_block *sb)
 {
 	struct ramfs_inode *node;
@@ -434,6 +453,7 @@ static void ramfs_remove(struct device_d *dev)
 static struct fs_driver_d ramfs_driver = {
 	.read      = ramfs_read,
 	.write     = ramfs_write,
+	.memmap    = ramfs_memmap,
 	.truncate  = ramfs_truncate,
 	.flags     = FS_DRIVER_NO_DEV,
 	.drv = {
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 10/11] libfile: copy_file: Fix calling discard_range
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
                   ` (8 preceding siblings ...)
  2020-06-15  6:02 ` [PATCH 09/11] fs: ramfs: Implement memmap Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  2020-06-15  6:02 ` [PATCH 11/11] libfile: copy_file: explicitly truncate to final size Sascha Hauer
  10 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

discard_range() is called with an uninitialized size argument. Call it
after stat() on the source file descriptor. Also there's no point in
continuing when stat() on the source file descriptor fails, so bail out
in this case.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 lib/libfile.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/lib/libfile.c b/lib/libfile.c
index b4d87b624a..9de938b2d3 100644
--- a/lib/libfile.c
+++ b/lib/libfile.c
@@ -364,14 +364,15 @@ int copy_file(const char *src, const char *dst, int verbose)
 		goto out;
 	}
 
-	discard_range(dstfd, srcstat.st_size, 0);
+	ret = stat(src, &srcstat);
+	if (ret)
+		goto out;
 
-	if (verbose) {
-		if (stat(src, &srcstat) < 0)
-			srcstat.st_size = 0;
+	if (srcstat.st_size != FILESIZE_MAX)
+		discard_range(dstfd, srcstat.st_size, 0);
 
+	if (verbose)
 		init_progression_bar(srcstat.st_size);
-	}
 
 	while (1) {
 		r = read(srcfd, rw_buf, RW_BUF_SIZE);
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 11/11] libfile: copy_file: explicitly truncate to final size
  2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
                   ` (9 preceding siblings ...)
  2020-06-15  6:02 ` [PATCH 10/11] libfile: copy_file: Fix calling discard_range Sascha Hauer
@ 2020-06-15  6:02 ` Sascha Hauer
  10 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-06-15  6:02 UTC (permalink / raw)
  To: Barebox List

When possible truncate the destination file to the final size
explicitly. This allows for example ramfs to put the resulting
file contiguously into memory.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 lib/libfile.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/lib/libfile.c b/lib/libfile.c
index 9de938b2d3..863b6833a5 100644
--- a/lib/libfile.c
+++ b/lib/libfile.c
@@ -353,10 +353,6 @@ int copy_file(const char *src, const char *dst, int verbose)
 		goto out;
 	}
 
-	/* Set O_TRUNC only if file exist and is a regular file */
-	if (!s && S_ISREG(dststat.st_mode))
-		mode |= O_TRUNC;
-
 	dstfd = open(dst, mode);
 	if (dstfd < 0) {
 		printf("could not open %s: %s\n", dst, errno_str());
@@ -364,12 +360,22 @@ int copy_file(const char *src, const char *dst, int verbose)
 		goto out;
 	}
 
+	ret = ftruncate(dstfd, 0);
+	if (ret)
+		goto out;
+
 	ret = stat(src, &srcstat);
 	if (ret)
 		goto out;
 
-	if (srcstat.st_size != FILESIZE_MAX)
+	if (srcstat.st_size != FILESIZE_MAX) {
 		discard_range(dstfd, srcstat.st_size, 0);
+		if (S_ISREG(dststat.st_mode)) {
+			ret = ftruncate(dstfd, srcstat.st_size);
+			if (ret)
+				goto out;
+		}
+	}
 
 	if (verbose)
 		init_progression_bar(srcstat.st_size);
-- 
2.27.0


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 08/11] fs: ramfs: Use dynamically sized chunks
  2020-06-15  6:02 ` [PATCH 08/11] fs: ramfs: Use dynamically sized chunks Sascha Hauer
@ 2020-07-02 14:28   ` Ahmad Fatoum
  2020-07-05 14:14     ` Sascha Hauer
  0 siblings, 1 reply; 16+ messages in thread
From: Ahmad Fatoum @ 2020-07-02 14:28 UTC (permalink / raw)
  To: Sascha Hauer, Barebox List



On 6/15/20 8:02 AM, Sascha Hauer wrote:
> This changes the way ramfs stores its data. So far we used equally sized
> chunks, this patch changes it to use chunks in a size that fits our
> needs. The chunks are always allocated in the size they are needed for
> the current truncation. Only if we fail to allocate all desired memory
> at once we fall back to allocating smaller chunks. Together with using
> the generic list implementation this results in smaller code and has
> the advantage that many image files end up being contiguously in memory
> and thus we can provide a memmap for them. Files will end up
> contiguously in memory when they are first created, then truncated to
> the final size and then filled up with data. This is something which
> is normally easily achievable when desired.
> 
> Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
> ---
>  fs/ramfs.c | 307 +++++++++++++++++++++++++++--------------------------
>  1 file changed, 159 insertions(+), 148 deletions(-)
> 
> diff --git a/fs/ramfs.c b/fs/ramfs.c
> index 2b6df07996..ebe03de736 100644
> --- a/fs/ramfs.c
> +++ b/fs/ramfs.c
> @@ -23,12 +23,15 @@
>  #include <errno.h>
>  #include <linux/stat.h>
>  #include <xfuncs.h>
> +#include <linux/sizes.h>
>  
>  #define CHUNK_SIZE	(4096 * 2)
>  
>  struct ramfs_chunk {
>  	char *data;
> -	struct ramfs_chunk *next;
> +	unsigned long ofs;
> +	int size;
> +	struct list_head list;
>  };
>  
>  struct ramfs_inode {
> @@ -37,12 +40,14 @@ struct ramfs_inode {
>  	char *symlink;
>  	ulong mode;
>  
> -	ulong size;
> -	struct ramfs_chunk *data;
> +	/* bytes used in this inode */
> +	unsigned long size;
> +	/* bytes currently allocated for this inode */
> +	unsigned long alloc_size;
> +
> +	struct list_head data;
>  
> -	/* Points to recently used chunk */
> -	int recent_chunk;
> -	struct ramfs_chunk *recent_chunkp;
> +	struct ramfs_chunk *current_chunk;
>  };
>  
>  static inline struct ramfs_inode *to_ramfs_inode(struct inode *inode)
> @@ -89,18 +94,25 @@ static struct inode *ramfs_get_inode(struct super_block *sb, const struct inode
>  	return inode;
>  }
>  
> -static struct ramfs_chunk *ramfs_get_chunk(void)
> +#define MIN_SIZE SZ_8K
> +
> +static struct ramfs_chunk *ramfs_get_chunk(unsigned long size)
>  {
>  	struct ramfs_chunk *data = malloc(sizeof(struct ramfs_chunk));
> +
>  	if (!data)
>  		return NULL;
>  
> -	data->data = calloc(CHUNK_SIZE, 1);
> +	if (size < MIN_SIZE)
> +		size = MIN_SIZE;
> +
> +	data->data = calloc(size, 1);
>  	if (!data->data) {
>  		free(data);
>  		return NULL;
>  	}
> -	data->next = NULL;
> +
> +	data->size = size;
>  
>  	return data;
>  }
> @@ -160,23 +172,6 @@ static int ramfs_symlink(struct inode *dir, struct dentry *dentry,
>  
>  static int ramfs_unlink(struct inode *dir, struct dentry *dentry)
>  {
> -	struct inode *inode = d_inode(dentry);
> -
> -	if (inode) {
> -		struct ramfs_inode *node = to_ramfs_inode(inode);
> -		struct ramfs_chunk *chunk = node->data;
> -
> -		node->data = NULL;
> -
> -		while (chunk) {
> -			struct ramfs_chunk *tmp = chunk;
> -
> -			chunk = chunk->next;
> -
> -			ramfs_put_chunk(tmp);
> -		}
> -	}
> -
>  	return simple_unlink(dir, dentry);
>  }
>  
> @@ -200,80 +195,57 @@ static const struct inode_operations ramfs_dir_inode_operations =
>  	.create = ramfs_create,
>  };
>  
> -static struct ramfs_chunk *ramfs_find_chunk(struct ramfs_inode *node, int chunk)
> +static struct ramfs_chunk *ramfs_find_chunk(struct ramfs_inode *node,
> +					    unsigned long pos, int *ofs, int *len)
>  {
> -	struct ramfs_chunk *data;
> -	int left = chunk;
> +	struct ramfs_chunk *data, *cur = node->current_chunk;
>  
> -	if (chunk == 0)
> -		return node->data;
> +	if (cur && pos >= cur->ofs)
> +		data = cur;
> +	else
> +		data = list_first_entry(&node->data, struct ramfs_chunk, list);
>  
> -	if (node->recent_chunk == chunk)
> -		return node->recent_chunkp;
> +	list_for_each_entry_from(data, &node->data, list) {
> +		if (data->ofs + data->size > pos) {
> +			*ofs = pos - data->ofs;
> +			*len = data->ofs + data->size - pos;
>  
> -	if (node->recent_chunk < chunk && node->recent_chunk != 0) {
> -		/* Start at last known chunk */
> -		data = node->recent_chunkp;
> -		left -= node->recent_chunk;
> -	} else {
> -		/* Start at first chunk */
> -		data = node->data;
> -	}
> +			node->current_chunk = data;
>  
> -	while (left--)
> -		data = data->next;
> +			return data;
> +		}
> +	}
>  
> -	node->recent_chunkp = data;
> -	node->recent_chunk = chunk;
> +	pr_err("%s: no chunk for pos %ld found\n", __func__, pos);
>  
> -	return data;
> +	return NULL;
>  }
>  
>  static int ramfs_read(struct device_d *_dev, FILE *f, void *buf, size_t insize)
>  {
>  	struct inode *inode = f->f_inode;
>  	struct ramfs_inode *node = to_ramfs_inode(inode);
> -	int chunk;
>  	struct ramfs_chunk *data;
> -	int ofs;
> -	int now;
> -	int pos = f->pos;
> +	int ofs, len, now;
> +	unsigned long pos = f->pos;
>  	int size = insize;
>  
> -	chunk = pos / CHUNK_SIZE;
> -	debug("%s: reading from chunk %d\n", __FUNCTION__, chunk);
> +	debug("%s: %p %d @ %lld\n", __func__, node, insize, f->pos);
> +
> +	while (size) {
> +		data = ramfs_find_chunk(node, pos, &ofs, &len);
> +		if (!data)
> +			return -EINVAL;
>  
> -	/* Position ourself in stream */
> -	data = ramfs_find_chunk(node, chunk);
> -	ofs = pos % CHUNK_SIZE;
> +		debug("%s: pos: %ld ofs: %d len: %d\n", __func__, pos, ofs, len);
> +
> +		now = min(size, len);
>  
> -	/* Read till end of current chunk */
> -	if (ofs) {
> -		now = min(size, CHUNK_SIZE - ofs);
> -		debug("Reading till end of node. size: %d\n", size);
>  		memcpy(buf, data->data + ofs, now);
> +
>  		size -= now;
> -		pos += now;
>  		buf += now;
> -		if (pos > node->size)
> -			node->size = now;
> -		data = data->next;
> -	}
> -
> -	/* Do full chunks */
> -	while (size >= CHUNK_SIZE) {
> -		debug("do full chunk. size: %d\n", size);
> -		memcpy(buf, data->data, CHUNK_SIZE);
> -		data = data->next;
> -		size -= CHUNK_SIZE;
> -		pos += CHUNK_SIZE;
> -		buf += CHUNK_SIZE;
> -	}
> -
> -	/* And the rest */
> -	if (size) {
> -		debug("do rest. size: %d\n", size);
> -		memcpy(buf, data->data, size);
> +		pos += now;
>  	}
>  
>  	return insize;
> @@ -283,100 +255,135 @@ static int ramfs_write(struct device_d *_dev, FILE *f, const void *buf, size_t i
>  {
>  	struct inode *inode = f->f_inode;
>  	struct ramfs_inode *node = to_ramfs_inode(inode);
> -	int chunk;
>  	struct ramfs_chunk *data;
> -	int ofs;
> -	int now;
> -	int pos = f->pos;
> +	int ofs, len, now;
> +	unsigned long pos = f->pos;

On 32-bit systems, you are truncating a 64-bit pos to 32-bit here. Is this intended?

>  	int size = insize;
>  
> -	chunk = f->pos / CHUNK_SIZE;
> -	debug("%s: writing to chunk %d\n", __FUNCTION__, chunk);
> +	debug("%s: %p %d @ %lld\n", __func__, node, insize, f->pos);
> +
> +	while (size) {
> +		data = ramfs_find_chunk(node, pos, &ofs, &len);
> +		if (!data)
> +			return -EINVAL;
> +
> +		debug("%s: pos: %ld ofs: %d len: %d\n", __func__, pos, ofs, len);
>  
> -	/* Position ourself in stream */
> -	data = ramfs_find_chunk(node, chunk);
> -	ofs = f->pos % CHUNK_SIZE;
> +		now = min(size, len);
>  
> -	/* Write till end of current chunk */
> -	if (ofs) {
> -		now = min(size, CHUNK_SIZE - ofs);
> -		debug("writing till end of node. size: %d\n", size);
>  		memcpy(data->data + ofs, buf, now);
> +
>  		size -= now;
> -		pos += now;
>  		buf += now;
> -		if (pos > node->size)
> -			node->size = now;
> -		data = data->next;
> +		pos += now;
> +	}
> +
> +	return insize;
> +}
> +
> +static void ramfs_truncate_down(struct ramfs_inode *node, unsigned long size)
> +{
> +	struct ramfs_chunk *data, *tmp;
> +
> +	list_for_each_entry_safe(data, tmp, &node->data, list) {
> +		if (data->ofs >= size) {
> +			list_del(&data->list);
> +			node->alloc_size -= data->size;
> +			ramfs_put_chunk(data);
> +		}
>  	}
>  
> -	/* Do full chunks */
> -	while (size >= CHUNK_SIZE) {
> -		debug("do full chunk. size: %d\n", size);
> -		memcpy(data->data, buf, CHUNK_SIZE);
> -		data = data->next;
> -		size -= CHUNK_SIZE;
> -		pos += CHUNK_SIZE;
> -		buf += CHUNK_SIZE;
> +	node->current_chunk = NULL;
> +}
> +
> +static int ramfs_truncate_up(struct ramfs_inode *node, unsigned long size)
> +{
> +	struct ramfs_chunk *data, *tmp;
> +	LIST_HEAD(list);
> +	unsigned long add = size - node->alloc_size;
> +	unsigned long chunksize = add;
> +	unsigned long alloc_size = 0;
> +
> +	if (node->alloc_size >= size)
> +		return 0;
> +
> +	/*
> +	 * We first try to allocate all space we need in a single chunk.
> +	 * This may fail because of fragmented memory, so in case we cannot
> +	 * allocate memory we successively decrease the chunk size until
> +	 * we have enough allocations made.
> +	 */
> +	while (1) {
> +		unsigned long now = min(chunksize, add);
> +
> +		data = ramfs_get_chunk(now);
> +		if (!data) {
> +			/* No luck, try with smaller chunk size */
> +			chunksize >>= 1;
> +
> +			/* If we do not have even 128KiB then go out */
> +			if (chunksize < SZ_128K)
> +				goto out;
> +
> +			continue;
> +		}
> +
> +		data->ofs = node->alloc_size + alloc_size;
> +
> +		alloc_size += data->size;
> +
> +		list_add_tail(&data->list, &list);
> +
> +		if (add <= data->size)
> +			break;
> +
> +		add -= data->size;
>  	}
>  
> -	/* And the rest */
> -	if (size) {
> -		debug("do rest. size: %d\n", size);
> -		memcpy(data->data, buf, size);
> +	list_splice_tail(&list, &node->data);
> +
> +	node->alloc_size += alloc_size;
> +
> +	return 0;
> +
> +out:
> +	list_for_each_entry_safe(data, tmp, &list, list) {
> +		list_del(&data->list);
> +		ramfs_put_chunk(data);
>  	}
>  
> -	return insize;
> +	return -ENOSPC;
>  }
>  
>  static int ramfs_truncate(struct device_d *dev, FILE *f, loff_t size)
>  {
>  	struct inode *inode = f->f_inode;
>  	struct ramfs_inode *node = to_ramfs_inode(inode);
> -	int oldchunks, newchunks;
> -	struct ramfs_chunk *data = node->data;
> -
> -	newchunks = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
> -	oldchunks = (node->size + CHUNK_SIZE - 1) / CHUNK_SIZE;
> -
> -	if (newchunks < oldchunks) {
> -		if (!newchunks)
> -			node->data = NULL;
> -		while (newchunks--)
> -			data = data->next;
> -		while (data) {
> -			struct ramfs_chunk *tmp;
> -			tmp = data->next;
> -			ramfs_put_chunk(data);
> -			data = tmp;
> -		}
> -		if (node->recent_chunk > newchunks)
> -			node->recent_chunk = 0;
> -	}
> +	int ret;
>  
> -	if (newchunks > oldchunks) {
> -		if (data) {
> -			data = ramfs_find_chunk(node, oldchunks - 1);
> -		} else {
> -			node->data = ramfs_get_chunk();
> -			if (!node->data)
> -				return -ENOSPC;
> -			data = node->data;
> -			oldchunks = 1;
> -		}
> +	/*
> +	 * This is a malloc based filesystem, no need to support more
> +	 * memory than fits into unsigned long.
> +	 */
> +	if (size > ULONG_MAX)
> +		return -ENOSPC;
>  
> -		while (data->next)
> -			data = data->next;
> +	debug("%s: %p cur: %ld new: %lld alloc: %ld\n", __func__, node,
> +	       node->size, size, node->alloc_size);
>  
> -		while (newchunks > oldchunks) {
> -			data->next = ramfs_get_chunk();
> -			if (!data->next)
> -				return -ENOSPC;
> -			data = data->next;
> -			oldchunks++;
> -		}
> +	if (size == node->size)
> +		return 0;
> +
> +	if (size < node->size) {
> +		ramfs_truncate_down(node, size);
> +	} else {
> +		ret = ramfs_truncate_up(node, size);
> +		if (ret)
> +			return ret;
>  	}
> +
>  	node->size = size;
> +
>  	return 0;
>  }
>  
> @@ -386,6 +393,8 @@ static struct inode *ramfs_alloc_inode(struct super_block *sb)
>  
>  	node = xzalloc(sizeof(*node));
>  
> +	INIT_LIST_HEAD(&node->data);
> +
>  	return &node->inode;
>  }
>  
> @@ -393,6 +402,8 @@ static void ramfs_destroy_inode(struct inode *inode)
>  {
>  	struct ramfs_inode *node = to_ramfs_inode(inode);
>  
> +	ramfs_truncate_down(node, 0);
> +
>  	free(node);
>  }
>  
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 08/11] fs: ramfs: Use dynamically sized chunks
  2020-07-02 14:28   ` Ahmad Fatoum
@ 2020-07-05 14:14     ` Sascha Hauer
  0 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-07-05 14:14 UTC (permalink / raw)
  To: Ahmad Fatoum; +Cc: Barebox List

On Thu, Jul 02, 2020 at 04:28:26PM +0200, Ahmad Fatoum wrote:
> 
> 
> On 6/15/20 8:02 AM, Sascha Hauer wrote:
> > This changes the way ramfs stores its data. So far we used equally sized
> > chunks, this patch changes it to use chunks in a size that fits our
> > needs. The chunks are always allocated in the size they are needed for
> > the current truncation. Only if we fail to allocate all desired memory
> > at once we fall back to allocating smaller chunks. Together with using
> > the generic list implementation this results in smaller code and has
> > the advantage that many image files end up being contiguously in memory
> > and thus we can provide a memmap for them. Files will end up
> > contiguously in memory when they are first created, then truncated to
> > the final size and then filled up with data. This is something which
> > is normally easily achievable when desired.
> > 
> > Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
> > ---
> >  fs/ramfs.c | 307 +++++++++++++++++++++++++++--------------------------
> >  1 file changed, 159 insertions(+), 148 deletions(-)
> > 
> > diff --git a/fs/ramfs.c b/fs/ramfs.c
> > index 2b6df07996..ebe03de736 100644
> > --- a/fs/ramfs.c
> > +++ b/fs/ramfs.c
> > @@ -23,12 +23,15 @@
> >  #include <errno.h>
> >  #include <linux/stat.h>
> >  #include <xfuncs.h>
> > +#include <linux/sizes.h>
> >  
> >  #define CHUNK_SIZE	(4096 * 2)
> >  
> >  struct ramfs_chunk {
> >  	char *data;
> > -	struct ramfs_chunk *next;
> > +	unsigned long ofs;
> > +	int size;
> > +	struct list_head list;
> >  };
> >  
> >  struct ramfs_inode {
> > @@ -37,12 +40,14 @@ struct ramfs_inode {
> >  	char *symlink;
> >  	ulong mode;
> >  
> > -	ulong size;
> > -	struct ramfs_chunk *data;
> > +	/* bytes used in this inode */
> > +	unsigned long size;
> > +	/* bytes currently allocated for this inode */
> > +	unsigned long alloc_size;
> > +
> > +	struct list_head data;
> >  
> > -	/* Points to recently used chunk */
> > -	int recent_chunk;
> > -	struct ramfs_chunk *recent_chunkp;
> > +	struct ramfs_chunk *current_chunk;
> >  };
> >  
> >  static inline struct ramfs_inode *to_ramfs_inode(struct inode *inode)
> > @@ -89,18 +94,25 @@ static struct inode *ramfs_get_inode(struct super_block *sb, const struct inode
> >  	return inode;
> >  }
> >  
> > -static struct ramfs_chunk *ramfs_get_chunk(void)
> > +#define MIN_SIZE SZ_8K
> > +
> > +static struct ramfs_chunk *ramfs_get_chunk(unsigned long size)
> >  {
> >  	struct ramfs_chunk *data = malloc(sizeof(struct ramfs_chunk));
> > +
> >  	if (!data)
> >  		return NULL;
> >  
> > -	data->data = calloc(CHUNK_SIZE, 1);
> > +	if (size < MIN_SIZE)
> > +		size = MIN_SIZE;
> > +
> > +	data->data = calloc(size, 1);
> >  	if (!data->data) {
> >  		free(data);
> >  		return NULL;
> >  	}
> > -	data->next = NULL;
> > +
> > +	data->size = size;
> >  
> >  	return data;
> >  }
> > @@ -160,23 +172,6 @@ static int ramfs_symlink(struct inode *dir, struct dentry *dentry,
> >  
> >  static int ramfs_unlink(struct inode *dir, struct dentry *dentry)
> >  {
> > -	struct inode *inode = d_inode(dentry);
> > -
> > -	if (inode) {
> > -		struct ramfs_inode *node = to_ramfs_inode(inode);
> > -		struct ramfs_chunk *chunk = node->data;
> > -
> > -		node->data = NULL;
> > -
> > -		while (chunk) {
> > -			struct ramfs_chunk *tmp = chunk;
> > -
> > -			chunk = chunk->next;
> > -
> > -			ramfs_put_chunk(tmp);
> > -		}
> > -	}
> > -
> >  	return simple_unlink(dir, dentry);
> >  }
> >  
> > @@ -200,80 +195,57 @@ static const struct inode_operations ramfs_dir_inode_operations =
> >  	.create = ramfs_create,
> >  };
> >  
> > -static struct ramfs_chunk *ramfs_find_chunk(struct ramfs_inode *node, int chunk)
> > +static struct ramfs_chunk *ramfs_find_chunk(struct ramfs_inode *node,
> > +					    unsigned long pos, int *ofs, int *len)
> >  {
> > -	struct ramfs_chunk *data;
> > -	int left = chunk;
> > +	struct ramfs_chunk *data, *cur = node->current_chunk;
> >  
> > -	if (chunk == 0)
> > -		return node->data;
> > +	if (cur && pos >= cur->ofs)
> > +		data = cur;
> > +	else
> > +		data = list_first_entry(&node->data, struct ramfs_chunk, list);
> >  
> > -	if (node->recent_chunk == chunk)
> > -		return node->recent_chunkp;
> > +	list_for_each_entry_from(data, &node->data, list) {
> > +		if (data->ofs + data->size > pos) {
> > +			*ofs = pos - data->ofs;
> > +			*len = data->ofs + data->size - pos;
> >  
> > -	if (node->recent_chunk < chunk && node->recent_chunk != 0) {
> > -		/* Start at last known chunk */
> > -		data = node->recent_chunkp;
> > -		left -= node->recent_chunk;
> > -	} else {
> > -		/* Start at first chunk */
> > -		data = node->data;
> > -	}
> > +			node->current_chunk = data;
> >  
> > -	while (left--)
> > -		data = data->next;
> > +			return data;
> > +		}
> > +	}
> >  
> > -	node->recent_chunkp = data;
> > -	node->recent_chunk = chunk;
> > +	pr_err("%s: no chunk for pos %ld found\n", __func__, pos);
> >  
> > -	return data;
> > +	return NULL;
> >  }
> >  
> >  static int ramfs_read(struct device_d *_dev, FILE *f, void *buf, size_t insize)
> >  {
> >  	struct inode *inode = f->f_inode;
> >  	struct ramfs_inode *node = to_ramfs_inode(inode);
> > -	int chunk;
> >  	struct ramfs_chunk *data;
> > -	int ofs;
> > -	int now;
> > -	int pos = f->pos;
> > +	int ofs, len, now;
> > +	unsigned long pos = f->pos;
> >  	int size = insize;
> >  
> > -	chunk = pos / CHUNK_SIZE;
> > -	debug("%s: reading from chunk %d\n", __FUNCTION__, chunk);
> > +	debug("%s: %p %d @ %lld\n", __func__, node, insize, f->pos);
> > +
> > +	while (size) {
> > +		data = ramfs_find_chunk(node, pos, &ofs, &len);
> > +		if (!data)
> > +			return -EINVAL;
> >  
> > -	/* Position ourself in stream */
> > -	data = ramfs_find_chunk(node, chunk);
> > -	ofs = pos % CHUNK_SIZE;
> > +		debug("%s: pos: %ld ofs: %d len: %d\n", __func__, pos, ofs, len);
> > +
> > +		now = min(size, len);
> >  
> > -	/* Read till end of current chunk */
> > -	if (ofs) {
> > -		now = min(size, CHUNK_SIZE - ofs);
> > -		debug("Reading till end of node. size: %d\n", size);
> >  		memcpy(buf, data->data + ofs, now);
> > +
> >  		size -= now;
> > -		pos += now;
> >  		buf += now;
> > -		if (pos > node->size)
> > -			node->size = now;
> > -		data = data->next;
> > -	}
> > -
> > -	/* Do full chunks */
> > -	while (size >= CHUNK_SIZE) {
> > -		debug("do full chunk. size: %d\n", size);
> > -		memcpy(buf, data->data, CHUNK_SIZE);
> > -		data = data->next;
> > -		size -= CHUNK_SIZE;
> > -		pos += CHUNK_SIZE;
> > -		buf += CHUNK_SIZE;
> > -	}
> > -
> > -	/* And the rest */
> > -	if (size) {
> > -		debug("do rest. size: %d\n", size);
> > -		memcpy(buf, data->data, size);
> > +		pos += now;
> >  	}
> >  
> >  	return insize;
> > @@ -283,100 +255,135 @@ static int ramfs_write(struct device_d *_dev, FILE *f, const void *buf, size_t i
> >  {
> >  	struct inode *inode = f->f_inode;
> >  	struct ramfs_inode *node = to_ramfs_inode(inode);
> > -	int chunk;
> >  	struct ramfs_chunk *data;
> > -	int ofs;
> > -	int now;
> > -	int pos = f->pos;
> > +	int ofs, len, now;
> > +	unsigned long pos = f->pos;
> 
> On 32-bit systems, you are truncating a 64-bit pos to 32-bit here. Is this intended?

It's a RAM filesystem, so we can't handle files that are bigger than the
address space here. Yes, this is intended, I wanted to avoid the burden
of doing 64bit math on 32bit systems.

Sascha

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 04/11] fs: free inodes we no longer need
  2020-06-15  6:02 ` [PATCH 04/11] fs: free inodes we no longer need Sascha Hauer
@ 2020-08-03 22:02   ` Ahmad Fatoum
  2020-08-10 11:13     ` Sascha Hauer
  0 siblings, 1 reply; 16+ messages in thread
From: Ahmad Fatoum @ 2020-08-03 22:02 UTC (permalink / raw)
  To: Sascha Hauer, Barebox List

Hello Sascha,

On 6/15/20 8:02 AM, Sascha Hauer wrote:
> So far we freed the no longer needed inodes only at unmount time.
> Let's trust our reference counting a bit more and free them once
> the reference counter hits zero.
> 
> Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
> ---
>  fs/fs.c | 8 +++++---
>  1 file changed, 5 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/fs.c b/fs/fs.c
> index cecb3d70e0..e04cadfe5d 100644
> --- a/fs/fs.c
> +++ b/fs/fs.c
> @@ -1090,10 +1090,12 @@ void iput(struct inode *inode)
>  	if (!inode)
>  		return;
>  
> -	if (!inode->i_count)
> -		return;
> -
>  	inode->i_count--;
> +
> +	if (!inode->i_count) {
> +		list_del(&inode->i_sb_list);

There is no explicit initialization of i_sb_list anywhere, only
list_add.

Without reverting this patch, I can reproduce null pointer dereference
booting from squashfs as i_sb_list remained uninitialized when this
line is executed.

Can you drop this patch for now?


> +		destroy_inode(inode);
> +	}
>  }
>  
>  struct inode *iget(struct inode *inode)
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 04/11] fs: free inodes we no longer need
  2020-08-03 22:02   ` Ahmad Fatoum
@ 2020-08-10 11:13     ` Sascha Hauer
  0 siblings, 0 replies; 16+ messages in thread
From: Sascha Hauer @ 2020-08-10 11:13 UTC (permalink / raw)
  To: Ahmad Fatoum; +Cc: Barebox List

Hi Ahmad,

On Tue, Aug 04, 2020 at 12:02:37AM +0200, Ahmad Fatoum wrote:
> Hello Sascha,
> 
> On 6/15/20 8:02 AM, Sascha Hauer wrote:
> > So far we freed the no longer needed inodes only at unmount time.
> > Let's trust our reference counting a bit more and free them once
> > the reference counter hits zero.
> > 
> > Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
> > ---
> >  fs/fs.c | 8 +++++---
> >  1 file changed, 5 insertions(+), 3 deletions(-)
> > 
> > diff --git a/fs/fs.c b/fs/fs.c
> > index cecb3d70e0..e04cadfe5d 100644
> > --- a/fs/fs.c
> > +++ b/fs/fs.c
> > @@ -1090,10 +1090,12 @@ void iput(struct inode *inode)
> >  	if (!inode)
> >  		return;
> >  
> > -	if (!inode->i_count)
> > -		return;
> > -
> >  	inode->i_count--;
> > +
> > +	if (!inode->i_count) {
> > +		list_del(&inode->i_sb_list);
> 
> There is no explicit initialization of i_sb_list anywhere, only
> list_add.
> 
> Without reverting this patch, I can reproduce a null pointer dereference
> when booting from squashfs, as i_sb_list remains uninitialized when this
> line is executed.
> 
> Can you drop this patch for now?

Find a fix for this issue in your inbox.

Sascha

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2020-08-10 11:13 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-15  6:02 [PATCH 00/11] ramfs: Use dynamically sized chunks Sascha Hauer
2020-06-15  6:02 ` [PATCH 01/11] update list.h from Linux-5.7 Sascha Hauer
2020-06-15  6:02 ` [PATCH 02/11] fs: Add destroy_inode callbacks to filesystems Sascha Hauer
2020-06-15  6:02 ` [PATCH 03/11] fs: Make iput() accept NULL pointers Sascha Hauer
2020-06-15  6:02 ` [PATCH 04/11] fs: free inodes we no longer need Sascha Hauer
2020-08-03 22:02   ` Ahmad Fatoum
2020-08-10 11:13     ` Sascha Hauer
2020-06-15  6:02 ` [PATCH 05/11] digest: Drop usage of memmap Sascha Hauer
2020-06-15  6:02 ` [PATCH 06/11] fs: ramfs: Return -ENOSPC Sascha Hauer
2020-06-15  6:02 ` [PATCH 07/11] fs: ramfs: Drop dead code Sascha Hauer
2020-06-15  6:02 ` [PATCH 08/11] fs: ramfs: Use dynamically sized chunks Sascha Hauer
2020-07-02 14:28   ` Ahmad Fatoum
2020-07-05 14:14     ` Sascha Hauer
2020-06-15  6:02 ` [PATCH 09/11] fs: ramfs: Implement memmap Sascha Hauer
2020-06-15  6:02 ` [PATCH 10/11] libfile: copy_file: Fix calling discard_range Sascha Hauer
2020-06-15  6:02 ` [PATCH 11/11] libfile: copy_file: explicitly truncate to final size Sascha Hauer

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox