path: root/01-btrfs-alloc.patch
# http://debbugs.gnu.org/cgi/bugreport.cgi?bug=20570
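#
# On BTRFS (and NFS) block allocation can happen asynchronously, so du may
# briefly report 0 allocated blocks for a file that was just written.  The
# hunks below adjust the affected coreutils tests to sync the file first,
# or to skip/relax the check on remote file systems.
#
# Rough illustration of the behaviour being worked around (a sketch only,
# not part of the patch; the reported figures depend on the file system):
#
#   dd if=/dev/zero of=file.in bs=1M count=1
#   du -k file.in    # may print 0 on BTRFS immediately after the write
#   sync file.in     # sync(1) accepts file operands since coreutils 8.24
#   du -k file.in    # now reports the real allocation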

diff --git a/tests/cp/fiemap-empty.sh b/tests/cp/fiemap-empty.sh
index a5eaac5..b3b2cd7 100755
--- a/tests/cp/fiemap-empty.sh
+++ b/tests/cp/fiemap-empty.sh
@@ -22,6 +22,7 @@ print_ver_ cp
 # FIXME: enable any part of this test that is still relevant,
 # or, if none are relevant (now that cp does not handle unwritten
 # extents), just remove the test altogether.
+# Note also that checking allocations may require a sync first, on BTRFS at least
 skip_ 'disabled for now'
 
 touch fiemap_chk
diff --git a/tests/dd/sparse.sh b/tests/dd/sparse.sh
index 4fdabad..626b405 100755
--- a/tests/dd/sparse.sh
+++ b/tests/dd/sparse.sh
@@ -17,6 +17,7 @@
 
 . "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
 print_ver_ dd
+is_local_dir_ . || very_expensive_
 require_sparse_support_
 
 # Ensure basic sparse generation works
@@ -50,6 +51,9 @@ dd if=/dev/zero    of=file.in bs=1M count=1 seek=1 conv=notrunc || fail=1
 
 kb_alloc() { du -k "$1"|cut -f1; }
 
+# sync out data for async allocators like NFS/BTRFS
+# sync file.in || fail=1
+
 # If our just-created input file appears to be too small,
 # skip the remaining tests.  On at least Solaris 10 with NFS,
 # file.in is reported to occupy <= 1KiB for about 50 seconds
@@ -58,7 +62,10 @@ if test $(kb_alloc file.in) -gt 3000; then
 
   # Ensure NUL blocks smaller than the block size are not made sparse.
   # Here, with a 2MiB block size, dd's conv=sparse must *not* introduce a hole.
-  dd if=file.in of=file.out bs=2M conv=sparse
+  dd if=file.in of=file.out bs=2M conv=sparse || fail=1
+
+  # Intermittently BTRFS returns 0 allocation for file.out unless synced
+  sync file.out || framework_failure_
   test 2500 -lt $(kb_alloc file.out) || fail=1
 
   # Note we recreate a sparse file first to avoid
diff --git a/tests/du/2g.sh b/tests/du/2g.sh
index 5f04488..12c2eed 100755
--- a/tests/du/2g.sh
+++ b/tests/du/2g.sh
@@ -24,7 +24,6 @@ print_ver_ du
 # Creating a 2GB file counts as 'very expensive'.
 very_expensive_
 
-
 # Get number of free kilobytes on current partition, so we can
 # skip this test if there is insufficient free space.
 free_kb=$(df -k --output=avail . | tail -n1)
@@ -42,16 +42,21 @@
 }
 
 big=big
-rm -f $big
-test -t 1 || printf 'creating a 2GB file...\n'
-for i in $(seq 100); do
-  # Note: 2147483648 == 2^31. Print floor(2^31/100) per iteration.
-  printf %21474836s x >> $big || fail=1
-  # On the final iteration, append the remaining 48 bytes.
-  test $i = 100 && { printf %48s x >> $big || fail=1; }
-  test -t 1 && printf 'creating a 2GB file: %d%% complete\r' $i
-done
-echo
+if ! fallocate -l2G $big; then
+  rm -f $big
+  {
+    is_local_dir_ . || skip_ 'Not writing 2GB data to remote'
+    for i in $(seq 100); do
+      # Note: 2147483648 == 2^31. Print floor(2^31/100) per iteration.
+      printf %21474836s x || fail=1
+    done
+    # After the final iteration, append the remaining 48 bytes.
+    printf %48s x || fail=1
+  } > $big || fail=1
+fi
+
+# The allocation may be done asynchronously (BTRFS for example)
+sync $big || framework_failure_
 
 du -k $big > out1 || fail=1
 rm -f $big
-- 
2.3.4
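# To try the patch against a coreutils source tree (a sketch; assumes the
# usual -p1 strip level implied by the a/ and b/ path prefixes above):
#
#   patch -p1 < 01-btrfs-alloc.patch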