
Source file src/github.com/peterbourgon/diskv/v3/issues_test.go

package diskv

import (
	"bytes"
	"io/ioutil"
	"math/rand"
	"sync"
	"testing"
	"time"
)

// ReadStream served from the cache shouldn't panic with a nil dereference
// when no Compression is set :)
func TestIssue2A(t *testing.T) {
	d := New(Options{
		BasePath:     "test-issue-2a",
		CacheSizeMax: 1024,
	})
	defer d.EraseAll()

	input := "abcdefghijklmnopqrstuvwxy"
	key, writeBuf, doSync := "a", bytes.NewBufferString(input), false
	if err := d.WriteStream(key, writeBuf, doSync); err != nil {
		t.Fatal(err)
	}

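	// Read the value twice. The first read streams it from disk and fills
	// the cache (the 25-byte value fits comfortably in the 1024-byte
	// cache); the second read is served from the cache, the code path that
	// used to dereference the nil (unset) Compression.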
	for i := 0; i < 2; i++ {
		began := time.Now()
		rc, err := d.ReadStream(key, false)
		if err != nil {
			t.Fatal(err)
		}
		buf, err := ioutil.ReadAll(rc)
		if err != nil {
			t.Fatal(err)
		}
		if !cmpBytes(buf, []byte(input)) {
			t.Fatalf("read #%d: %q != %q", i+1, string(buf), input)
		}
		rc.Close()
		t.Logf("read #%d in %s", i+1, time.Since(began))
	}
}

// ReadStream on a key that resolves to a directory should return an error.
func TestIssue2B(t *testing.T) {
	blockTransform := func(s string) []string {
		transformBlockSize := 3
		sliceSize := len(s) / transformBlockSize
		pathSlice := make([]string, sliceSize)
		for i := 0; i < sliceSize; i++ {
			from, to := i*transformBlockSize, (i*transformBlockSize)+transformBlockSize
			pathSlice[i] = s[from:to]
		}
		return pathSlice
	}

	d := New(Options{
		BasePath:     "test-issue-2b",
		Transform:    blockTransform,
		CacheSizeMax: 0,
	})
	defer d.EraseAll()

	v := []byte{'1', '2', '3'}
	if err := d.Write("abcabc", v); err != nil {
		t.Fatal(err)
	}

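	// With blockTransform, "abcabc" is stored at <base>/abc/abc/abcabc, so
	// the key "abc" resolves to <base>/abc/abc -- a directory, not a file.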
	_, err := d.ReadStream("abc", false)
	if err == nil {
		t.Fatal("ReadStream('abc') should return an error")
	}
	t.Logf("ReadStream('abc') returned error: %v", err)
}

// Ensure ReadStream with direct=true isn't racy.
func TestIssue17(t *testing.T) {
	var (
		basePath = "test-data"
	)

	dWrite := New(Options{
		BasePath:     basePath,
		CacheSizeMax: 0,
	})
	defer dWrite.EraseAll()

	dRead := New(Options{
		BasePath:     basePath,
		CacheSizeMax: 50,
	})
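
	// dRead shares basePath with dWrite but keeps its own 50-byte cache;
	// dWrite's deferred EraseAll cleans up the shared directory for both.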
	cases := map[string]string{
		"a": `1234567890`,
		"b": `2345678901`,
		"c": `3456789012`,
		"d": `4567890123`,
		"e": `5678901234`,
	}

	for k, v := range cases {
		if err := dWrite.Write(k, []byte(v)); err != nil {
			t.Fatalf("during write: %s", err)
		}
		dRead.Read(k) // ensure it's added to cache
	}

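	// Release all readers at once via the start channel so the ReadStream
	// calls overlap as much as possible; under `go test -race` this flushes
	// out unsynchronized cache access.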
	var wg sync.WaitGroup
	start := make(chan struct{})
	for k := range cases {
		wg.Add(1)
		go func(k string) {
			defer wg.Done()
			<-start
			rc, err := dRead.ReadStream(k, true)
			if err != nil {
				t.Error(err)
				return
			}
			rc.Close()
		}(k)
	}
	close(start)
	wg.Wait()
}

// Test for issue #40, where acquiring two stream readers on the same k/v pair
// caused the value to be written into the cache twice, messing up the
// size calculations.
func TestIssue40(t *testing.T) {
	var (
		basePath = "test-data"
	)

	// Simplest transform function: put all the data files into the base dir.
	flatTransform := func(s string) []string { return []string{} }

	// Initialize a new diskv store, rooted at basePath ("test-data"),
	// with a 100 byte cache.
	d := New(Options{
		BasePath:     basePath,
		Transform:    flatTransform,
		CacheSizeMax: 100,
	})
	defer d.EraseAll()

	// Write a 50 byte value, filling the cache halfway.
	k1 := "key1"
	d1 := make([]byte, 50)
	rand.Read(d1)
	d.Write(k1, d1)

	// Get *two* read streams on it. Because the key is not yet in the cache,
	// and will not be in the cache until a stream is fully read, both
	// readers use the 'siphon' object, which always writes to the cache
	// after reading.
	s1, err := d.ReadStream(k1, false)
	if err != nil {
		t.Fatal(err)
	}
	s2, err := d.ReadStream(k1, false)
	if err != nil {
		t.Fatal(err)
	}

	// When each stream is drained, the underlying siphon writes the value
	// into the cache's map and increments the cache size. This leaves one
	// entry in the cache map ("key1" mapping to a 50 byte slice) but a
	// cache size of 100, because the buggy code does not check whether an
	// entry already exists in the map.
	//   s1 drains:
	//     cache[k] = v
	//     cacheSize += len(v)
	//   s2 drains:
	//     cache[k] = v         // overwrites the existing entry
	//     cacheSize += len(v)  // blindly adds to the cache size
	ioutil.ReadAll(s1)
	ioutil.ReadAll(s2)
	s1.Close()
	s2.Close()

	// Now write a different k/v pair, with a 60 byte value.
	k2 := "key2"
	d2 := make([]byte, 60)
	rand.Read(d2)
	d.Write(k2, d2)

	// The act of reading the k/v pair back out causes it to be cached.
	// Because the cache is only 100 bytes, it needs to evict existing
	// entries to make room. The buggy code deletes the single 50-byte entry
	// from the cache map and decrements cacheSize by 50... but because
	// cacheSize was improperly incremented twice earlier, this leaves no
	// entries in the cache map yet cacheSize == 50. Since
	// CacheSizeMax-cacheSize (100-50) is less than 60, there is no room in
	// the cache for the new entry, and the read panics.
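	// With correct accounting, cacheSize is 50 here (one 50-byte entry), so
	// evicting it leaves enough room for the 60-byte value and this read
	// succeeds.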
	d.Read(k2)
}