File stressdisk-1.0.13.obscpio of Package stressdisk
07070100000000000081A400000000000000000000000164995F2300000061000000000000000000000000000000000000001D00000000stressdisk-1.0.13/.gitignore*~
/Junk
/stressdisk
/stressdisk.log
/stressdisk.exe
/stressdisk_stats.json
/upload
/dist
dist/
07070100000001000081A400000000000000000000000164995F2300000467000000000000000000000000000000000000002300000000stressdisk-1.0.13/.goreleaser.yaml# Documentation at https://goreleaser.com
before:
hooks:
# You may remove this if you don't use go modules.
- go mod tidy
# you may remove this if you don't need go generate
# - go generate ./...
builds:
- env:
- CGO_ENABLED=0
goos:
- windows
- darwin
- linux
- freebsd
- netbsd
goarch:
- amd64
- 386
- arm
- arm64
archives:
- format: tar.gz
# this name template makes the OS and Arch compatible with the results of uname.
name_template: >-
{{ .ProjectName }}_
{{- if eq .Os "darwin" }}macOS
{{- else }}{{ .Os }}{{ end }}_
{{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ end }}
{{- if .Arm }}v{{ .Arm }}{{ end }}
# use zip for windows archives
format_overrides:
- goos: windows
format: zip
files:
- README.md
- COPYING
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ incpatch .Version }}-next"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
07070100000002000081A400000000000000000000000164995F2300000447000000000000000000000000000000000000001A00000000stressdisk-1.0.13/COPYINGCopyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
07070100000003000081A400000000000000000000000164995F2300001F67000000000000000000000000000000000000001C00000000stressdisk-1.0.13/README.mdStressDisk
==========
This is a program designed to stress test your disks and find failures
in them.
Use it to soak test your new disks / memory cards / USB sticks before
trusting your valuable data to it.
Use it to soak test your new PC hardware also for the same reason.
Note that it turns out to be quite a sensitive memory tester too so
errors can sometimes be caused by bad RAM in your computer rather than
disk errors.
Install
-------
StressDisk is a Go program and comes as a single binary file.
Download the relevant binary from
- https://github.com/ncw/stressdisk/releases
Or alternatively if you have Go installed use
go install github.com/ncw/stressdisk@latest
If you want to modify the sources, it is recommended to check out the repository.
git clone https://github.com/ncw/stressdisk.git
cd stressdisk
go build .
You can then modify the source, rebuild as needed, and submit patches.
Usage
-----
Use `stressdisk -h` to see all the options.
```
Disk soak testing utility
Automatic usage:
stressdisk run directory - auto fill the directory up and soak test it
stressdisk cycle directory - fill, test, delete, repeat - torture for flash
stressdisk clean directory - delete the check files from the directory
Manual usage:
stressdisk help - this help
stressdisk [ -s size ] write filename - write a check file
stressdisk read filename - read the check file back
stressdisk reads filename - ... repeatedly for duration set
stressdisk check filename1 filename2 - compare two check files
stressdisk checks filename1 filename2 - ... repeatedly for duration set
Full options:
-cpuprofile string
Write cpu profile to file
-duration duration
Duration to run test (default 24h0m0s)
-logfile string
File to write log to set to empty to ignore (default "stressdisk.log")
-maxerrors uint
Max number of errors to print per file (default 64)
-nodirect
Don't use O_DIRECT
-s int
Size of the check files (default 1000000000)
-stats duration
Interval to print stats (default 1m0s)
-statsfile string
File to load/store statistics data (default "stressdisk_stats.json")
Note that flags must be provided BEFORE the stressdisk command, eg
stressdisk -duration 48h run /mnt
```
Quickstart
----------
Install your new media in your computer and format it (make a filesystem on it).
Open a terminal (or cmd prompt if running Windows).
To check the disk:
Linux: ./stressdisk run /media/nameofnewdisk
Windows: stressdisk.exe run F:
Let it run for 24 hours. It will finish on its own. Note whether any errors
were reported. Then use the following to remove the check files:
Linux: ./stressdisk clean /media/nameofnewdisk
Windows: stressdisk.exe clean F:
If you find errors, then you can use the `read` / `reads` / `check` /
`checks` sub-commands to investigate further.
2012/09/20 22:23:20 Exiting after running for > 30s
2012/09/20 22:23:20
Bytes read: 20778 MByte ( 692.59 MByte/s)
Bytes written: 0 MByte ( 0.00 MByte/s)
Errors: 0
Elapsed time: 30.00033s
2012/09/20 22:23:20 PASSED with no errors
Stress disk can be interrupted after it has written its check files
and it will continue from where it left off.
The default running time for stressdisk is 24h which is a sensible
minimum. However if you want to run it for longer then use `-duration
48h` for instance.
Errors
------
If stressdisk finds an error it will print lines like this:
2019/03/07 10:55:09 0AA00000: 2D, A1 diff 8C
The fields are `offset`, `file1 value`, `file 2 value` and the diff
which is `file1_value XOR file2_value` all in hexadecimal. The diff
will be a binary number for a single bit error so `01`, `02`, `04`,
`08`, `10`, `20`, `40`, `80`.
This may give some insight into the problem (eg a single bit flipped),
or errors starting 4k boundaries, but may not.
However, the actual errors aren't that important, you shouldn't get
**any**. If you do then:
1. run [memtest86](https://www.memtest86.com/) on the machine for 48 hours to check for RAM problems, if this passes then
2. try the stressdisk test on another machine if you can, if this fails then
3. discard or return the media
If you didn't get to step 3. then you'll need to play with the
hardware of the machine, replace the RAM etc. Stressdisk errors are
*usually* caused by bad media, but not always. Bad RAM is a fairly
likely cause of stressdisk errors too.
Testing Flash
-------------
Stressdisk has a special mode which is good for giving flash / SSD
media a hard time. The normal "run" test will fill the disk and read
the files back continually which a good test but doesn't torture flash
as much as it could as writing is a much more intensive operation for
flash than reading.
To test flash / SSD harder "cycle" mode does lots of write cycles as
well as read cycles. It works by filling the media with test files
verifying that the data is valid, deleting the test files, and
repeating the write + verify process continually.
**Caution**: This will be destructive to flash media if run long periods
of time, since flash devices have a limited number of writes per
sector/cell.
**This Is Intentional**! You can use this to stress test flash harder.
You can also use this mode to find the breaking point of flash devices
to determine what the lifetime of the media is if you are quality
testing flash media before making a bulk buy. The `-statsfile` option
is useful when doing this to save persistent stats to disk in case the
process is interrupted.
If you are merely interested in doing a less destructive test of the
flash device for data integrity, then should use the "run" mode, as
this mode only writes the check files once, and does reads operations
to verify data integrity which have little destructive penalty.
How it works
------------
Stressdisk fills up your disk with identical large files (1 GB by
default) full of random data. It then randomly chooses a pair of
these and reads them back checking they are the same.
This causes the disk head to seek backwards and forwards across the
disk surface very quickly which is the worst possible access pattern
for disk drives and flushes out errors.
It seems to work equally well for non-rotating media.
The access patterns are designed so that your computer won't cache the
data being read off the disk so your computer will be forced to read
it off the disk.
Stressdisk uses OS specific commands to make sure the data isn't
cached in RAM so that you won't just be testing your computer RAM.
History
-------
I wrote the first version of stressdisk in about 1995 after
discovering that the CD I had just written at great expense had bit
errors in it. I discovered that my very expensive SCSI disk was
returning occasional errors.
It has been used over the years to soak test 1000s of disks, memory
cards, usb sticks and found many with errors. It has also found quite
a few memory errors (bad RAM).
The original stressdisk was written in C with a perl wrapper but it
was rather awkward to use because of that, so I re-wrote it in Go in
2012 as an exercise in learning Go and so that I could distribute it
in an easy to run single executable format.
License
-------
This is free software under the terms of MIT the license (check the
COPYING file included in this package).
Contact and support
-------------------
The project website is at:
- https://github.com/ncw/stressdisk
There you can file bug reports, ask for help or contribute patches.
Authors
-------
- Nick Craig-Wood <nick@craig-wood.com>
- Tomás Senart <tsenart@gmail.com>
- David Meador <dave@meadorresearch.com>
Contributors
------------
- Yves Junqueira for code review and helpful suggestions
- dcabro for reporting the windows empty partition issue
- Colin Lord for fixing documentation issues
- Your name goes here!
07070100000004000081A400000000000000000000000164995F2300000186000000000000000000000000000000000000001D00000000stressdisk-1.0.13/RELEASE.md# Making a release
Compile and test
Then run
goreleaser --clean --snapshot
To test the build
When happy, tag the release
git tag -s v1.0.XX -m "Release v1.0.XX"
Push it to github with
git push origin # without --follow-tags so it doesn't push the tag if it fails
git push --follow-tags origin
Then do a release build (set GITHUB token first)
goreleaser --clean
07070100000005000081A400000000000000000000000164995F2300000052000000000000000000000000000000000000001900000000stressdisk-1.0.13/go.modmodule github.com/ncw/stressdisk
go 1.12
require github.com/ncw/directio v1.0.5
07070100000006000081A400000000000000000000000164995F23000000A5000000000000000000000000000000000000001900000000stressdisk-1.0.13/go.sumgithub.com/ncw/directio v1.0.5 h1:JSUBhdjEvVaJvOoyPAbcW0fnd0tvRXD76wEfZ1KcQz4=
github.com/ncw/directio v1.0.5/go.mod h1:rX/pKEYkOXBGOggmcyJeJGloCkleSvphPx2eV3t6ROk=
07070100000007000081A400000000000000000000000164995F2300004F4B000000000000000000000000000000000000002000000000stressdisk-1.0.13/stressdisk.go// This checks a disc for errors. It is also a sensitive memory tester
// (suprisingly) so if you get a fault using this program then it could
// be disc or RAM.
//
// Nick Craig-Wood <nick@craig-wood.com>
/*
KEEP a state file - maybe number of rounds so could restart?
Estimate time to finish also
No timeout on write file? Or read once?
Make LEAF be settable
Make blockReader not Fatal error if there is a problem - would then
need to make sure all the goroutines were killed off properly
*/
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"os/signal"
"path/filepath"
"regexp"
"runtime"
"runtime/pprof"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/ncw/directio"
)
const (
// MB is Bytes in a Megabyte
MB = 1024 * 1024
// BlockSize is size of block to do IO with
BlockSize = 2 * MB
// Magic constants (See Knuth: Seminumerical Algorithms)
// Do not change! (They are for a maximal length LFSR)
ranlen = 55
ranlen2 = 24
// Leaf is name of the check files
Leaf = "TST_"
)
// Globals
var (
// Flags
fileSize = flag.Int64("s", 1e9, "Size of the check files")
cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file")
duration = flag.Duration("duration", time.Hour*24, "Duration to run test")
statsInterval = flag.Duration("stats", time.Minute*1, "Interval to print stats")
logfile = flag.String("logfile", "stressdisk.log", "File to write log to set to empty to ignore")
maxErrors = flag.Uint64("maxerrors", 64, "Max number of errors to print per file")
noDirect = flag.Bool("nodirect", false, "Don't use O_DIRECT")
statsFile = flag.String("statsfile", "stressdisk_stats.json", "File to load/store statistics data")
stats *Stats
openFile = directio.OpenFile
version = "development version" // overridden by goreleaser
)
// statsMode defines what mode the stats collection is in
type statsMode byte
// statsMode definitions
const (
modeNone statsMode = iota
modeRead
modeReadDone
modeWrite
modeWriteDone
)
// Stats stores accumulated statistics
type Stats struct {
Read uint64
Written uint64
Errors uint64
Start time.Time
ReadStart time.Time // start of unaccumulated time of read operation
ReadSeconds float64 // read seconds accumulator
WriteStart time.Time // start of unaccumulated time of write operation
WriteSeconds float64 // write seconds accumulator
mode statsMode // current mode - modeNone, modeRead, modeWrite
}
// NewStats cretates an initialised Stats
func NewStats() *Stats {
return &Stats{Start: time.Now()}
}
// SetMode sets the current operating mode of the stats module.
//
// Be sure to transition from for example modeRead to modeReadDone,
// before transitioning to modeWrite, otherwise you will lose the
// corresponding time statistic in the time accumulator.
func (s *Stats) SetMode(mode statsMode) {
s.mode = mode
switch s.mode {
case modeRead:
s.ReadStart = time.Now()
case modeReadDone:
dt := time.Since(s.ReadStart)
s.ReadSeconds += dt.Seconds()
stats.Store()
case modeWrite:
s.WriteStart = time.Now()
case modeWriteDone:
dt := time.Since(s.WriteStart)
s.WriteSeconds += dt.Seconds()
stats.Store()
}
}
// String convert the Stats to a string for printing
func (s *Stats) String() string {
dt := time.Since(s.Start) // total elapsed time
read, written := atomic.LoadUint64(&s.Read), atomic.LoadUint64(&s.Written)
readSpeed, writeSpeed := 0.0, 0.0
// calculate interim duration - for periodic stats display
// while operation is not completed.
switch s.mode {
case modeRead:
s.ReadSeconds += time.Since(s.ReadStart).Seconds()
s.ReadStart = time.Now()
case modeWrite:
s.WriteSeconds += time.Since(s.WriteStart).Seconds()
s.WriteStart = time.Now()
}
if s.ReadSeconds != 0 {
readSpeed = float64(read) / MB / s.ReadSeconds
}
if s.WriteSeconds != 0 {
writeSpeed = float64(written) / MB / s.WriteSeconds
}
return fmt.Sprintf(`
Bytes read: %10d MByte (%7.2f MByte/s)
Bytes written: %10d MByte (%7.2f MByte/s)
Errors: %10d
Elapsed time: %v
`,
read/MB, readSpeed,
written/MB, writeSpeed,
atomic.LoadUint64(&s.Errors),
dt)
}
// Store stores the stats to *statsFile if set
func (s *Stats) Store() {
if *statsFile == "" {
return
}
out, err := os.Create(*statsFile)
if err != nil {
log.Fatalf("error opening statsfile: %v", err)
}
defer out.Close()
encoder := json.NewEncoder(out)
// Encode into the buffer
err = encoder.Encode(&s)
if err != nil {
log.Fatalf("error writing stats file: %v", err)
}
}
// Load loads the stats from *statsFile if set
func (s *Stats) Load() {
if *statsFile == "" {
return
}
_, err := os.Stat(*statsFile)
if err != nil {
log.Printf("statsfile %q does not exist -- will create", *statsFile)
return
}
in, err := os.Open(*statsFile)
if err != nil {
log.Fatalf("error opening statsfile: %v", err)
}
defer in.Close()
decoder := json.NewDecoder(in)
err = decoder.Decode(&stats)
if err != nil {
log.Fatalf("error reading statsfile: %v", err)
}
stats.Start = time.Now() // restart the program timer
log.Printf("loaded statsfile %q", *statsFile)
stats.Log()
}
// Log outputs the Stats to the log
func (s *Stats) Log() {
log.Println(stats)
}
// AddWritten updates the stats for bytes written
func (s *Stats) AddWritten(bytes uint64) {
atomic.AddUint64(&s.Written, bytes)
}
// AddRead updates the stats for bytes read
func (s *Stats) AddRead(bytes uint64) {
atomic.AddUint64(&s.Read, bytes)
}
// AddErrors updates the stats for errors
func (s *Stats) AddErrors(errors uint64) {
atomic.AddUint64(&s.Errors, errors)
}
// Random contains the state for the random stream generator
type Random struct {
extendedData []byte
Data []byte // A BlockSize chunk of data which points to extendedData
bytes int // number of bytes of randomness
pos int // read position for Read
}
// NewRandom make a new random stream generator
func NewRandom() *Random {
r := &Random{}
r.extendedData = make([]byte, BlockSize+ranlen)
r.Data = r.extendedData[0:BlockSize]
r.Data[0] = 1
for i := 1; i < ranlen; i++ {
r.Data[i] = 0xA5
}
r.Randomise() // initial randomisation
r.bytes = 0 // start buffer empty
r.pos = 0
return r
}
// Randomise fills the random block up with randomness.
//
// This uses a random number generator from Knuth: Seminumerical
// Algorithms. The magic numbers are the polynomial for a maximal
// length linear feedback shift register The least significant bits of
// the numbers form this sequence (of length 2**55). The higher bits
// cause the sequence to be some multiple of this.
func (r *Random) Randomise() {
// copy the old randomness to the end
copy(r.extendedData[BlockSize:], r.extendedData[0:ranlen])
// make a new random block
d := r.extendedData
for i := BlockSize - 1; i >= 0; i-- {
d[i] = d[i+ranlen] + d[i+ranlen2]
}
// Show we have some bytes
r.bytes = BlockSize
r.pos = 0
}
// Read implements io.Reader for Random
func (r *Random) Read(p []byte) (int, error) {
bytesToWrite := len(p)
bytesWritten := 0
for bytesToWrite > 0 {
if r.bytes <= 0 {
r.Randomise()
}
chunkSize := bytesToWrite
if bytesToWrite >= r.bytes {
chunkSize = r.bytes
}
copy(p[bytesWritten:bytesWritten+chunkSize], r.Data[r.pos:r.pos+chunkSize])
bytesWritten += chunkSize
bytesToWrite -= chunkSize
r.pos += chunkSize
r.bytes -= chunkSize
}
return bytesWritten, nil
}
// outputDiff checks two blocks and outputs differences to the log
func outputDiff(pos int64, a, b []byte, output bool) bool {
if len(a) != len(b) {
panic("Assertion failed: Blocks passed to outputDiff must be the same length")
}
errors := uint64(0)
for i := range a {
if a[i] != b[i] {
if output {
if errors < *maxErrors {
log.Printf("%08X: %02X, %02X diff %02X\n",
pos+int64(i), b[i], a[i], b[i]^a[i])
} else if errors >= *maxErrors {
log.Printf("Error limit %d reached: not printing any more differences in this file\n", *maxErrors)
output = false
}
}
errors++
}
}
stats.AddErrors(errors)
return output
}
// BlockReader contains the state for reading blocks out of the file
type BlockReader struct {
// Input file
in io.Reader
// Name of input file
file string
// Channel to output blocks
out chan []byte
// Channel for spare blocks
spare chan []byte
// Channel to signal quit
quit chan bool
// Co-ordinate with background goroutine
wg sync.WaitGroup
}
// NewBlockReader reads a file in BlockSize using BlockSize chunks until done.
//
// It returns them in the channel using a triple buffered goroutine to
// do the reading so as to parallelise the IO.
//
// This improves read speed from 62.3 MByte/s to 69 MByte/s when
// reading two files and and from 62 to 182 Mbyte/s when reading from
// one file and one random source.
func NewBlockReader(in io.Reader, file string) *BlockReader {
br := &BlockReader{
in: in,
file: file,
out: make(chan []byte, 1),
spare: make(chan []byte, 4),
quit: make(chan bool, 1), // buffer of size 1 so can send into it without blocking
}
// Run the reader in the background
br.wg.Add(1)
go br.background()
return br
}
// background routine for BlockReader to read the blocks in the
// background into the channel
//
// It uses triple buffering with chan of length 1. One block being
// filled, one block in the channel, and one block being used by the
// client.
func (br *BlockReader) background() {
defer br.wg.Done()
defer close(br.out)
br.spare <- directio.AlignedBlock(BlockSize)
br.spare <- directio.AlignedBlock(BlockSize)
br.spare <- directio.AlignedBlock(BlockSize)
for {
block := <-br.spare
_, err := io.ReadFull(br.in, block)
if err != nil {
if err == io.EOF {
return
}
log.Fatalf("Error while reading %q: %s\n", br.file, err)
}
// FIXME bodge - don't account for reading from the random number
if br.file != "random" {
stats.AddRead(BlockSize)
}
select {
case br.out <- block:
case <-br.quit:
return
}
}
}
// Read a block from the BlockReader
//
// Returns nil at end of file
func (br *BlockReader) Read() []byte {
out, _ := <-br.out
return out
}
// Return a block to the BlockReader when processed for re-use
//
// Ignores a nil block
func (br *BlockReader) Return(block []byte) {
if block != nil {
br.spare <- block
}
}
// Close the BlockReader shuttting down the background goroutine
func (br *BlockReader) Close() {
br.quit <- true
br.wg.Wait()
}
// ReadFile reads the file given and checks it against the random source
func ReadFile(file string) {
in, err := openFile(file, os.O_RDONLY, 0666)
if err != nil {
log.Fatalf("Failed to open %s for reading: %s\n", file, err)
}
defer in.Close()
random := NewRandom()
pos := int64(0)
log.Printf("Reading file %q\n", file)
// FIXME this is similar code to ReadTwoFiles
br1 := NewBlockReader(in, file)
defer br1.Close()
br2 := NewBlockReader(random, "random")
defer br2.Close()
output := true
stats.SetMode(modeRead)
defer stats.SetMode(modeReadDone)
for {
block1 := br1.Read()
block2 := br2.Read()
if block1 == nil {
break
}
if bytes.Compare(block1, block2) != 0 {
output = outputDiff(pos, block1, block2, output)
}
pos += BlockSize
br1.Return(block1)
br2.Return(block2)
}
}
// WriteFile writes the random source for size bytes to the file given
//
// Returns a true if the write failed, false otherwise.
func WriteFile(file string, size int64) bool {
out, err := openFile(file, os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
log.Fatalf("Couldn't open file %q for write: %s\n", file, err)
}
log.Printf("Writing file %q size %d\n", file, size)
failed := false
random := NewRandom()
br := NewBlockReader(random, "random")
defer br.Close()
stats.SetMode(modeWrite)
defer stats.SetMode(modeWriteDone)
for size > 0 {
block := br.Read()
_, err := out.Write(block)
br.Return(block)
if err != nil {
log.Printf("Error while writing %q\n", file)
failed = true
break
}
size -= BlockSize
stats.AddWritten(BlockSize)
}
out.Close()
if failed {
log.Printf("Removing incomplete file %q\n", file)
err = os.Remove(file)
if err != nil {
log.Fatalf("Failed to remove incomplete file %q: %s\n", file, err)
}
}
return failed
}
// ReadTwoFiles reads two files and checks them to be the same as it goes along.
//
// It reads the files in BlockSize chunks.
func ReadTwoFiles(file1, file2 string) {
in1, err := openFile(file1, os.O_RDONLY, 0666)
if err != nil {
log.Fatalf("Couldn't open file %q for read\n", file1)
}
defer in1.Close()
in2, err := openFile(file2, os.O_RDONLY, 0666)
if err != nil {
log.Fatalf("Couldn't open file %q for read\n", file2)
}
defer in2.Close()
log.Printf("Reading file %q, %q\n", file1, file2)
stats.SetMode(modeRead)
defer stats.SetMode(modeReadDone)
pos := int64(0)
br1 := NewBlockReader(in1, file1)
defer br1.Close()
br2 := NewBlockReader(in2, file2)
defer br2.Close()
output := true
for {
block1 := br1.Read()
block2 := br2.Read()
if block1 == nil || block2 == nil {
if block1 != nil || block2 != nil {
log.Fatalf("Files %q and %q are different sizes\n", file1, file2)
}
break
}
if bytes.Compare(block1, block2) != 0 {
output = outputDiff(pos, block1, block2, output)
}
pos += BlockSize
br1.Return(block1)
br2.Return(block2)
}
}
// syntaxError prints the syntax
func syntaxError() {
fmt.Fprintf(os.Stderr, `stressdisk - a disk soak testing utility - %s
Automatic usage:
stressdisk run directory - auto fill the directory up and soak test it
stressdisk cycle directory - fill, test, delete, repeat - torture for flash
stressdisk clean directory - delete the check files from the directory
Manual usage:
stressdisk help - this help
stressdisk [ -s size ] write filename - write a check file
stressdisk read filename - read the check file back
stressdisk reads filename - ... repeatedly for duration set
stressdisk check filename1 filename2 - compare two check files
stressdisk checks filename1 filename2 - ... repeatedly for duration set
Full options:
`, version)
flag.PrintDefaults()
fmt.Fprintf(os.Stderr, `
Note that flags must be provided BEFORE the stressdisk command, eg
stressdisk -duration 48h run /mnt
`)
}
// Exit with the message
func fatalf(message string, args ...interface{}) {
syntaxError()
fmt.Fprintf(os.Stderr, message, args...)
os.Exit(1)
}
// checkArgs checks there are enough arguments and prints a message if not
func checkArgs(args []string, n int, message string) {
if len(args) != n {
fatalf("%d arguments required: %s\n"+
"Make sure flags are listed before the command, eg\n"+
"stressdisk -duration 24h run /mnt\n", n, message)
}
}
// pairs is used to make the schedule for testing pairs of files.
//
// This reads all the data twice in a random order
func pairs(n int) (a, b []int) {
a = rand.Perm(n)
OUTER:
for {
b = rand.Perm(n)
if n > 1 {
// Make sure that we don't read the same block twice at the same time
for i := range a {
if a[i] == b[i] {
continue OUTER
}
}
}
break
}
return
}
// ReadDir finds the check files in the directory passed in, returning all the files
func ReadDir(dir string) []string {
matcher := regexp.MustCompile(`^` + regexp.QuoteMeta(Leaf) + `\d{4,}$`)
var files []string
entries, err := ioutil.ReadDir(dir)
if err != nil {
// Warn only if couldn't open directory
// See: http://code.google.com/p/go/issues/detail?id=4601
log.Printf("Couldn't read directory %q: %s\n", dir, err)
return files
}
for _, entry := range entries {
name := entry.Name()
if entry.Mode()&os.ModeType == 0 && matcher.MatchString(name) {
files = append(files, filepath.Join(dir, name))
}
}
return files
}
// DeleteFiles deletes all the check files
func DeleteFiles(files []string) {
log.Printf("Removing %d check files\n", len(files))
for _, file := range files {
log.Printf("Removing file %q\n", file)
err := os.Remove(file)
if err != nil {
log.Printf("Failed to remove file %q: %s\n", file, err)
}
}
}
// WriteFiles writes check files until the disk is full
func WriteFiles(dir string) []string {
var files []string
for i := 0; ; i++ {
file := filepath.Join(dir, fmt.Sprintf("%s%04d", Leaf, i))
if WriteFile(file, *fileSize) {
break
}
files = append(files, file)
}
if len(files) < 2 {
DeleteFiles(files)
log.Fatalf("Only generated %d files which isn't enough - reduce the size with -s\n", len(files))
}
return files
}
// GetFiles finds existing check files or creates new ones
func GetFiles(dir string) []string {
files := ReadDir(dir)
if len(files) == 0 {
log.Printf("No check files - generating\n")
files = WriteFiles(dir)
} else {
log.Printf("%d check files found - restarting\n", len(files))
}
return files
}
// finished prints the message and some stats then exits with the correct error code
func finished(message string) {
log.Println(message)
stats.Log()
stats.Store()
// Log pass / fail if we did any testing
if stats.Read != 0 {
if stats.Errors != 0 {
log.Fatalf("FAILED with %d errors - see %q for details", stats.Errors, *logfile)
}
log.Println("PASSED with no errors")
}
os.Exit(0)
}
func main() {
flag.Usage = syntaxError
flag.Parse()
args := flag.Args()
stats = NewStats()
runtime.GOMAXPROCS(3)
rand.Seed(time.Now().UnixNano())
// if no O_DIRECT just use normal OS open facility
if *noDirect {
openFile = os.OpenFile
}
// Setup profiling if desired
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
// Write to log file as well
if len(*logfile) > 0 {
fd, err := os.OpenFile(*logfile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
log.Fatal("Failed to open log file")
}
defer func() {
fd.WriteString("\n------\n")
fd.Close()
}()
log.SetOutput(io.MultiWriter(os.Stderr, fd))
}
if len(args) < 1 {
fatalf("No command supplied\n")
}
stats.Load()
// Exit on keyboard interrrupt
go func() {
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGINT)
<-ch
finished("Interrupt received")
}()
// Exit on timeout
go func() {
<-time.After(*duration)
finished(fmt.Sprintf("Exiting after running for > %v", duration))
}()
// Print the stats every statsInterval
go func() {
ch := time.Tick(*statsInterval)
for {
<-ch
stats.Log()
stats.Store()
}
}()
command := strings.ToLower(args[0])
args = args[1:]
var action func() bool
switch command {
case "write":
checkArgs(args, 1, "Need file to write")
action = func() bool {
WriteFile(args[0], *fileSize)
return false
}
case "check", "checks":
checkArgs(args, 2, "Need two files to read")
action = func() bool {
ReadTwoFiles(args[0], args[1])
return command == "checks"
}
case "read", "reads":
checkArgs(args, 1, "Need file to read")
action = func() bool {
ReadFile(args[0])
return command == "reads"
}
case "clean":
checkArgs(args, 1, "Need directory to delete files from")
action = func() bool {
dir := args[0]
DeleteFiles(ReadDir(dir))
return false
}
case "run":
// FIXME directory could be omitted?
// FIXME should be default
checkArgs(args, 1, "Need directory to write check files")
dir := args[0]
files := GetFiles(dir)
action = func() bool {
a, b := pairs(len(files))
for i := range a {
ReadTwoFiles(files[a[i]], files[b[i]])
}
return true
}
case "cycle":
checkArgs(args, 1, "Need directory to perform testing")
dir := args[0]
// Delete any pre-exising check files
DeleteFiles(ReadDir(dir))
action = func() bool {
files := GetFiles(dir)
a, b := pairs(len(files))
for i := range a {
ReadTwoFiles(files[a[i]], files[b[i]])
}
DeleteFiles(files)
return true
}
default:
fatalf("Command %q not understood\n", command)
}
// Run the action
for round := 0; ; round++ {
log.Printf("Starting round %d\n", round+1)
if !action() {
break
}
}
finished("All done")
}
07070100000008000081A400000000000000000000000164995F23000000B9000000000000000000000000000000000000002500000000stressdisk-1.0.13/stressdisk_test.gopackage main
import (
"testing"
)
func BenchmarkRandomise(b *testing.B) {
b.StopTimer()
random := NewRandom()
b.StartTimer()
for i := 0; i < b.N; i++ {
random.Randomise()
}
}
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!65 blocks