typo: Fix typos across the codebase. (#2442)

master
Harshavardhana 8 years ago committed by GitHub
parent b41bfcbf2f
commit 76d56c6ff2
  1. 4
      Makefile
  2. 4
      bucket-policy-handlers_test.go
  3. 6
      bucket-policy-parser_test.go
  4. 2
      dist/benchmark/benchcmp.sh
  5. 2
      docs/FreeBSD.md
  6. 2
      docs/erasure/README.md
  7. 4
      docs/how-to-run-multiple-minio-server-instances-on-single-machine.md
  8. 2
      leak-detect_test.go
  9. 2
      object-api-listobjects_test.go
  10. 2
      object-api-multipart_test.go
  11. 2
      object-api-putobject_test.go
  12. 2
      pkg/quick/errorutil.go
  13. 2
      signature-v4-utils.go
  14. 2
      xl-v1-multipart.go

@ -101,8 +101,8 @@ deadcode:
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/deadcode @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/deadcode
spelling: spelling:
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell *.go @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell -error *
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell pkg/**/* @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell -error pkg/**/*
test: build test: build
@echo "Running all minio testing:" @echo "Running all minio testing:"

@ -118,7 +118,7 @@ func TestBucketPolicyActionMatch(t *testing.T) {
{"s3:ListBucketMultipartUploads", getWriteOnlyBucketStatement(bucketName, objectPrefix), true}, {"s3:ListBucketMultipartUploads", getWriteOnlyBucketStatement(bucketName, objectPrefix), true},
// read-only bucket policy is expected to not allow ListBucketMultipartUploads operation on an anonymous request (Test case 9). // read-only bucket policy is expected to not allow ListBucketMultipartUploads operation on an anonymous request (Test case 9).
// the allowed actions in read-only bucket statement are "s3:GetBucketLocation","s3:ListBucket", // the allowed actions in read-only bucket statement are "s3:GetBucketLocation","s3:ListBucket",
// this shouldnot allow for ListBucketMultipartUploads operations. // this should not allow for ListBucketMultipartUploads operations.
{"s3:ListBucketMultipartUploads", getReadOnlyBucketStatement(bucketName, objectPrefix), false}, {"s3:ListBucketMultipartUploads", getReadOnlyBucketStatement(bucketName, objectPrefix), false},
// Any of the object level policy will not allow for s3:ListBucketMultipartUploads (Test cases 10-12). // Any of the object level policy will not allow for s3:ListBucketMultipartUploads (Test cases 10-12).
@ -136,7 +136,7 @@ func TestBucketPolicyActionMatch(t *testing.T) {
{"s3:ListBucket", getReadWriteBucketStatement(bucketName, objectPrefix), true}, {"s3:ListBucket", getReadWriteBucketStatement(bucketName, objectPrefix), true},
// write-only bucket policy is expected to not allow ListBucket operation on an anonymous request (Test case 15). // write-only bucket policy is expected to not allow ListBucket operation on an anonymous request (Test case 15).
// the allowed actions in write-only bucket statement are "s3:GetBucketLocation", "s3:ListBucketMultipartUploads", // the allowed actions in write-only bucket statement are "s3:GetBucketLocation", "s3:ListBucketMultipartUploads",
// this shouldnot allow for ListBucket operations. // this should not allow for ListBucket operations.
{"s3:ListBucket", getWriteOnlyBucketStatement(bucketName, objectPrefix), false}, {"s3:ListBucket", getWriteOnlyBucketStatement(bucketName, objectPrefix), false},
// Cases for testing ListBucket access for different Object level access permissions (Test cases 16-18). // Cases for testing ListBucket access for different Object level access permissions (Test cases 16-18).

@ -477,7 +477,7 @@ func TestCheckbucketPolicyResources(t *testing.T) {
statements[0].Actions = []string{"s3:DeleteObject", "s3:PutObject"} statements[0].Actions = []string{"s3:DeleteObject", "s3:PutObject"}
return statements return statements
} }
// contructing policy statement with recursive resources. // constructing policy statement with recursive resources.
// should result in ErrMalformedPolicy // should result in ErrMalformedPolicy
setRecurseResource := func(statements []policyStatement) []policyStatement { setRecurseResource := func(statements []policyStatement) []policyStatement {
statements[0].Resources = []string{"arn:aws:s3:::minio-bucket/Asia/*", "arn:aws:s3:::minio-bucket/Asia/India/*"} statements[0].Resources = []string{"arn:aws:s3:::minio-bucket/Asia/*", "arn:aws:s3:::minio-bucket/Asia/India/*"}
@ -512,7 +512,7 @@ func TestCheckbucketPolicyResources(t *testing.T) {
// this results in return of ErrMalformedPolicy. // this results in return of ErrMalformedPolicy.
{Version: "1.0", Statements: setValidPrefixActions(getWriteOnlyStatement("minio-bucket-fail", "Asia/India/"))}, {Version: "1.0", Statements: setValidPrefixActions(getWriteOnlyStatement("minio-bucket-fail", "Asia/India/"))},
// bucketPolicy - 6. // bucketPolicy - 6.
// contructing policy statement with recursive resources. // constructing policy statement with recursive resources.
// should result in ErrMalformedPolicy // should result in ErrMalformedPolicy
{Version: "1.0", Statements: setRecurseResource(setValidPrefixActions(getWriteOnlyStatement("minio-bucket", "")))}, {Version: "1.0", Statements: setRecurseResource(setValidPrefixActions(getWriteOnlyStatement("minio-bucket", "")))},
// BucketPolciy - 7. // BucketPolciy - 7.
@ -544,7 +544,7 @@ func TestCheckbucketPolicyResources(t *testing.T) {
// Resource prefix bucket part is not equal to the bucket name in this case. // Resource prefix bucket part is not equal to the bucket name in this case.
{bucketAccessPolicies[4], ErrMalformedPolicy, false}, {bucketAccessPolicies[4], ErrMalformedPolicy, false},
// Test case - 6. // Test case - 6.
// contructing policy statement with recursive resources. // constructing policy statement with recursive resources.
// should result in ErrPolicyNesting. // should result in ErrPolicyNesting.
{bucketAccessPolicies[5], ErrPolicyNesting, false}, {bucketAccessPolicies[5], ErrPolicyNesting, false},
// Test case - 7. // Test case - 7.

@ -50,7 +50,7 @@ if [ ! $# -eq 2 ]
then then
# exit if commit SHA's are not provided. # exit if commit SHA's are not provided.
echo $# echo $#
echo "Need Commit SHA's of 2 snapshots to be supplied to run benchmark comparision." echo "Need Commit SHA's of 2 snapshots to be supplied to run benchmark comparison."
exit 1 exit 1
fi fi

@ -62,7 +62,7 @@ Verify if it is writable
``` ```
Now you have successfully created a ZFS pool for futher reading please refer to [ZFS Quickstart Guide](https://www.freebsd.org/doc/handbook/zfs-quickstart.html) Now you have successfully created a ZFS pool for further reading please refer to [ZFS Quickstart Guide](https://www.freebsd.org/doc/handbook/zfs-quickstart.html)
However, this pool is not taking advantage of any ZFS features, so let's create a ZFS filesytem on this pool with compression enabled. ZFS supports many compression algorithms: lzjb, gzip, zle, lz4. LZ4 is often the most performant algorithm in terms of compression of data versus system overhead. However, this pool is not taking advantage of any ZFS features, so let's create a ZFS filesystem on this pool with compression enabled. ZFS supports many compression algorithms: lzjb, gzip, zle, lz4. LZ4 is often the most performant algorithm in terms of compression of data versus system overhead.

@ -12,7 +12,7 @@ Erasure code is a mathematical algorithm to reconstruct missing or corrupted dat
## Why is Erasure Code useful? ## Why is Erasure Code useful?
Erasure code protects data from multiple drives failure unlike RAID or replication. For eg RAID6 can protect against 2 drive failure whereas in Minio erasure code you can lose as many as half number of drives and still the data reamins safe. Further Minio's erasure code is at object level and can heal one object at a time. For RAID, healing can only be performed at volume level which translates into huge down time. As Minio encodes each object individually with a high parity count. Storage servers once deployed should not require drive replacement or healing for the lifetime of the server. Minio's erasure coded backend is designed for operational efficiency and takes full advantage of hardware acceleration whenever available. Erasure code protects data from multiple drives failure unlike RAID or replication. For eg RAID6 can protect against 2 drive failure whereas in Minio erasure code you can lose as many as half number of drives and still the data remains safe. Further Minio's erasure code is at object level and can heal one object at a time. For RAID, healing can only be performed at volume level which translates into huge down time. As Minio encodes each object individually with a high parity count. Storage servers once deployed should not require drive replacement or healing for the lifetime of the server. Minio's erasure coded backend is designed for operational efficiency and takes full advantage of hardware acceleration whenever available.
![Erasure](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/erasure-code.jpg?raw=true) ![Erasure](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/erasure-code.jpg?raw=true)

@ -1,9 +1,9 @@
# How to run multiple Minio server instances on single machine. [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) # How to run multiple Minio server instances on single machine. [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![minio_MULTISERVER](https://github.com/minio/minio/blob/master/docs/screenshots/multiport.jpeg?raw=true) ![minio_MULTISERVER](https://github.com/minio/minio/blob/master/docs/screenshots/multiport.jpeg?raw=true)
In this document we will illustrate how to set up multiple Minio server instances on single machine. These Minio servers are running on thier own port, data directory & configuration directory. In this document we will illustrate how to set up multiple Minio server instances on single machine. These Minio servers are running on their own port, data directory & configuration directory.
## 1. Prerequisites ## 1. Prerequisites

@ -37,7 +37,7 @@ func NewLeakDetect() LeakDetect {
return snapshot return snapshot
} }
// CompareCurrentSnapshot - Comapres the initial relevant stack trace with the current one (during the time of invocation). // CompareCurrentSnapshot - Compares the initial relevant stack trace with the current one (during the time of invocation).
func (initialSnapShot LeakDetect) CompareCurrentSnapshot() []string { func (initialSnapShot LeakDetect) CompareCurrentSnapshot() []string {
var stackDiff []string var stackDiff []string
for _, g := range pickRelevantGoroutines() { for _, g := range pickRelevantGoroutines() {

@ -503,7 +503,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"test-bucket-list-object", "Asia/India/", "", "", 10, resultCases[23], nil, true}, {"test-bucket-list-object", "Asia/India/", "", "", 10, resultCases[23], nil, true},
{"test-bucket-list-object", "Asia", "", "", 10, resultCases[24], nil, true}, {"test-bucket-list-object", "Asia", "", "", 10, resultCases[24], nil, true},
// Tests with prefix and delimiter (55-57). // Tests with prefix and delimiter (55-57).
// With delimeter the code shouldnot recurse into the sub-directories of prefix Dir. // With delimiter the code should not recurse into the sub-directories of prefix Dir.
{"test-bucket-list-object", "Asia", "", "/", 10, resultCases[25], nil, true}, {"test-bucket-list-object", "Asia", "", "/", 10, resultCases[25], nil, true},
{"test-bucket-list-object", "new", "", "/", 10, resultCases[26], nil, true}, {"test-bucket-list-object", "new", "", "/", 10, resultCases[26], nil, true},
{"test-bucket-list-object", "Asia/India/", "", "/", 10, resultCases[27], nil, true}, {"test-bucket-list-object", "Asia/India/", "", "/", 10, resultCases[27], nil, true},

@ -1876,7 +1876,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
} }
// Benchmarks for ObjectLayer.PutObjectPart(). // Benchmarks for ObjectLayer.PutObjectPart().
// The intent is to benchamrk PutObjectPart for various sizes ranging from few bytes to 100MB. // The intent is to benchmark PutObjectPart for various sizes ranging from few bytes to 100MB.
// Also each of these Benchmarks are run both XL and FS backends. // Also each of these Benchmarks are run both XL and FS backends.
// BenchmarkPutObjectPart5MbFS - Benchmark FS.PutObjectPart() for object size of 5MB. // BenchmarkPutObjectPart5MbFS - Benchmark FS.PutObjectPart() for object size of 5MB.

@ -395,7 +395,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
} }
// Benchmarks for ObjectLayer.PutObject(). // Benchmarks for ObjectLayer.PutObject().
// The intent is to benchamrk PutObject for various sizes ranging from few bytes to 100MB. // The intent is to benchmark PutObject for various sizes ranging from few bytes to 100MB.
// Also each of these Benchmarks are run both XL and FS backends. // Also each of these Benchmarks are run both XL and FS backends.
// BenchmarkPutObjectVerySmallFS - Benchmark FS.PutObject() for object size of 10 bytes. // BenchmarkPutObjectVerySmallFS - Benchmark FS.PutObject() for object size of 10 bytes.

@ -49,7 +49,7 @@ func FormatJSONSyntaxError(data io.Reader, sErr *json.SyntaxError) error {
termWidth := 25 termWidth := 25
// errorShift is the length of the minimum needed place for // errorShift is the length of the minimum needed place for
// error msg accessoires, like <--, etc.. We calculate it // error msg accessories, like <--, etc.. We calculate it
// dynamically to avoid an eventual bug after modifying errorFmt // dynamically to avoid an eventual bug after modifying errorFmt
errorShift := len(fmt.Sprintf(errorFmt, 1, "")) errorShift := len(fmt.Sprintf(errorFmt, 1, ""))

@ -143,7 +143,7 @@ func extractSignedHeaders(signedHeaders []string, reqHeaders http.Header) (http.
continue continue
} }
// the "host" field will not be found in the header map, it can be found in req.Host. // the "host" field will not be found in the header map, it can be found in req.Host.
// but its necessary to make sure that the "host" field exists in the list of signed paramaters, // but it's necessary to make sure that the "host" field exists in the list of signed parameters,
// the check is done above. // the check is done above.
if header == "host" { if header == "host" {
continue continue

@ -722,7 +722,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// This lock also protects the cache namespace. // This lock also protects the cache namespace.
nsMutex.Unlock(bucket, object) nsMutex.Unlock(bucket, object)
// Prefetch the object from disk by triggerring a fake GetObject call // Prefetch the object from disk by triggering a fake GetObject call
// Unlike a regular single PutObject, multipart PutObject is comes in // Unlike a regular single PutObject, multipart PutObject comes in
// stages and it is harder to cache. // stages and it is harder to cache.
go xl.GetObject(bucket, object, 0, objectSize, ioutil.Discard) go xl.GetObject(bucket, object, 0, objectSize, ioutil.Discard)

Loading…
Cancel
Save