cleanup: Remove old donut/xl code and erasure implementation.

This change brings in the 'klauspost/reedsolomon' library as part of the #1270 patch.
master · Harshavardhana, 9 years ago
parent 6037fe66e9 · commit 379e0abf03
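For context, a minimal sketch of the klauspost/reedsolomon flow that replaces the removed cgo/ISA-L erasure code. The shard counts and the standalone program below are illustrative assumptions, not taken from this patch:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// Illustrative split: 10 data shards + 3 parity shards (not the values used by this patch).
	enc, err := reedsolomon.New(10, 3)
	if err != nil {
		log.Fatal(err)
	}

	data := bytes.Repeat([]byte("minio"), 1000)

	// Split the input into 10 data shards, then compute the 3 parity shards.
	shards, err := enc.Split(data)
	if err != nil {
		log.Fatal(err)
	}
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}

	// Simulate losing two shards, then rebuild them from the remaining ones.
	shards[0], shards[11] = nil, nil
	if err := enc.Reconstruct(shards); err != nil {
		log.Fatal(err)
	}

	ok, err := enc.Verify(shards)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("all shards verify:", ok)
}

Reconstruct only needs any 10 of the 13 shards to be present; missing shards are passed as nil.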
  1. .gitignore (1)
  2. .travis.yml (10)
  3. Makefile (16)
  4. appveyor.yml (16)
  5. buildscripts/checkdeps.sh (6)
  6. pkg/crypto/sha1/.gitignore (1)
  7. pkg/crypto/sha1/LICENSE (202)
  8. pkg/crypto/sha1/sha1.go (150)
  9. pkg/crypto/sha1/sha1_linux.S (967)
  10. pkg/crypto/sha1/sha1_sse3_amd64.asm (579)
  11. pkg/crypto/sha1/sha1_test.go (154)
  12. pkg/crypto/sha1/sha1_yasm_darwin.go (21)
  13. pkg/crypto/sha1/sha1_yasm_linux.go (21)
  14. pkg/crypto/sha1/sha1_yasm_windows.go (21)
  15. pkg/crypto/sha1/sha1block.go (43)
  16. pkg/crypto/sha1/sha1block_generic.go (110)
  17. pkg/crypto/sha1/sha1block_linux.go (50)
  18. pkg/crypto/sha1/sha1block_nocgo.go (23)
  19. pkg/erasure/.gitignore (1)
  20. pkg/erasure/INSTALLGO.md (64)
  21. pkg/erasure/LICENSE.INTEL (26)
  22. pkg/erasure/LICENSE.MINIO (202)
  23. pkg/erasure/README.md (25)
  24. pkg/erasure/ctypes.go (62)
  25. pkg/erasure/doc.go (66)
  26. pkg/erasure/ec_minio_common.h (39)
  27. pkg/erasure/ec_minio_decode.c (142)
  28. pkg/erasure/ec_minio_encode.c (55)
  29. pkg/erasure/erasure_decode.go (123)
  30. pkg/erasure/erasure_encode.go (174)
  31. pkg/erasure/erasure_test.go (82)
  32. pkg/erasure/stdint.go (38)
  33. pkg/xl/LICENSE (202)
  34. pkg/xl/README.md (3)
  35. pkg/xl/acl.go (49)
  36. pkg/xl/block/block.go (196)
  37. pkg/xl/block/block_test.go (83)
  38. pkg/xl/bucket.go (641)
  39. pkg/xl/cache/data/data.go (204)
  40. pkg/xl/cache/data/data_test.go (45)
  41. pkg/xl/cache/metadata/metadata.go (110)
  42. pkg/xl/cache/metadata/metadata_test.go (46)
  43. pkg/xl/common.go (192)
  44. pkg/xl/config.go (82)
  45. pkg/xl/definitions.go (159)
  46. pkg/xl/doc.go (3)
  47. pkg/xl/encoder.go (73)
  48. pkg/xl/errors.go (342)
  49. pkg/xl/heal.go (71)
  50. pkg/xl/interfaces.go (74)
  51. pkg/xl/management.go (83)
  52. pkg/xl/multipart.go (516)
  53. pkg/xl/node.go (78)
  54. pkg/xl/xl-metadata.md (55)
  55. pkg/xl/xl-v1.go (683)
  56. pkg/xl/xl-v1_test.go (292)
  57. pkg/xl/xl-v2.go (639)
  58. pkg/xl/xl-v2_test.go (267)

.gitignore (vendored): 1 change

@@ -10,7 +10,6 @@ site/
/Minio.iml
**/access.log
build
isa-l
vendor/**/*.js
vendor/**/*.json
release

@@ -6,16 +6,6 @@ os:
- linux
- osx
before_install:
- git clone https://github.com/yasm/yasm
- cd yasm
- git checkout v1.3.0
- "./autogen.sh"
- "./configure"
- make
- export PATH=$PATH:`pwd`
- cd ..
osx_image: xcode7.2
env:

@@ -87,13 +87,6 @@ fmt:
@GO15VENDOREXPERIMENT=1 gofmt -s -l *.go
@GO15VENDOREXPERIMENT=1 gofmt -s -l pkg
## Configure Intel library.
isa-l:
@echo "Configuring $@:"
@git clone -q https://github.com/minio/isa-l.git
@cd isa-l; ${MAKE} -f Makefile.unx arch=$(arch) >/dev/null; mv include isa-l;
lint:
@echo "Running $@:"
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/golint *.go
@@ -104,8 +97,7 @@ cyclo:
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/gocyclo -over 65 *.go
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/gocyclo -over 65 pkg
build: getdeps verifiers $(UI_ASSETS) isa-l
@GO15VENDOREXPERIMENT=1 go generate github.com/minio/minio/pkg/crypto/sha1
build: getdeps verifiers $(UI_ASSETS)
deadcode:
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/deadcode
@@ -116,12 +108,12 @@ spelling:
test: build
@echo "Running all minio testing:"
@GODEBUG=cgocheck=0 CGO_CPPFLAGS="-I$(PWD)/isa-l" CGO_LDFLAGS="$(PWD)/isa-l/isa-l.a" GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) .
@GODEBUG=cgocheck=0 CGO_CPPFLAGS="-I$(PWD)/isa-l" CGO_LDFLAGS="$(PWD)/isa-l/isa-l.a" GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) github.com/minio/minio/pkg...
@GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) .
@GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) github.com/minio/minio/pkg...
gomake-all: build
@echo "Installing minio:"
@CGO_CPPFLAGS="-I$(PWD)/isa-l" CGO_LDFLAGS="$(PWD)/isa-l/isa-l.a" GO15VENDOREXPERIMENT=1 go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio
@GO15VENDOREXPERIMENT=1 go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio
pkg-add:
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/govendor add $(PKG)

@@ -10,36 +10,20 @@ environment:
GOPATH: c:\gopath
GO_EXTLINK_ENABLED: 0
GO15VENDOREXPERIMENT: 1
# Remove this after fixing erasure code.
GODEBUG: cgocheck=0
# scripts that run after cloning repository
install:
- '"C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64'
- curl -fsSL -o c:\go\bin\yasm.exe http://www.tortall.net/projects/yasm/releases/yasm-1.3.0-win64.exe
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- rd C:\Go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.6.windows-amd64.zip
- 7z x go1.6.windows-amd64.zip -oC:\ >nul
- curl -fsSL -o mingw-w64.7z http://downloads.sourceforge.net/project/mingw-w64-dgn/mingw-w64/mingw-w64-bin-x86_64-20151206.7z
- 7z x -oC:\ mingw-w64.7z > NUL
- set PATH=C:\mingw64\bin;%PATH%
- x86_64-w64-mingw32-gcc --version
- go version
- go env
- git clone https://github.com/minio/isa-l
- cd isa-l
- make -f Makefile.unx arch=mingw
- mv include isa-l
- set CGO_CPPFLAGS=-Ic:/gopath/src/github.com/minio/minio/isa-l
- set CGO_LDFLAGS=c:/gopath/src/github.com/minio/minio/isa-l/isa-l.a
- set CC=x86_64-w64-mingw32-gcc
- set CXX=x86_64-w64-mingw32-g++
- cd %GOPATH%\src\github.com\minio\minio
# to run your custom scripts instead of automatic MSBuild
build_script:
- go generate github.com/minio/minio/pkg/crypto/sha1
- go test .
- go test -race .
- go test github.com/minio/minio/pkg...

@@ -21,7 +21,6 @@ _init() {
## Minimum required versions for build dependencies
GIT_VERSION="1.0"
YASM_VERSION="1.2.0"
GO_VERSION="1.6"
OSX_VERSION="10.8"
UNAME=$(uname -sm)
@@ -184,11 +183,6 @@ check_deps() {
if [ $? -ge 2 ]; then
MISSING="${MISSING} git"
fi
check_version "$(env yasm --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${YASM_VERSION}"
if [ $? -ge 2 ]; then
MISSING="${MISSING} yasm(${YASM_VERSION})"
fi
}
main() {

@@ -1 +0,0 @@
*.syso

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,150 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015-2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file of
// Golang project:
// https://github.com/golang/go/blob/master/LICENSE
// Using this part of Minio codebase under the license
// Apache License Version 2.0 with modifications
// Package sha1 implements the SHA1 hash algorithm as defined in RFC 3174.
package sha1
import "hash"
// Size - The size of a SHA1 checksum in bytes.
const Size = 20
// BlockSize - The blocksize of SHA1 in bytes.
const BlockSize = 64
const (
chunk = 64
init0 = 0x67452301
init1 = 0xEFCDAB89
init2 = 0x98BADCFE
init3 = 0x10325476
init4 = 0xC3D2E1F0
)
// digest represents the partial evaluation of a checksum.
type digest struct {
h [5]uint32
x [chunk]byte
nx int
len uint64
}
// Reset digest
func (d *digest) Reset() {
d.h[0] = init0
d.h[1] = init1
d.h[2] = init2
d.h[3] = init3
d.h[4] = init4
d.nx = 0
d.len = 0
}
// New returns a new hash.Hash computing the SHA1 checksum.
func New() hash.Hash {
d := new(digest)
d.Reset()
return d
}
// Return output size
func (d *digest) Size() int { return Size }
// Return checksum blocksize
func (d *digest) BlockSize() int { return BlockSize }
// Write to digest
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := copy(d.x[d.nx:], p)
d.nx += n
if d.nx == chunk {
block(d, d.x[:])
d.nx = 0
}
p = p[n:]
}
if len(p) >= chunk {
n := len(p) &^ (chunk - 1)
block(d, p[:n])
p = p[n:]
}
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
// Return checksum bytes
func (d *digest) Sum(in []byte) []byte {
// Make a copy of d0 so that caller can keep writing and summing.
d0 := *d
hash := d0.checkSum()
return append(in, hash[:]...)
}
// Intermediate checksum function
func (d *digest) checkSum() [Size]byte {
len := d.len
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
var tmp [64]byte
tmp[0] = 0x80
if len%64 < 56 {
d.Write(tmp[0 : 56-len%64])
} else {
d.Write(tmp[0 : 64+56-len%64])
}
// Length in bits.
len <<= 3
for i := uint(0); i < 8; i++ {
tmp[i] = byte(len >> (56 - 8*i))
}
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
var digest [Size]byte
for i, s := range d.h {
digest[i*4] = byte(s >> 24)
digest[i*4+1] = byte(s >> 16)
digest[i*4+2] = byte(s >> 8)
digest[i*4+3] = byte(s)
}
return digest
}
// Sum - single caller sha1 helper
func Sum(data []byte) [Size]byte {
var d digest
d.Reset()
d.Write(data)
return d.checkSum()
}

@@ -1,967 +0,0 @@
/*
* Implement fast SHA-1 with AVX2 instructions. (x86_64)
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Contact Information:
* Ilya Albrekht <ilya.albrekht@intel.com>
* Maxim Locktyukhin <maxim.locktyukhin@intel.com>
* Ronen Zohar <ronen.zohar@intel.com>
* Chandramouli Narayanan <mouli@linux.intel.com>
*
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* SHA-1 implementation with Intel(R) AVX2 instruction set extensions.
*
*This implementation is based on the previous SSSE3 release:
* https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1
*
*Updates 20-byte SHA-1 record in 'hash' for even number of
*'num_blocks' consecutive 64-byte blocks
*
*/
/*
* Using this part of Minio codebase under the license
* Apache License Version 2.0 with modifications
*
*/
#ifdef HAS_AVX2
#ifndef ENTRY
#define ENTRY(name) \
.globl name ; \
.align 4,0x90 ; \
name:
#endif
#ifndef END
#define END(name) \
.size name, .-name
#endif
#ifndef ENDPROC
#define ENDPROC(name) \
.type name, @function ; \
END(name)
#endif
#define NUM_INVALID 100
#define TYPE_R32 0
#define TYPE_R64 1
#define TYPE_XMM 2
#define TYPE_INVALID 100
.macro R32_NUM opd r32
\opd = NUM_INVALID
.ifc \r32,%eax
\opd = 0
.endif
.ifc \r32,%ecx
\opd = 1
.endif
.ifc \r32,%edx
\opd = 2
.endif
.ifc \r32,%ebx
\opd = 3
.endif
.ifc \r32,%esp
\opd = 4
.endif
.ifc \r32,%ebp
\opd = 5
.endif
.ifc \r32,%esi
\opd = 6
.endif
.ifc \r32,%edi
\opd = 7
.endif
#ifdef X86_64
.ifc \r32,%r8d
\opd = 8
.endif
.ifc \r32,%r9d
\opd = 9
.endif
.ifc \r32,%r10d
\opd = 10
.endif
.ifc \r32,%r11d
\opd = 11
.endif
.ifc \r32,%r12d
\opd = 12
.endif
.ifc \r32,%r13d
\opd = 13
.endif
.ifc \r32,%r14d
\opd = 14
.endif
.ifc \r32,%r15d
\opd = 15
.endif
#endif
.endm
.macro R64_NUM opd r64
\opd = NUM_INVALID
#ifdef X86_64
.ifc \r64,%rax
\opd = 0
.endif
.ifc \r64,%rcx
\opd = 1
.endif
.ifc \r64,%rdx
\opd = 2
.endif
.ifc \r64,%rbx
\opd = 3
.endif
.ifc \r64,%rsp
\opd = 4
.endif
.ifc \r64,%rbp
\opd = 5
.endif
.ifc \r64,%rsi
\opd = 6
.endif
.ifc \r64,%rdi
\opd = 7
.endif
.ifc \r64,%r8
\opd = 8
.endif
.ifc \r64,%r9
\opd = 9
.endif
.ifc \r64,%r10
\opd = 10
.endif
.ifc \r64,%r11
\opd = 11
.endif
.ifc \r64,%r12
\opd = 12
.endif
.ifc \r64,%r13
\opd = 13
.endif
.ifc \r64,%r14
\opd = 14
.endif
.ifc \r64,%r15
\opd = 15
.endif
#endif
.endm
.macro XMM_NUM opd xmm
\opd = NUM_INVALID
.ifc \xmm,%xmm0
\opd = 0
.endif
.ifc \xmm,%xmm1
\opd = 1
.endif
.ifc \xmm,%xmm2
\opd = 2
.endif
.ifc \xmm,%xmm3
\opd = 3
.endif
.ifc \xmm,%xmm4
\opd = 4
.endif
.ifc \xmm,%xmm5
\opd = 5
.endif
.ifc \xmm,%xmm6
\opd = 6
.endif
.ifc \xmm,%xmm7
\opd = 7
.endif
.ifc \xmm,%xmm8
\opd = 8
.endif
.ifc \xmm,%xmm9
\opd = 9
.endif
.ifc \xmm,%xmm10
\opd = 10
.endif
.ifc \xmm,%xmm11
\opd = 11
.endif
.ifc \xmm,%xmm12
\opd = 12
.endif
.ifc \xmm,%xmm13
\opd = 13
.endif
.ifc \xmm,%xmm14
\opd = 14
.endif
.ifc \xmm,%xmm15
\opd = 15
.endif
.endm
.macro TYPE type reg
R32_NUM reg_type_r32 \reg
R64_NUM reg_type_r64 \reg
XMM_NUM reg_type_xmm \reg
.if reg_type_r64 <> NUM_INVALID
\type = TYPE_R64
.elseif reg_type_r32 <> NUM_INVALID
\type = TYPE_R32
.elseif reg_type_xmm <> NUM_INVALID
\type = TYPE_XMM
.else
\type = TYPE_INVALID
.endif
.endm
.macro PFX_OPD_SIZE
.byte 0x66
.endm
.macro PFX_REX opd1 opd2 W=0
.if ((\opd1 | \opd2) & 8) || \W
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
.endif
.endm
.macro MODRM mod opd1 opd2
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
.endm
.macro PSHUFB_XMM xmm1 xmm2
XMM_NUM pshufb_opd1 \xmm1
XMM_NUM pshufb_opd2 \xmm2
PFX_OPD_SIZE
PFX_REX pshufb_opd1 pshufb_opd2
.byte 0x0f, 0x38, 0x00
MODRM 0xc0 pshufb_opd1 pshufb_opd2
.endm
.macro PCLMULQDQ imm8 xmm1 xmm2
XMM_NUM clmul_opd1 \xmm1
XMM_NUM clmul_opd2 \xmm2
PFX_OPD_SIZE
PFX_REX clmul_opd1 clmul_opd2
.byte 0x0f, 0x3a, 0x44
MODRM 0xc0 clmul_opd1 clmul_opd2
.byte \imm8
.endm
.macro PEXTRD imm8 xmm gpr
R32_NUM extrd_opd1 \gpr
XMM_NUM extrd_opd2 \xmm
PFX_OPD_SIZE
PFX_REX extrd_opd1 extrd_opd2
.byte 0x0f, 0x3a, 0x16
MODRM 0xc0 extrd_opd1 extrd_opd2
.byte \imm8
.endm
.macro MOVQ_R64_XMM opd1 opd2
TYPE movq_r64_xmm_opd1_type \opd1
.if movq_r64_xmm_opd1_type == TYPE_XMM
XMM_NUM movq_r64_xmm_opd1 \opd1
R64_NUM movq_r64_xmm_opd2 \opd2
.else
R64_NUM movq_r64_xmm_opd1 \opd1
XMM_NUM movq_r64_xmm_opd2 \opd2
.endif
PFX_OPD_SIZE
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
.if movq_r64_xmm_opd1_type == TYPE_XMM
.byte 0x0f, 0x7e
.else
.byte 0x0f, 0x6e
.endif
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
.endm
#define CTX %rdi /* arg1 */
#define BUF %rsi /* arg2 */
#define CNT %rdx /* arg3 */
#define REG_A %ecx
#define REG_B %esi
#define REG_C %edi
#define REG_D %eax
#define REG_E %edx
#define REG_TB %ebx
#define REG_TA %r12d
#define REG_RA %rcx
#define REG_RB %rsi
#define REG_RC %rdi
#define REG_RD %rax
#define REG_RE %rdx
#define REG_RTA %r12
#define REG_RTB %rbx
#define REG_T1 %ebp
#define xmm_mov vmovups
#define avx2_zeroupper vzeroupper
#define RND_F1 1
#define RND_F2 2
#define RND_F3 3
.macro REGALLOC
.set A, REG_A
.set B, REG_B
.set C, REG_C
.set D, REG_D
.set E, REG_E
.set TB, REG_TB
.set TA, REG_TA
.set RA, REG_RA
.set RB, REG_RB
.set RC, REG_RC
.set RD, REG_RD
.set RE, REG_RE
.set RTA, REG_RTA
.set RTB, REG_RTB
.set T1, REG_T1
.endm
#define K_BASE %r8
#define HASH_PTR %r9
#define BUFFER_PTR %r10
#define BUFFER_PTR2 %r13
#define BUFFER_END %r11
#define PRECALC_BUF %r14
#define WK_BUF %r15
#define W_TMP %xmm0
#define WY_TMP %ymm0
#define WY_TMP2 %ymm9
# AVX2 variables
#define WY0 %ymm3
#define WY4 %ymm5
#define WY08 %ymm7
#define WY12 %ymm8
#define WY16 %ymm12
#define WY20 %ymm13
#define WY24 %ymm14
#define WY28 %ymm15
#define YMM_SHUFB_BSWAP %ymm10
/*
* Keep 2 iterations precalculated at a time:
* - 80 DWORDs per iteration * 2
*/
#define W_SIZE (80*2*2 +16)
#define WK(t) ((((t) % 80) / 4)*32 + ( (t) % 4)*4 + ((t)/80)*16 )(WK_BUF)
#define PRECALC_WK(t) ((t)*2*2)(PRECALC_BUF)
.macro UPDATE_HASH hash, val
add \hash, \val
mov \val, \hash
.endm
.macro PRECALC_RESET_WY
.set WY_00, WY0
.set WY_04, WY4
.set WY_08, WY08
.set WY_12, WY12
.set WY_16, WY16
.set WY_20, WY20
.set WY_24, WY24
.set WY_28, WY28
.set WY_32, WY_00
.endm
.macro PRECALC_ROTATE_WY
/* Rotate macros */
.set WY_32, WY_28
.set WY_28, WY_24
.set WY_24, WY_20
.set WY_20, WY_16
.set WY_16, WY_12
.set WY_12, WY_08
.set WY_08, WY_04
.set WY_04, WY_00
.set WY_00, WY_32
/* Define register aliases */
.set WY, WY_00
.set WY_minus_04, WY_04
.set WY_minus_08, WY_08
.set WY_minus_12, WY_12
.set WY_minus_16, WY_16
.set WY_minus_20, WY_20
.set WY_minus_24, WY_24
.set WY_minus_28, WY_28
.set WY_minus_32, WY
.endm
.macro PRECALC_00_15
.if (i == 0) # Initialize and rotate registers
PRECALC_RESET_WY
PRECALC_ROTATE_WY
.endif
/* message scheduling pre-compute for rounds 0-15 */
.if ((i & 7) == 0)
/*
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
.elseif ((i & 7) == 1)
vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
WY_TMP, WY_TMP
.elseif ((i & 7) == 2)
vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
.elseif ((i & 7) == 4)
vpaddd K_XMM(K_BASE), WY, WY_TMP
.elseif ((i & 7) == 7)
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
.endif
.endm
.macro PRECALC_16_31
/*
* message scheduling pre-compute for rounds 16-31
* calculating last 32 w[i] values in 8 XMM registers
* pre-calculate K+w[i] values and store to mem
* for later load by ALU add instruction
*
* "brute force" vectorization for rounds 16-31 only
* due to w[i]->w[i-3] dependency
*/
.if ((i & 7) == 0)
/*
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
/* w[i-14] */
vpalignr $8, WY_minus_16, WY_minus_12, WY
vpsrldq $4, WY_minus_04, WY_TMP /* w[i-3] */
.elseif ((i & 7) == 1)
vpxor WY_minus_08, WY, WY
vpxor WY_minus_16, WY_TMP, WY_TMP
.elseif ((i & 7) == 2)
vpxor WY_TMP, WY, WY
vpslldq $12, WY, WY_TMP2
.elseif ((i & 7) == 3)
vpslld $1, WY, WY_TMP
vpsrld $31, WY, WY
.elseif ((i & 7) == 4)
vpor WY, WY_TMP, WY_TMP
vpslld $2, WY_TMP2, WY
.elseif ((i & 7) == 5)
vpsrld $30, WY_TMP2, WY_TMP2
vpxor WY, WY_TMP, WY_TMP
.elseif ((i & 7) == 7)
vpxor WY_TMP2, WY_TMP, WY
vpaddd K_XMM(K_BASE), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
.endif
.endm
.macro PRECALC_32_79
/*
* in SHA-1 specification:
* w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1
* instead we do equal:
* w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2
* allows more efficient vectorization
* since w[i]=>w[i-3] dependency is broken
*/
.if ((i & 7) == 0)
/*
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
vpalignr $8, WY_minus_08, WY_minus_04, WY_TMP
.elseif ((i & 7) == 1)
/* W is W_minus_32 before xor */
vpxor WY_minus_28, WY, WY
.elseif ((i & 7) == 2)
vpxor WY_minus_16, WY_TMP, WY_TMP
.elseif ((i & 7) == 3)
vpxor WY_TMP, WY, WY
.elseif ((i & 7) == 4)
vpslld $2, WY, WY_TMP
.elseif ((i & 7) == 5)
vpsrld $30, WY, WY
vpor WY, WY_TMP, WY
.elseif ((i & 7) == 7)
vpaddd K_XMM(K_BASE), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
.endif
.endm
.macro PRECALC r, s
.set i, \r
.if (i < 40)
.set K_XMM, 32*0
.elseif (i < 80)
.set K_XMM, 32*1
.elseif (i < 120)
.set K_XMM, 32*2
.else
.set K_XMM, 32*3
.endif
.if (i<32)
PRECALC_00_15 \s
.elseif (i<64)
PRECALC_16_31 \s
.elseif (i < 160)
PRECALC_32_79 \s
.endif
.endm
.macro ROTATE_STATE
.set T_REG, E
.set E, D
.set D, C
.set C, B
.set B, TB
.set TB, A
.set A, T_REG
.set T_REG, RE
.set RE, RD
.set RD, RC
.set RC, RB
.set RB, RTB
.set RTB, RA
.set RA, T_REG
.endm
/* Macro relies on saved ROUND_Fx */
.macro RND_FUN f, r
.if (\f == RND_F1)
ROUND_F1 \r
.elseif (\f == RND_F2)
ROUND_F2 \r
.elseif (\f == RND_F3)
ROUND_F3 \r
.endif
.endm
.macro RR r
.set round_id, (\r % 80)
.if (round_id == 0) /* Precalculate F for first round */
.set ROUND_FUNC, RND_F1
mov B, TB
rorx $(32-30), B, B /* b>>>2 */
andn D, TB, T1
and C, TB
xor T1, TB
.endif
RND_FUN ROUND_FUNC, \r
ROTATE_STATE
.if (round_id == 18)
.set ROUND_FUNC, RND_F2
.elseif (round_id == 38)
.set ROUND_FUNC, RND_F3
.elseif (round_id == 58)
.set ROUND_FUNC, RND_F2
.endif
.set round_id, ( (\r+1) % 80)
RND_FUN ROUND_FUNC, (\r+1)
ROTATE_STATE
.endm
.macro ROUND_F1 r
add WK(\r), E
andn C, A, T1 /* ~b&d */
lea (RE,RTB), E /* Add F from the previous round */
rorx $(32-5), A, TA /* T2 = A >>> 5 */
rorx $(32-30),A, TB /* b>>>2 for next round */
PRECALC (\r) /* msg scheduling for next 2 blocks */
/*
* Calculate F for the next round
* (b & c) ^ andn[b, d]
*/
and B, A /* b&c */
xor T1, A /* F1 = (b&c) ^ (~b&d) */
lea (RE,RTA), E /* E += A >>> 5 */
.endm
.macro ROUND_F2 r
add WK(\r), E
lea (RE,RTB), E /* Add F from the previous round */
/* Calculate F for the next round */
rorx $(32-5), A, TA /* T2 = A >>> 5 */
.if ((round_id) < 79)
rorx $(32-30), A, TB /* b>>>2 for next round */
.endif
PRECALC (\r) /* msg scheduling for next 2 blocks */
.if ((round_id) < 79)
xor B, A
.endif
add TA, E /* E += A >>> 5 */
.if ((round_id) < 79)
xor C, A
.endif
.endm
.macro ROUND_F3 r
add WK(\r), E
PRECALC (\r) /* msg scheduling for next 2 blocks */
lea (RE,RTB), E /* Add F from the previous round */
mov B, T1
or A, T1
rorx $(32-5), A, TA /* T2 = A >>> 5 */
rorx $(32-30), A, TB /* b>>>2 for next round */
/* Calculate F for the next round
* (b and c) or (d and (b or c))
*/
and C, T1
and B, A
or T1, A
add TA, E /* E += A >>> 5 */
.endm
/*
* macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
*/
.macro SHA1_PIPELINED_MAIN_BODY
REGALLOC
mov (HASH_PTR), A
mov 4(HASH_PTR), B
mov 8(HASH_PTR), C
mov 12(HASH_PTR), D
mov 16(HASH_PTR), E
mov %rsp, PRECALC_BUF
lea (2*4*80+32)(%rsp), WK_BUF
# Precalc WK for first 2 blocks
PRECALC_OFFSET = 0
.set i, 0
.rept 160
PRECALC i
.set i, i + 1
.endr
PRECALC_OFFSET = 128
xchg WK_BUF, PRECALC_BUF
.align 32
_loop:
/*
* code loops through more than one block
* we use K_BASE value as a signal of a last block,
* it is set below by: cmovae BUFFER_PTR, K_BASE
*/
cmp K_BASE, BUFFER_PTR
jne _begin
.align 32
jmp _end
.align 32
_begin:
/*
* Do first block
* rounds: 0,2,4,6,8
*/
.set j, 0
.rept 5
RR j
.set j, j+2
.endr
jmp _loop0
_loop0:
/*
* rounds:
* 10,12,14,16,18
* 20,22,24,26,28
* 30,32,34,36,38
* 40,42,44,46,48
* 50,52,54,56,58
*/
.rept 25
RR j
.set j, j+2
.endr
add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
/*
* rounds
* 60,62,64,66,68
* 70,72,74,76,78
*/
.rept 10
RR j
.set j, j+2
.endr
UPDATE_HASH (HASH_PTR), A
UPDATE_HASH 4(HASH_PTR), TB
UPDATE_HASH 8(HASH_PTR), C
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
cmp K_BASE, BUFFER_PTR /* is current block the last one? */
je _loop
mov TB, B
/* Process second block */
/*
* rounds
* 0+80, 2+80, 4+80, 6+80, 8+80
* 10+80,12+80,14+80,16+80,18+80
*/
.set j, 0
.rept 10
RR j+80
.set j, j+2
.endr
jmp _loop1
_loop1:
/*
* rounds
* 20+80,22+80,24+80,26+80,28+80
* 30+80,32+80,34+80,36+80,38+80
*/
.rept 10
RR j+80
.set j, j+2
.endr
jmp _loop2
_loop2:
/*
* rounds
* 40+80,42+80,44+80,46+80,48+80
* 50+80,52+80,54+80,56+80,58+80
*/
.rept 10
RR j+80
.set j, j+2
.endr
add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */
cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
jmp _loop3
_loop3:
/*
* rounds
* 60+80,62+80,64+80,66+80,68+80
* 70+80,72+80,74+80,76+80,78+80
*/
.rept 10
RR j+80
.set j, j+2
.endr
UPDATE_HASH (HASH_PTR), A
UPDATE_HASH 4(HASH_PTR), TB
UPDATE_HASH 8(HASH_PTR), C
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
/* Reset state for AVX2 reg permutation */
mov A, TA
mov TB, A
mov C, TB
mov E, C
mov D, B
mov TA, D
REGALLOC
xchg WK_BUF, PRECALC_BUF
jmp _loop
.align 32
_end:
.endm
.section .rodata
#define K1 0x5a827999
#define K2 0x6ed9eba1
#define K3 0x8f1bbcdc
#define K4 0xca62c1d6
.align 128
K_XMM_AR:
.long K1, K1, K1, K1
.long K1, K1, K1, K1
.long K2, K2, K2, K2
.long K2, K2, K2, K2
.long K3, K3, K3, K3
.long K3, K3, K3, K3
.long K4, K4, K4, K4
.long K4, K4, K4, K4
BSWAP_SHUFB_CTL:
.long 0x00010203
.long 0x04050607
.long 0x08090a0b
.long 0x0c0d0e0f
.long 0x00010203
.long 0x04050607
.long 0x08090a0b
.long 0x0c0d0e0f
# void sha1_transform(int32_t *hash, const char* input, size_t num_blocks) ;
.text
ENTRY(sha1_transform)
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
RESERVE_STACK = (W_SIZE*4 + 8+24)
/* Align stack */
mov %rsp, %rbx
and $~(0x20-1), %rsp
push %rbx
sub $RESERVE_STACK, %rsp
avx2_zeroupper
lea K_XMM_AR(%rip), K_BASE
mov CTX, HASH_PTR
mov BUF, BUFFER_PTR
lea 64(BUF), BUFFER_PTR2
shl $6, CNT /* mul by 64 */
add BUF, CNT
add $64, CNT
mov CNT, BUFFER_END
cmp BUFFER_END, BUFFER_PTR2
cmovae K_BASE, BUFFER_PTR2
xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
SHA1_PIPELINED_MAIN_BODY
avx2_zeroupper
add $RESERVE_STACK, %rsp
pop %rsp
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
pop %rbx
ret
ENDPROC(sha1_transform)
#endif

@@ -1,579 +0,0 @@
;---------------------
; https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1
;
; License information:
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; This implementation notably advances the performance of SHA-1 algorithm compared to existing
; implementations. We are encouraging all projects utilizing SHA-1 to integrate this new fast
; implementation and are ready to help if issues or concerns arise (you are welcome to leave
; a comment or write an email to the authors). It is provided 'as is' and free for either
; commercial or non-commercial use.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; This code implements two interfaces of SHA-1 update function: 1) working on a single
; 64-byte block and 2) working on a buffer of multiple 64-bit blocks. Multiple blocks
; version of code is software pipelined and faster overall, it is a default. Assemble
; with -DINTEL_SHA1_SINGLEBLOCK to select single 64-byte block function interface.
;
; C++ prototypes of implemented functions are below:
;
; #ifndef INTEL_SHA1_SINGLEBLOCK
; // Updates 20-byte SHA-1 record in 'hash' for 'num_blocks' consequtive 64-byte blocks
; extern "C" void sha1_update_intel(int *hash, const char* input, size_t num_blocks );
; #else
; // Updates 20-byte SHA-1 record in 'hash' for one 64-byte block pointed by 'input'
; extern "C" void sha1_update_intel(int *hash, const char* input);
; #endif
;
; Function name 'sha1_update_intel' can be changed in the source or via macro:
; -DINTEL_SHA1_UPDATE_FUNCNAME=my_sha1_update_func_name
;
; It implements both UNIX(default) and Windows ABIs, use -DWIN_ABI on Windows
;
; Code checks CPU for SSSE3 support via CPUID feature flag (CPUID.1.ECX.SSSE3[bit 9]==1),
; and performs dispatch. Since in most cases the functionality on non-SSSE3 supporting CPUs
; is also required, the default (e.g. one being replaced) function can be provided for
; dispatch on such CPUs, the name of old function can be changed in the source or via macro:
; -DINTEL_SHA1_UPDATE_DEFAULT_DISPATCH=default_sha1_update_function_name
;
; Authors: Maxim Locktyukhin and Ronen Zohar at Intel.com
;
%ifndef INTEL_SHA1_UPDATE_DEFAULT_DISPATCH
;; can be replaced with a default SHA-1 update function name
%define INTEL_SHA1_UPDATE_DEFAULT_DISPATCH sha1_intel_non_ssse3_cpu_stub_
%else
extern INTEL_SHA1_UPDATE_DEFAULT_DISPATCH
%endif
;; provide alternative SHA-1 update function's name here
%ifndef INTEL_SHA1_UPDATE_FUNCNAME
%define INTEL_SHA1_UPDATE_FUNCNAME sha1_update_intel
%endif
global INTEL_SHA1_UPDATE_FUNCNAME
%ifndef INTEL_SHA1_SINGLEBLOCK
%assign multiblock 1
%else
%assign multiblock 0
%endif
bits 64
default rel
%ifdef WIN_ABI
%xdefine arg1 rcx
%xdefine arg2 rdx
%xdefine arg3 r8
%else
%xdefine arg1 rdi
%xdefine arg2 rsi
%xdefine arg3 rdx
%endif
%xdefine ctx arg1
%xdefine buf arg2
%xdefine cnt arg3
%macro REGALLOC 0
%xdefine A ecx
%xdefine B esi
%xdefine C edi
%xdefine D ebp
%xdefine E edx
%xdefine T1 eax
%xdefine T2 ebx
%endmacro
%xdefine K_BASE r8
%xdefine HASH_PTR r9
%xdefine BUFFER_PTR r10
%xdefine BUFFER_END r11
%xdefine W_TMP xmm0
%xdefine W_TMP2 xmm9
%xdefine W0 xmm1
%xdefine W4 xmm2
%xdefine W8 xmm3
%xdefine W12 xmm4
%xdefine W16 xmm5
%xdefine W20 xmm6
%xdefine W24 xmm7
%xdefine W28 xmm8
%xdefine XMM_SHUFB_BSWAP xmm10
;; we keep window of 64 w[i]+K pre-calculated values in a circular buffer
%xdefine WK(t) (rsp + (t & 15)*4)
;------------------------------------------------------------------------------
;
; macro implements SHA-1 function's body for single or several 64-byte blocks
; first param: function's name
; second param: =0 - function implements single 64-byte block hash
; =1 - function implements multiple64-byte blocks hash
; 3rd function's argument is a number, greater 0, of 64-byte blocks to calc hash for
;
%macro SHA1_VECTOR_ASM 2
align 4096
%1:
push rbx
push rbp
%ifdef WIN_ABI
push rdi
push rsi
%xdefine stack_size (16*4 + 16*5 + 8)
%else
%xdefine stack_size (16*4 + 8)
%endif
sub rsp, stack_size
%ifdef WIN_ABI
%xdefine xmm_save_base (rsp + 16*4)
xmm_mov [xmm_save_base + 0*16], xmm6
xmm_mov [xmm_save_base + 1*16], xmm7
xmm_mov [xmm_save_base + 2*16], xmm8
xmm_mov [xmm_save_base + 3*16], xmm9
xmm_mov [xmm_save_base + 4*16], xmm10
%endif
mov HASH_PTR, ctx
mov BUFFER_PTR, buf
%if (%2 == 1)
shl cnt, 6 ;; mul by 64
add cnt, buf
mov BUFFER_END, cnt
%endif
lea K_BASE, [K_XMM_AR]
xmm_mov XMM_SHUFB_BSWAP, [bswap_shufb_ctl]
SHA1_PIPELINED_MAIN_BODY %2
%ifdef WIN_ABI
xmm_mov xmm6, [xmm_save_base + 0*16]
xmm_mov xmm7, [xmm_save_base + 1*16]
xmm_mov xmm8, [xmm_save_base + 2*16]
xmm_mov xmm9, [xmm_save_base + 3*16]
xmm_mov xmm10,[xmm_save_base + 4*16]
%endif
add rsp, stack_size
%ifdef WIN_ABI
pop rsi
pop rdi
%endif
pop rbp
pop rbx
ret
%endmacro
;--------------------------------------------
; macro implements 80 rounds of SHA-1, for one 64-byte block or multiple blocks with s/w pipelining
; macro param: =0 - process single 64-byte block
; =1 - multiple blocks
;
%macro SHA1_PIPELINED_MAIN_BODY 1
REGALLOC
mov A, [HASH_PTR ]
mov B, [HASH_PTR+ 4]
mov C, [HASH_PTR+ 8]
mov D, [HASH_PTR+12]
mov E, [HASH_PTR+16]
%assign i 0
%rep W_PRECALC_AHEAD
W_PRECALC i
%assign i i+1
%endrep
%xdefine F F1
%if (%1 == 1) ;; code loops through more than one block
%%_loop:
cmp BUFFER_PTR, K_BASE ;; we use K_BASE value as a signal of a last block,
jne %%_begin ;; it is set below by: cmovae BUFFER_PTR, K_BASE
jmp %%_end
align 32
%%_begin:
%endif
RR A,B,C,D,E,0
RR D,E,A,B,C,2
RR B,C,D,E,A,4
RR E,A,B,C,D,6
RR C,D,E,A,B,8
RR A,B,C,D,E,10
RR D,E,A,B,C,12
RR B,C,D,E,A,14
RR E,A,B,C,D,16
RR C,D,E,A,B,18
%xdefine F F2
RR A,B,C,D,E,20
RR D,E,A,B,C,22
RR B,C,D,E,A,24
RR E,A,B,C,D,26
RR C,D,E,A,B,28
RR A,B,C,D,E,30
RR D,E,A,B,C,32
RR B,C,D,E,A,34
RR E,A,B,C,D,36
RR C,D,E,A,B,38
%xdefine F F3
RR A,B,C,D,E,40
RR D,E,A,B,C,42
RR B,C,D,E,A,44
RR E,A,B,C,D,46
RR C,D,E,A,B,48
RR A,B,C,D,E,50
RR D,E,A,B,C,52
RR B,C,D,E,A,54
RR E,A,B,C,D,56
RR C,D,E,A,B,58
%xdefine F F4
%if (%1 == 1) ;; if code loops through more than one block
add BUFFER_PTR, 64 ;; move to next 64-byte block
cmp BUFFER_PTR, BUFFER_END ;; check if current block is the last one
cmovae BUFFER_PTR, K_BASE ;; smart way to signal the last iteration
%else
%xdefine W_NO_TAIL_PRECALC 1 ;; no software pipelining for single block interface
%endif
RR A,B,C,D,E,60
RR D,E,A,B,C,62
RR B,C,D,E,A,64
RR E,A,B,C,D,66
RR C,D,E,A,B,68
RR A,B,C,D,E,70
RR D,E,A,B,C,72
RR B,C,D,E,A,74
RR E,A,B,C,D,76
RR C,D,E,A,B,78
UPDATE_HASH [HASH_PTR ],A
UPDATE_HASH [HASH_PTR+ 4],B
UPDATE_HASH [HASH_PTR+ 8],C
UPDATE_HASH [HASH_PTR+12],D
UPDATE_HASH [HASH_PTR+16],E
%if (%1 == 1)
jmp %%_loop
align 32
%%_end:
%endif
%xdefine W_NO_TAIL_PRECALC 0
%xdefine F %error
%endmacro
%macro F1 3
mov T1,%2
xor T1,%3
and T1,%1
xor T1,%3
%endmacro
%macro F2 3
mov T1,%3
xor T1,%2
xor T1,%1
%endmacro
%macro F3 3
mov T1,%2
mov T2,%1
or T1,%1
and T2,%2
and T1,%3
or T1,T2
%endmacro
%define F4 F2
%macro UPDATE_HASH 2
add %2, %1
mov %1, %2
%endmacro
%macro W_PRECALC 1
%xdefine i (%1)
%if (i < 20)
%xdefine K_XMM 0
%elif (i < 40)
%xdefine K_XMM 16
%elif (i < 60)
%xdefine K_XMM 32
%else
%xdefine K_XMM 48
%endif
%if (i<16 || (i>=80 && i<(80 + W_PRECALC_AHEAD)))
%if (W_NO_TAIL_PRECALC == 0)
%xdefine i ((%1) % 80) ;; pre-compute for the next iteration
%if (i == 0)
W_PRECALC_RESET
%endif
W_PRECALC_00_15
%endif
%elif (i < 32)
W_PRECALC_16_31
%elif (i < 80) ;; rounds 32-79
W_PRECALC_32_79
%endif
%endmacro
%macro W_PRECALC_RESET 0
%xdefine W W0
%xdefine W_minus_04 W4
%xdefine W_minus_08 W8
%xdefine W_minus_12 W12
%xdefine W_minus_16 W16
%xdefine W_minus_20 W20
%xdefine W_minus_24 W24
%xdefine W_minus_28 W28
%xdefine W_minus_32 W
%endmacro
%macro W_PRECALC_ROTATE 0
%xdefine W_minus_32 W_minus_28
%xdefine W_minus_28 W_minus_24
%xdefine W_minus_24 W_minus_20
%xdefine W_minus_20 W_minus_16
%xdefine W_minus_16 W_minus_12
%xdefine W_minus_12 W_minus_08
%xdefine W_minus_08 W_minus_04
%xdefine W_minus_04 W
%xdefine W W_minus_32
%endmacro
%xdefine W_PRECALC_AHEAD 16
%xdefine W_NO_TAIL_PRECALC 0
%xdefine xmm_mov movdqa
%macro W_PRECALC_00_15 0
;; message scheduling pre-compute for rounds 0-15
%if ((i & 3) == 0) ;; blended SSE and ALU instruction scheduling, 1 vector iteration per 4 rounds
movdqu W_TMP, [BUFFER_PTR + (i * 4)]
%elif ((i & 3) == 1)
pshufb W_TMP, XMM_SHUFB_BSWAP
movdqa W, W_TMP
%elif ((i & 3) == 2)
paddd W_TMP, [K_BASE]
%elif ((i & 3) == 3)
movdqa [WK(i&~3)], W_TMP
W_PRECALC_ROTATE
%endif
%endmacro
%macro W_PRECALC_16_31 0
;; message scheduling pre-compute for rounds 16-31
;; calculating last 32 w[i] values in 8 XMM registers
;; pre-calculate K+w[i] values and store to mem, for later load by ALU add instruction
;;
;; "brute force" vectorization for rounds 16-31 only due to w[i]->w[i-3] dependency
;;
%if ((i & 3) == 0) ;; blended SSE and ALU instruction scheduling, 1 vector iteration per 4 rounds
movdqa W, W_minus_12
palignr W, W_minus_16, 8 ;; w[i-14]
movdqa W_TMP, W_minus_04
psrldq W_TMP, 4 ;; w[i-3]
pxor W, W_minus_08
%elif ((i & 3) == 1)
pxor W_TMP, W_minus_16
pxor W, W_TMP
movdqa W_TMP2, W
movdqa W_TMP, W
pslldq W_TMP2, 12
%elif ((i & 3) == 2)
psrld W, 31
pslld W_TMP, 1
por W_TMP, W
movdqa W, W_TMP2
psrld W_TMP2, 30
pslld W, 2
%elif ((i & 3) == 3)
pxor W_TMP, W
pxor W_TMP, W_TMP2
movdqa W, W_TMP
paddd W_TMP, [K_BASE + K_XMM]
movdqa [WK(i&~3)],W_TMP
W_PRECALC_ROTATE
%endif
%endmacro
%macro W_PRECALC_32_79 0
;; in SHA-1 specification: w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1
;; instead we do equal: w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2
;; allows more efficient vectorization since w[i]=>w[i-3] dependency is broken
;;
%if ((i & 3) == 0) ;; blended SSE and ALU instruction scheduling, 1 vector iteration per 4 rounds
movdqa W_TMP, W_minus_04
pxor W, W_minus_28 ;; W is W_minus_32 before xor
palignr W_TMP, W_minus_08, 8
%elif ((i & 3) == 1)
pxor W, W_minus_16
pxor W, W_TMP
movdqa W_TMP, W
%elif ((i & 3) == 2)
psrld W, 30
pslld W_TMP, 2
por W_TMP, W
%elif ((i & 3) == 3)
movdqa W, W_TMP
paddd W_TMP, [K_BASE + K_XMM]
movdqa [WK(i&~3)],W_TMP
W_PRECALC_ROTATE
%endif
%endmacro
%macro RR 6 ;; RR does two rounds of SHA-1 back to back with W pre-calculation
;; TEMP = A
;; A = F( i, B, C, D ) + E + ROTATE_LEFT( A, 5 ) + W[i] + K(i)
;; C = ROTATE_LEFT( B, 30 )
;; D = C
;; E = D
;; B = TEMP
W_PRECALC (%6 + W_PRECALC_AHEAD)
F %2, %3, %4 ;; F returns result in T1
add %5, [WK(%6)]
rol %2, 30
mov T2, %1
add %4, [WK(%6 + 1)]
rol T2, 5
add %5, T1
W_PRECALC (%6 + W_PRECALC_AHEAD + 1)
add T2, %5
mov %5, T2
rol T2, 5
add %4, T2
F %1, %2, %3 ;; F returns result in T1
add %4, T1
rol %1, 30
;; write: %1, %2
;; rotate: %1<=%4, %2<=%5, %3<=%1, %4<=%2, %5<=%3
%endmacro
;;----------------------
section .data align=128
%xdefine K1 0x5a827999
%xdefine K2 0x6ed9eba1
%xdefine K3 0x8f1bbcdc
%xdefine K4 0xca62c1d6
align 128
K_XMM_AR:
DD K1, K1, K1, K1
DD K2, K2, K2, K2
DD K3, K3, K3, K3
DD K4, K4, K4, K4
align 16
bswap_shufb_ctl:
DD 00010203h
DD 04050607h
DD 08090a0bh
DD 0c0d0e0fh
;; dispatch pointer, points to the init routine for the first invocation
sha1_update_intel_dispatched:
DQ sha1_update_intel_init_
;;----------------------
section .text align=4096
SHA1_VECTOR_ASM sha1_update_intel_ssse3_, multiblock
align 32
sha1_update_intel_init_: ;; we get here with the first time invocation
call sha1_update_intel_dispacth_init_
INTEL_SHA1_UPDATE_FUNCNAME: ;; we get here after init
jmp qword [sha1_update_intel_dispatched]
;; CPUID feature flag based dispatch
sha1_update_intel_dispacth_init_:
push rax
push rbx
push rcx
push rdx
push rsi
lea rsi, [INTEL_SHA1_UPDATE_DEFAULT_DISPATCH]
mov eax, 1
cpuid
test ecx, 0200h ;; SSSE3 support, CPUID.1.ECX[bit 9]
jz _done
lea rsi, [sha1_update_intel_ssse3_]
_done:
mov [sha1_update_intel_dispatched], rsi
pop rsi
pop rdx
pop rcx
pop rbx
pop rax
ret
;;----------------------
;; in the case a default SHA-1 update function implementation was not provided
;; and code was invoked on a non-SSSE3 supporting CPU, dispatch handles this
;; failure in a safest way - jumps to the stub function with UD2 instruction below
sha1_intel_non_ssse3_cpu_stub_:
ud2 ;; in the case no default SHA-1 was provided non-SSSE3 CPUs safely fail here
ret
; END
;----------------------

@@ -1,154 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015-2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file of
// Golang project:
// https://github.com/golang/go/blob/master/LICENSE
// Using this part of Minio codebase under the license
// Apache License Version 2.0 with modifications
// SHA1 hash algorithm. See RFC 3174.
package sha1
import (
"crypto/rand"
"fmt"
"io"
"testing"
)
type sha1Test struct {
out string
in string
}
var golden = []sha1Test{
{"da39a3ee5e6b4b0d3255bfef95601890afd80709", ""},
{"86f7e437faa5a7fce15d1ddcb9eaeaea377667b8", "a"},
{"da23614e02469a0d7c7bd1bdab5c9c474b1904dc", "ab"},
{"a9993e364706816aba3e25717850c26c9cd0d89d", "abc"},
{"81fe8bfe87576c3ecb22426f8e57847382917acf", "abcd"},
{"03de6c570bfe24bfc328ccd7ca46b76eadaf4334", "abcde"},
{"1f8ac10f23c5b5bc1167bda84b833e5c057a77d2", "abcdef"},
{"2fb5e13419fc89246865e7a324f476ec624e8740", "abcdefg"},
{"425af12a0743502b322e93a015bcf868e324d56a", "abcdefgh"},
{"c63b19f1e4c8b5f76b25c49b8b87f57d8e4872a1", "abcdefghi"},
{"d68c19a0a345b7eab78d5e11e991c026ec60db63", "abcdefghij"},
{"ebf81ddcbe5bf13aaabdc4d65354fdf2044f38a7", "Discard medicine more than two years old."},
{"e5dea09392dd886ca63531aaa00571dc07554bb6", "He who has a shady past knows that nice guys finish last."},
{"45988f7234467b94e3e9494434c96ee3609d8f8f", "I wouldn't marry him with a ten foot pole."},
{"55dee037eb7460d5a692d1ce11330b260e40c988", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"},
{"b7bc5fb91080c7de6b582ea281f8a396d7c0aee8", "The days of the digital watch are numbered. -Tom Stoppard"},
{"c3aed9358f7c77f523afe86135f06b95b3999797", "Nepal premier won't resign."},
{"6e29d302bf6e3a5e4305ff318d983197d6906bb9", "For every action there is an equal and opposite government program."},
{"597f6a540010f94c15d71806a99a2c8710e747bd", "His money is twice tainted: 'taint yours and 'taint mine."},
{"6859733b2590a8a091cecf50086febc5ceef1e80", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"},
{"514b2630ec089b8aee18795fc0cf1f4860cdacad", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"},
{"c5ca0d4a7b6676fc7aa72caa41cc3d5df567ed69", "size: a.out: bad magic"},
{"74c51fa9a04eadc8c1bbeaa7fc442f834b90a00a", "The major problem is with sendmail. -Mark Horton"},
{"0b4c4ce5f52c3ad2821852a8dc00217fa18b8b66", "Give me a rock, paper and scissors and I will move the world. CCFestoon"},
{"3ae7937dd790315beb0f48330e8642237c61550a", "If the enemy is within range, then so are you."},
{"410a2b296df92b9a47412b13281df8f830a9f44b", "It's well we cannot hear the screams/That we create in others' dreams."},
{"841e7c85ca1adcddbdd0187f1289acb5c642f7f5", "You remind me of a TV show, but that's all right: I watch it anyway."},
{"163173b825d03b952601376b25212df66763e1db", "C is as portable as Stonehedge!!"},
{"32b0377f2687eb88e22106f133c586ab314d5279", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"},
{"0885aaf99b569542fd165fa44e322718f4a984e0", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"},
{"6627d6904d71420b0bf3886ab629623538689f45", "How can you write a big system without C++? -Paul Glick"},
}
func TestGolden(t *testing.T) {
for i := 0; i < len(golden); i++ {
g := golden[i]
s := fmt.Sprintf("%x", Sum([]byte(g.in)))
if s != g.out {
t.Fatalf("Sum function: sha1(%s) = %s want %s", g.in, s, g.out)
}
c := New()
for j := 0; j < 3; j++ {
if j < 2 {
io.WriteString(c, g.in)
} else {
io.WriteString(c, g.in[0:len(g.in)/2])
c.Sum(nil)
io.WriteString(c, g.in[len(g.in)/2:])
}
s := fmt.Sprintf("%x", c.Sum(nil))
if s != g.out {
t.Fatalf("sha1[%d](%s) = %s want %s", j, g.in, s, g.out)
}
c.Reset()
}
}
}
func TestSize(t *testing.T) {
c := New()
if got := c.Size(); got != Size {
t.Errorf("Size = %d; want %d", got, Size)
}
}
func TestBlockSize(t *testing.T) {
c := New()
if got := c.BlockSize(); got != BlockSize {
t.Errorf("BlockSize = %d; want %d", got, BlockSize)
}
}
// Tests that blockGeneric (pure Go) and block (in assembly for amd64, 386, arm) match.
func TestBlockGeneric(t *testing.T) {
gen, asm := New().(*digest), New().(*digest)
buf := make([]byte, BlockSize*20) // arbitrary factor
rand.Read(buf)
blockGeneric(gen, buf)
block(asm, buf)
if *gen != *asm {
t.Error("block and blockGeneric resulted in different states")
}
}
var bench = New()
var buf = make([]byte, 1024*1024)
func benchmarkSize(b *testing.B, size int) {
b.SetBytes(int64(size))
sum := make([]byte, bench.Size())
for i := 0; i < b.N; i++ {
bench.Reset()
bench.Write(buf[:size])
bench.Sum(sum[:0])
}
}
func BenchmarkHash8Bytes(b *testing.B) {
benchmarkSize(b, 8)
}
func BenchmarkHash1K(b *testing.B) {
benchmarkSize(b, 1024)
}
func BenchmarkHash8K(b *testing.B) {
benchmarkSize(b, 8192)
}
func BenchmarkHash1M(b *testing.B) {
benchmarkSize(b, 1024*1024)
}

@@ -1,21 +0,0 @@
// +build darwin,amd64
/*
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
//go:generate yasm -f macho64 -DINTEL_SHA1_UPDATE_FUNCNAME=_sha1_update_intel sha1_sse3_amd64.asm -o sha1_sse3_amd64.syso

@@ -1,21 +0,0 @@
// +build linux,amd64
/*
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
//go:generate yasm -f elf64 sha1_sse3_amd64.asm -o sha1_sse3_amd64.syso

@@ -1,21 +0,0 @@
// +build windows,amd64
/*
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
//go:generate yasm -f win64 -DWIN_ABI=1 sha1_sse3_amd64.asm -o sha1_sse3_amd64.syso

@@ -1,43 +0,0 @@
// +build amd64,cgo
// +build darwin windows
/*
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
// #include <stdint.h>
// #include <stdlib.h>
// void sha1_update_intel(int32_t *hash, const char* input, size_t num_blocks);
import "C"
import (
"unsafe"
"github.com/klauspost/cpuid"
)
func block(dig *digest, p []byte) {
switch true {
case cpuid.CPU.SSE3():
blockSSE3(dig, p)
default:
blockGeneric(dig, p)
}
}
func blockSSE3(dig *digest, p []byte) {
C.sha1_update_intel((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
}

@ -1,110 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015-2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file of
// Golang project:
// https://github.com/golang/go/blob/master/LICENSE
// Using this part of Minio codebase under the license
// Apache License Version 2.0 with modifications
package sha1
const (
_K0 = 0x5A827999
_K1 = 0x6ED9EBA1
_K2 = 0x8F1BBCDC
_K3 = 0xCA62C1D6
)
// blockGeneric is a portable, pure Go version of the SHA1 block step.
// It's used by sha1block_generic.go and tests.
func blockGeneric(dig *digest, p []byte) {
var w [16]uint32
h0, h1, h2, h3, h4 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4]
for len(p) >= chunk {
// Can interlace the computation of w with the
// rounds below if needed for speed.
for i := 0; i < 16; i++ {
j := i * 4
w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
}
a, b, c, d, e := h0, h1, h2, h3, h4
// Each of the four 20-iteration rounds
// differs only in the computation of f and
// the choice of K (_K0, _K1, etc).
i := 0
for ; i < 16; i++ {
f := b&c | (^b)&d
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K0
a, b, c, d, e = t, a, b30, c, d
}
for ; i < 20; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := b&c | (^b)&d
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K0
a, b, c, d, e = t, a, b30, c, d
}
for ; i < 40; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := b ^ c ^ d
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K1
a, b, c, d, e = t, a, b30, c, d
}
for ; i < 60; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := ((b | c) & d) | (b & c)
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K2
a, b, c, d, e = t, a, b30, c, d
}
for ; i < 80; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := b ^ c ^ d
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K3
a, b, c, d, e = t, a, b30, c, d
}
h0 += a
h1 += b
h2 += c
h3 += d
h4 += e
p = p[chunk:]
}
dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] = h0, h1, h2, h3, h4
}

@ -1,50 +0,0 @@
// +build linux,amd64,cgo
/*
* Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
// #cgo CFLAGS: -DHAS_AVX2
// #include <stdint.h>
// #include <stdlib.h>
// void sha1_transform(int32_t *hash, const char* input, size_t num_blocks);
// void sha1_update_intel(int32_t *hash, const char* input, size_t num_blocks);
import "C"
import (
"unsafe"
"github.com/klauspost/cpuid"
)
func block(dig *digest, p []byte) {
switch true {
case cpuid.CPU.AVX2():
blockAVX2(dig, p)
case cpuid.CPU.SSE3():
blockSSE3(dig, p)
default:
blockGeneric(dig, p)
}
}
func blockAVX2(dig *digest, p []byte) {
C.sha1_transform((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
}
func blockSSE3(dig *digest, p []byte) {
C.sha1_update_intel((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
}

@ -1,23 +0,0 @@
// +build !cgo arm
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
func block(dig *digest, p []byte) {
blockGeneric(dig, p)
}

@ -1 +0,0 @@
*.syso

@ -1,64 +0,0 @@
## Ubuntu (Kylin) 14.04
### Build Dependencies
This installation document assumes Ubuntu 14.04+ on x86-64 platform.
##### Install Git, GCC, yasm
```sh
$ sudo apt-get install git build-essential yasm
```
##### Install Go 1.6+
Download Go 1.6+ from [https://golang.org/dl/](https://golang.org/dl/).
```sh
$ wget https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz
$ mkdir -p ${HOME}/bin/
$ mkdir -p ${HOME}/go/
$ tar -C ${HOME}/bin/ -xzf go1.6.linux-amd64.tar.gz
```
##### Setup GOROOT and GOPATH
Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries
and GOPATH specifies the location of your project workspace.
```sh
$ export GOROOT=${HOME}/bin/go
$ export GOPATH=${HOME}/go
$ export PATH=${HOME}/bin/go/bin:${GOPATH}/bin:$PATH
```
## OS X (Yosemite) 10.10
### Build Dependencies
This installation document assumes OS X Yosemite 10.10+ on x86-64 platform.
##### Install brew
```sh
$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
```
##### Install Git, Python, yasm
```sh
$ brew install git python yasm
```
##### Install Go 1.6+
Install golang binaries using `brew`
```sh
$ brew install go
$ mkdir -p $HOME/go
```
##### Setup GOROOT and GOPATH
Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries
and GOPATH specifies the location of your project workspace.
```sh
$ export GOPATH=${HOME}/go
$ export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6)
$ export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
$ export PATH=${GOPATH}/bin:$PATH
```

@ -1,26 +0,0 @@
Copyright(c) 2011-2014 Intel Corporation All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,25 +0,0 @@
## Introduction
Erasure is an open source Golang library written on top of ISA-L (Intel Intelligent Storage Acceleration Library), released under [Apache license v2](./LICENSE).
### Developers
* [Get Source](./CONTRIBUTING.md)
* [Build Dependencies](./BUILDDEPS.md)
* [Development Workflow](./CONTRIBUTING.md#developer-guidelines)
* [Developer discussions and bugs](https://github.com/minio/minio/issues)
### Supported platforms
| Name | Supported |
| ------------- | ------------- |
| Linux | Yes |
| Windows | Not yet |
| Mac OSX | Yes |
### Supported architectures
| Arch | Supported |
| ------------- | ------------- |
| x86-64 | Yes |
| arm64 | Not yet |
| i386 | Never |

@ -1,62 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package erasure
// #include <stdint.h>
import "C"
import (
"fmt"
"unsafe"
)
// intSlice2CIntArray converts Go int slice to C int array
func intSlice2CIntArray(srcErrList []int) *C.int32_t {
if len(srcErrList) == 0 {
return (*C.int32_t)(unsafe.Pointer(nil))
}
var sizeErrInt = int(unsafe.Sizeof(srcErrList[0]))
switch sizeInt {
case sizeErrInt:
return (*C.int32_t)(unsafe.Pointer(&srcErrList[0]))
case sizeInt8:
int8Array := make([]int8, len(srcErrList))
for i, v := range srcErrList {
int8Array[i] = int8(v)
}
return (*C.int32_t)(unsafe.Pointer(&int8Array[0]))
case sizeInt16:
int16Array := make([]int16, len(srcErrList))
for i, v := range srcErrList {
int16Array[i] = int16(v)
}
return (*C.int32_t)(unsafe.Pointer(&int16Array[0]))
case sizeInt32:
int32Array := make([]int32, len(srcErrList))
for i, v := range srcErrList {
int32Array[i] = int32(v)
}
return (*C.int32_t)(unsafe.Pointer(&int32Array[0]))
case sizeInt64:
int64Array := make([]int64, len(srcErrList))
for i, v := range srcErrList {
int64Array[i] = int64(v)
}
return (*C.int32_t)(unsafe.Pointer(&int64Array[0]))
default:
panic(fmt.Sprintf("Unsupported: %d", sizeInt))
}
}

@ -1,66 +0,0 @@
// Package erasure is a Go wrapper for the Intel Intelligent Storage
// Acceleration Library (Intel ISA-L). Intel ISA-L is a CPU optimized
// implementation of erasure coding algorithms.
//
// For more information on Intel ISA-L, please visit:
// https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
//
// Usage:
//
// Encode encodes a block of data. The input is the original data. The output
// is the set of (k + m) blocks of erasure encoded data.
//
// Decode decodes the (k + m) encoded blocks back into the original form.
// Additionally, the original data length must be provided as input.
//
// Decoded data is identical in length and content to the original data.
//
// Encoding data may be performed in 3 steps.
//
// 1. Validate a set of encoder parameters
// 2. Create a new encoder
// 3. Encode data
//
// Decoding data is also performed in 3 steps.
//
// 1. Validate the same set of encoder parameters
// 2. Create a new encoder
// 3. Decode data
//
// Erasure parameters contain two configurable elements:
// ValidateParams(k, m uint8) (*Params, error)
// k - Number of data blocks (matrix rows)
// m - Number of parity blocks (matrix columns)
// constraint: k + m < Galois field size (2^8)
//
// Choosing the right parity and matrix technique is left to the application.
//
// But here are a few points to keep in mind:
//
// Matrix Type:
// - Vandermonde is the most commonly used method for choosing coefficients in
// erasure encoding, but it does not guarantee an invertible matrix for every sub-matrix.
// - Cauchy is our recommended method for choosing coefficients in erasure coding,
// since any sub-matrix of a Cauchy matrix is invertible.
//
// Total blocks:
// - The total number of data and parity blocks should not be greater than the Galois field size (2^8)
//
// Example
//
// Creating and using an encoder
// var bytes []byte
// params, err := erasure.ValidateParams(10, 5)
// encoder := erasure.NewErasure(params)
// encodedData, err := encoder.Encode(bytes)
//
// Creating and using a decoder
// var encodedData [][]byte
// var length int
// params, err := erasure.ValidateParams(10, 5)
// encoder := erasure.NewErasure(params)
// originalData, err := encoder.Decode(encodedData, length)
//
package erasure
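For reference, a minimal round-trip sketch against the API removed here (ValidateParams, NewErasure, Encode, Decode); the import path, parameters, and sample data are illustrative only, and error handling is reduced to log.Fatal:

// Hedged sketch: exercises the pkg/erasure API deleted in this commit.
package main

import (
	"bytes"
	"log"

	"github.com/minio/minio/pkg/erasure"
)

func main() {
	data := []byte("hello erasure coding")

	// 10 data blocks, 5 parity blocks (illustrative values).
	params, err := erasure.ValidateParams(10, 5)
	if err != nil {
		log.Fatal(err)
	}
	encoder := erasure.NewErasure(params)

	// Encode produces k+m encoded blocks.
	blocks, err := encoder.Encode(data)
	if err != nil {
		log.Fatal(err)
	}

	// Simulate losing up to m blocks; missing blocks are set to nil.
	blocks[0], blocks[3], blocks[11] = nil, nil, nil

	// Decode reconstructs the original data from the surviving blocks.
	recovered, err := encoder.Decode(blocks, len(data))
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(data, recovered) {
		log.Fatal("recovered data differs from original")
	}
}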

@ -1,39 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __COMMON_H__
#define __COMMON_H__
#include <stdint.h>
int32_t minio_init_encoder (int k, int m,
unsigned char **encode_matrix,
unsigned char **encode_tbls);
int32_t minio_init_decoder (int32_t *error_index,
int k, int n, int errs,
unsigned char *encoding_matrix,
unsigned char **decode_matrix,
unsigned char **decode_tbls,
uint32_t **decode_index);
int32_t minio_get_source_target (int errs, int k, int m,
int32_t *error_index,
uint32_t *decode_index,
unsigned char **buffs,
unsigned char ***source,
unsigned char ***target);
#endif /* __COMMON_H__ */

@ -1,142 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "isa-l.h"
#include "ec_minio_common.h"
static
int32_t _minio_src_index_in_error (int r, int32_t *error_index, int errs)
{
int i;
for (i = 0; i < errs; i++) {
if (error_index[i] == r) {
// true
return 1;
}
}
// false
return 0;
}
// Separate out source data and target buffers
int32_t minio_get_source_target (int errs, int k, int m,
int32_t *error_index,
uint32_t *decode_index,
unsigned char **buffs,
unsigned char ***source,
unsigned char ***target)
{
int i;
unsigned char *tmp_source[k];
unsigned char *tmp_target[m];
if (k < 0 || m < 0) {
return -1;
}
memset (tmp_source, 0, sizeof (tmp_source));
memset (tmp_target, 0, sizeof (tmp_target));
for (i = 0; i < k; i++) {
tmp_source[i] = (unsigned char *) buffs[decode_index[i]];
}
for (i = 0; i < m; i++) {
if (i < errs)
tmp_target[i] = (unsigned char *) buffs[error_index[i]];
}
*source = tmp_source;
*target = tmp_target;
return 0;
}
/*
Generate decode matrix during the decoding phase
*/
int minio_init_decoder (int32_t *error_index,
int k, int n, int errs,
unsigned char *encode_matrix,
unsigned char **decode_matrix,
unsigned char **decode_tbls,
uint32_t **decode_index)
{
int i, j, r, l;
uint32_t *tmp_decode_index = (uint32_t *) malloc(sizeof(uint32_t) * k);
unsigned char *input_matrix;
unsigned char *inverse_matrix;
unsigned char *tmp_decode_matrix;
unsigned char *tmp_decode_tbls;
input_matrix = (unsigned char *) malloc(sizeof(unsigned char) * k * n);
inverse_matrix = (unsigned char *) malloc(sizeof(unsigned char) * k * n);
tmp_decode_matrix = (unsigned char *) malloc(sizeof(unsigned char) * k * n);
tmp_decode_tbls = (unsigned char *) malloc(sizeof(unsigned char) * k * n * 32);
for (i = 0, r = 0; i < k; i++, r++) {
while (_minio_src_index_in_error(r, error_index, errs))
r++;
for (j = 0; j < k; j++) {
input_matrix[k * i + j] = encode_matrix[k * r + j];
}
tmp_decode_index[i] = r;
}
// Not all Vandermonde matrices can be inverted
if (gf_invert_matrix(input_matrix, inverse_matrix, k) < 0) {
free(tmp_decode_matrix);
free(tmp_decode_tbls);
free(tmp_decode_index);
return -1;
}
for (l = 0; l < errs; l++) {
if (error_index[l] < k) {
// decoding matrix elements for data chunks
for (j = 0; j < k; j++) {
tmp_decode_matrix[k * l + j] =
inverse_matrix[k *
error_index[l] + j];
}
} else {
// decoding matrix element for coding chunks
for (i = 0; i < k; i++) {
unsigned char s = 0;
for (j = 0; j < k; j++) {
s ^= gf_mul(inverse_matrix[j * k + i],
encode_matrix[k *
error_index[l] + j]);
}
tmp_decode_matrix[k * l + i] = s;
}
}
}
ec_init_tables (k, errs, tmp_decode_matrix, tmp_decode_tbls);
*decode_matrix = tmp_decode_matrix;
*decode_tbls = tmp_decode_tbls;
*decode_index = tmp_decode_index;
return 0;
}

@ -1,55 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include "isa-l.h"
#include "ec_minio_common.h"
/*
Generate encode matrix during the encoding phase
*/
int32_t minio_init_encoder (int k, int m, unsigned char **encode_matrix, unsigned char **encode_tbls)
{
unsigned char *tmp_matrix;
unsigned char *tmp_tbls;
tmp_matrix = (unsigned char *) malloc (k * (k + m));
tmp_tbls = (unsigned char *) malloc (k * (k + m) * 32);
if (k < 5) {
/*
Commonly used method for choosing coefficients in erasure
encoding but does not guarantee invertable for every sub
matrix. For large k it is possible to find cases where the
decode matrix chosen from sources and parity not in erasure
are not invertable. Users may want to adjust for k > 5.
-- Intel
*/
gf_gen_rs_matrix (tmp_matrix, k + m, k);
} else {
gf_gen_cauchy1_matrix (tmp_matrix, k + m, k);
}
ec_init_tables(k, m, &tmp_matrix[k * k], tmp_tbls);
*encode_matrix = tmp_matrix;
*encode_tbls = tmp_tbls;
return 0;
}

@ -1,123 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package erasure
// #cgo CFLAGS: -O0
// #include <stdlib.h>
// #include "isa-l.h"
// #include "ec_minio_common.h"
import "C"
import (
"errors"
"fmt"
"unsafe"
)
// Decode decodes erasure coded blocks of data into its original
// form. Erasure coded data contains K data blocks and M parity
// blocks. Decode can withstand the loss of up to M blocks.
//
// "encodedDataBlocks" is an array of K data blocks and M parity
// blocks. Data blocks are position and order dependent. Missing blocks
// are set to "nil". There must be at least "K" number of data|parity
// blocks.
//
// "dataLen" is the length of original source data
func (e *Erasure) Decode(encodedDataBlocks [][]byte, dataLen int) (decodedData []byte, err error) {
e.mutex.Lock()
defer e.mutex.Unlock()
var source, target **C.uchar
k := int(e.params.K)
m := int(e.params.M)
n := k + m
// We need the data and parity blocks preserved in the same order. Missing blocks are set to nil.
if len(encodedDataBlocks) != n {
msg := fmt.Sprintf("Encoded data blocks slice must be of length [%d]", n)
return nil, errors.New(msg)
}
// Length of a single encoded block
encodedBlockLen := GetEncodedBlockLen(dataLen, uint8(k))
// Keep track of errors per block.
missingEncodedBlocks := make([]int, n+1)
var missingEncodedBlocksCount int
// Check for the missing encoded blocks
for i := range encodedDataBlocks {
if encodedDataBlocks[i] == nil || len(encodedDataBlocks[i]) == 0 {
missingEncodedBlocks[missingEncodedBlocksCount] = i
missingEncodedBlocksCount++
}
}
// Cannot reconstruct original data if more than M blocks are missing.
if missingEncodedBlocksCount > m {
return nil, fmt.Errorf("Cannot reconstruct original data. Need at least [%d] data or parity blocks", m)
}
// Convert from Go int slice to C int array
missingEncodedBlocksC := intSlice2CIntArray(missingEncodedBlocks[:missingEncodedBlocksCount])
// Allocate buffer for the missing blocks
for i := range encodedDataBlocks {
if encodedDataBlocks[i] == nil || len(encodedDataBlocks[i]) == 0 {
encodedDataBlocks[i] = make([]byte, encodedBlockLen)
}
}
// If not already initialized, recompute and cache
if e.decodeMatrix == nil || e.decodeTbls == nil || e.decodeIndex == nil {
var decodeMatrix, decodeTbls *C.uchar
var decodeIndex *C.uint32_t
C.minio_init_decoder(missingEncodedBlocksC, C.int(k), C.int(n), C.int(missingEncodedBlocksCount),
e.encodeMatrix, &decodeMatrix, &decodeTbls, &decodeIndex)
// cache this for future needs
e.decodeMatrix = decodeMatrix
e.decodeTbls = decodeTbls
e.decodeIndex = decodeIndex
}
// Make a slice of pointers to encoded blocks. Necessary to bridge to the C world.
pointers := make([]*byte, n)
for i := range encodedDataBlocks {
pointers[i] = &encodedDataBlocks[i][0]
}
// Get pointers to source "data" and target "parity" blocks from the output byte array.
ret := C.minio_get_source_target(C.int(missingEncodedBlocksCount), C.int(k), C.int(m), missingEncodedBlocksC,
e.decodeIndex, (**C.uchar)(unsafe.Pointer(&pointers[0])), &source, &target)
if int(ret) == -1 {
return nil, errors.New("Unable to decode data")
}
// Decode data
C.ec_encode_data(C.int(encodedBlockLen), C.int(k), C.int(missingEncodedBlocksCount), e.decodeTbls,
source, target)
// Allocate the output buffer and copy the decoded data blocks into it
decodedData = make([]byte, 0, encodedBlockLen*int(k))
for i := 0; i < int(k); i++ {
decodedData = append(decodedData, encodedDataBlocks[i]...)
}
return decodedData[:dataLen], nil
}

@ -1,174 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package erasure
// #include <stdlib.h>
// #include "isa-l.h"
// #include "ec_minio_common.h"
import "C"
import (
"errors"
"sync"
"unsafe"
)
// Block alignment
const (
SIMDAlign = 32
)
// Params is a configuration set for building an encoder. It is created using ValidateParams().
type Params struct {
K uint8
M uint8
}
// Erasure is an object used to encode and decode data.
type Erasure struct {
params *Params
encodeMatrix, encodeTbls *C.uchar
decodeMatrix, decodeTbls *C.uchar
decodeIndex *C.uint32_t
mutex *sync.Mutex
}
// ValidateParams creates a Params object.
//
// k and m represent the matrix size, which corresponds to the protection level.
// The matrix technique (Cauchy or Vandermonde) is chosen internally by the encoder.
//
func ValidateParams(k, m uint8) (*Params, error) {
if k < 1 {
return nil, errors.New("k cannot be zero")
}
if m < 1 {
return nil, errors.New("m cannot be zero")
}
if k+m > 255 {
return nil, errors.New("(k + m) cannot be bigger than Galois field GF(2^8) - 1")
}
return &Params{
K: k,
M: m,
}, nil
}
// NewErasure creates an encoder object with a given set of parameters.
func NewErasure(ep *Params) *Erasure {
var k = C.int(ep.K)
var m = C.int(ep.M)
var encodeMatrix *C.uchar
var encodeTbls *C.uchar
C.minio_init_encoder(k, m, &encodeMatrix, &encodeTbls)
return &Erasure{
params: ep,
encodeMatrix: encodeMatrix,
encodeTbls: encodeTbls,
decodeMatrix: nil,
decodeTbls: nil,
decodeIndex: nil,
mutex: new(sync.Mutex),
}
}
// GetEncodedBlocksLen - total length of all encoded blocks
func GetEncodedBlocksLen(inputLen int, k, m uint8) (outputLen int) {
outputLen = GetEncodedBlockLen(inputLen, k) * int(k+m)
return outputLen
}
// GetEncodedBlockLen - length per block of encoded blocks
func GetEncodedBlockLen(inputLen int, k uint8) (encodedOutputLen int) {
alignment := int(k) * SIMDAlign
remainder := inputLen % alignment
paddedInputLen := inputLen
if remainder != 0 {
paddedInputLen = inputLen + (alignment - remainder)
}
encodedOutputLen = paddedInputLen / int(k)
return encodedOutputLen
}
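For illustration (values are only an example, not from the original source): with inputLen = 1000 and k = 10, alignment = 10 * 32 = 320, remainder = 1000 % 320 = 40, so paddedInputLen = 1000 + (320 - 40) = 1280 and encodedOutputLen = 1280 / 10 = 128 bytes per block; GetEncodedBlocksLen then yields 128 * (k + m) = 1920 for m = 5.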
// Encode erasure codes a block of data in "k" data blocks and "m" parity blocks.
// Output is [k+m][]blocks of data and parity slices.
func (e *Erasure) Encode(inputData []byte) (encodedBlocks [][]byte, err error) {
e.mutex.Lock()
defer e.mutex.Unlock()
k := int(e.params.K) // "k" data blocks
m := int(e.params.M) // "m" parity blocks
n := k + m // "n" total encoded blocks
// Length of a single encoded chunk.
// Total number of encoded chunks = "k" data + "m" parity blocks
encodedBlockLen := GetEncodedBlockLen(len(inputData), uint8(k))
// Length of total number of "k" data chunks
encodedDataBlocksLen := encodedBlockLen * k
// Length of extra padding required for the data blocks.
encodedDataBlocksPadLen := encodedDataBlocksLen - len(inputData)
// Extend inputData buffer to accommodate coded data blocks if necessary
if encodedDataBlocksPadLen > 0 {
padding := make([]byte, encodedDataBlocksPadLen)
// Expand with new padded blocks to the byte array
inputData = append(inputData, padding...)
}
// Extend inputData buffer to accommodate coded parity blocks
{ // Local Scope
encodedParityBlocksLen := encodedBlockLen * m
parityBlocks := make([]byte, encodedParityBlocksLen)
inputData = append(inputData, parityBlocks...)
}
// Allocate memory to the "encoded blocks" return buffer
encodedBlocks = make([][]byte, n) // Return buffer
// Necessary to bridge Go to the C world. C requires a 2D array of pointers to
// byte array. "encodedBlocks" is a 2D slice.
pointersToEncodedBlock := make([]*byte, n) // Pointers to encoded blocks.
// Copy data block slices to encoded block buffer
for i := 0; i < k; i++ {
encodedBlocks[i] = inputData[i*encodedBlockLen : (i+1)*encodedBlockLen]
pointersToEncodedBlock[i] = &encodedBlocks[i][0]
}
// Copy erasure block slices to encoded block buffer
for i := k; i < n; i++ {
encodedBlocks[i] = make([]byte, encodedBlockLen)
pointersToEncodedBlock[i] = &encodedBlocks[i][0]
}
// Erasure code the data into K data blocks and M parity
// blocks. Only the parity blocks are filled. Data blocks remain
// intact.
C.ec_encode_data(C.int(encodedBlockLen), C.int(k), C.int(m), e.encodeTbls,
(**C.uchar)(unsafe.Pointer(&pointersToEncodedBlock[:k][0])), // Pointers to data blocks
(**C.uchar)(unsafe.Pointer(&pointersToEncodedBlock[k:][0]))) // Pointers to parity blocks
return encodedBlocks, nil
}

@ -1,82 +0,0 @@
// +build !windows
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package erasure
import (
"bytes"
"testing"
. "gopkg.in/check.v1"
)
type MySuite struct{}
var _ = Suite(&MySuite{})
func Test(t *testing.T) { TestingT(t) }
const (
k = 10
m = 5
)
func corruptChunks(chunks [][]byte, errorIndex []int) [][]byte {
for _, err := range errorIndex {
chunks[err] = nil
}
return chunks
}
func (s *MySuite) TestEncodeDecodeFailure(c *C) {
ep, err := ValidateParams(k, m)
c.Assert(err, IsNil)
data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.")
e := NewErasure(ep)
chunks, err := e.Encode(data)
c.Assert(err, IsNil)
errorIndex := []int{0, 3, 5, 9, 11, 13}
chunks = corruptChunks(chunks, errorIndex)
_, err = e.Decode(chunks, len(data))
c.Assert(err, Not(IsNil))
}
func (s *MySuite) TestEncodeDecodeSuccess(c *C) {
ep, err := ValidateParams(k, m)
c.Assert(err, IsNil)
data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.")
e := NewErasure(ep)
chunks, err := e.Encode(data)
c.Assert(err, IsNil)
errorIndex := []int{0, 3, 5, 9, 13}
chunks = corruptChunks(chunks, errorIndex)
recoveredData, err := e.Decode(chunks, len(data))
c.Assert(err, IsNil)
if !bytes.Equal(data, recoveredData) {
c.Fatalf("Recovered data mismatches with original data")
}
}

@ -1,38 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package erasure
//
// int sizeInt()
// {
// return sizeof(int);
// }
import "C"
import "unsafe"
var (
// See http://golang.org/ref/spec#Numeric_types
sizeInt = int(C.sizeInt())
// sizeInt8 is the byte size of an int8.
sizeInt8 = int(unsafe.Sizeof(int8(0)))
// sizeInt16 is the byte size of an int16.
sizeInt16 = int(unsafe.Sizeof(int16(0)))
// sizeInt32 is the byte size of an int32.
sizeInt32 = int(unsafe.Sizeof(int32(0)))
// sizeInt64 is the byte size of an int64.
sizeInt64 = int(unsafe.Sizeof(int64(0)))
)

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,3 +0,0 @@
# XL
xl - XL distributed erasure coded on-disk format released under [Apache license v2](./LICENSE).

@ -1,49 +0,0 @@
// +build ignore
package xl
// BucketACL - bucket level access control
type BucketACL string
// different types of ACL's currently supported for buckets
const (
BucketPrivate = BucketACL("private")
BucketPublicRead = BucketACL("public-read")
BucketPublicReadWrite = BucketACL("public-read-write")
)
func (b BucketACL) String() string {
return string(b)
}
// IsPrivate - is acl Private
func (b BucketACL) IsPrivate() bool {
return b == BucketACL("private")
}
// IsPublicRead - is acl PublicRead
func (b BucketACL) IsPublicRead() bool {
return b == BucketACL("public-read")
}
// IsPublicReadWrite - is acl PublicReadWrite
func (b BucketACL) IsPublicReadWrite() bool {
return b == BucketACL("public-read-write")
}
// IsValidBucketACL - is provided acl string supported
func IsValidBucketACL(acl string) bool {
switch acl {
case "private":
fallthrough
case "public-read":
fallthrough
case "public-read-write":
return true
case "":
// by default it's "private"
return true
default:
return false
}
}

@ -1,196 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package block
import (
"errors"
"os"
"path/filepath"
"sync"
"syscall"
"github.com/minio/minio/pkg/atomic"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/probe"
)
// Block container for block disk parameters
type Block struct {
lock *sync.Mutex
path string
fsInfo disk.Info
}
// ErrInvalidArgument - invalid argument.
var ErrInvalidArgument = errors.New("Invalid argument")
// New - instantiate new disk
func New(diskPath string) (Block, *probe.Error) {
if diskPath == "" {
return Block{}, probe.NewError(ErrInvalidArgument)
}
st, err := os.Stat(diskPath)
if err != nil {
return Block{}, probe.NewError(err)
}
if !st.IsDir() {
return Block{}, probe.NewError(syscall.ENOTDIR)
}
info, err := disk.GetInfo(diskPath)
if err != nil {
return Block{}, probe.NewError(err)
}
disk := Block{
lock: &sync.Mutex{},
path: diskPath,
fsInfo: info,
}
return disk, nil
}
// IsUsable - is disk usable, alive
func (d Block) IsUsable() bool {
_, err := os.Stat(d.path)
if err != nil {
return false
}
return true
}
// GetPath - get root disk path
func (d Block) GetPath() string {
return d.path
}
// GetFSInfo - get disk filesystem and its usage information
func (d Block) GetFSInfo() disk.Info {
d.lock.Lock()
defer d.lock.Unlock()
info, err := disk.GetInfo(d.path)
if err != nil {
return d.fsInfo
}
d.fsInfo = info
return info
}
// MakeDir - make a directory inside disk root path
func (d Block) MakeDir(dirname string) *probe.Error {
d.lock.Lock()
defer d.lock.Unlock()
if err := os.MkdirAll(filepath.Join(d.path, dirname), 0700); err != nil {
return probe.NewError(err)
}
return nil
}
// ListDir - list a directory inside disk root path, get only directories
func (d Block) ListDir(dirname string) ([]os.FileInfo, *probe.Error) {
d.lock.Lock()
defer d.lock.Unlock()
dir, err := os.Open(filepath.Join(d.path, dirname))
if err != nil {
return nil, probe.NewError(err)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, probe.NewError(err)
}
var directories []os.FileInfo
for _, content := range contents {
// Include only directories, ignore everything else
if content.IsDir() {
directories = append(directories, content)
}
}
return directories, nil
}
// ListFiles - list a directory inside disk root path, get only files
func (d Block) ListFiles(dirname string) ([]os.FileInfo, *probe.Error) {
d.lock.Lock()
defer d.lock.Unlock()
dir, err := os.Open(filepath.Join(d.path, dirname))
if err != nil {
return nil, probe.NewError(err)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, probe.NewError(err)
}
var files []os.FileInfo
for _, content := range contents {
// Include only regular files, ignore everything else
if content.Mode().IsRegular() {
files = append(files, content)
}
}
return files, nil
}
// CreateFile - create a file inside disk root path, replies with a custom *atomic.File which provides atomic writes
func (d Block) CreateFile(filename string) (*atomic.File, *probe.Error) {
d.lock.Lock()
defer d.lock.Unlock()
if filename == "" {
return nil, probe.NewError(ErrInvalidArgument)
}
f, err := atomic.FileCreate(filepath.Join(d.path, filename))
if err != nil {
return nil, probe.NewError(err)
}
return f, nil
}
// Open - read a file inside disk root path
func (d Block) Open(filename string) (*os.File, *probe.Error) {
d.lock.Lock()
defer d.lock.Unlock()
if filename == "" {
return nil, probe.NewError(ErrInvalidArgument)
}
dataFile, err := os.Open(filepath.Join(d.path, filename))
if err != nil {
return nil, probe.NewError(err)
}
return dataFile, nil
}
// OpenFile - Use with caution
func (d Block) OpenFile(filename string, flags int, perm os.FileMode) (*os.File, *probe.Error) {
d.lock.Lock()
defer d.lock.Unlock()
if filename == "" {
return nil, probe.NewError(ErrInvalidArgument)
}
dataFile, err := os.OpenFile(filepath.Join(d.path, filename), flags, perm)
if err != nil {
return nil, probe.NewError(err)
}
return dataFile, nil
}

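A minimal sketch of the removed pkg/xl/block disk abstraction in use, assuming the pre-removal import path; the root directory is illustrative and must already exist.

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/xl/block"
)

func main() {
    d, err := block.New("/tmp/disk1") // illustrative path; must be an existing directory
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("disk root:", d.GetPath(), "fs type:", d.GetFSInfo().FSType)

    if err := d.MakeDir("bucket"); err != nil {
        fmt.Println(err)
        return
    }
    f, err := d.CreateFile("bucket/object.data")
    if err != nil {
        fmt.Println(err)
        return
    }
    f.Close() // Close renames the temporary file into place (see the test below)

    files, err := d.ListFiles("bucket")
    if err != nil {
        fmt.Println(err)
        return
    }
    for _, fi := range files {
        fmt.Println("found:", fi.Name(), fi.Size(), "bytes")
    }
}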
@ -1,83 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package block
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
. "gopkg.in/check.v1"
)
func TestDisk(t *testing.T) { TestingT(t) }
type MyDiskSuite struct {
path string
d Block
}
var _ = Suite(&MyDiskSuite{})
func (s *MyDiskSuite) SetUpSuite(c *C) {
path, err := ioutil.TempDir(os.TempDir(), "disk-")
c.Assert(err, IsNil)
s.path = path
d, perr := New(s.path)
c.Assert(perr, IsNil)
s.d = d
}
func (s *MyDiskSuite) TearDownSuite(c *C) {
os.RemoveAll(s.path)
}
func (s *MyDiskSuite) TestDiskInfo(c *C) {
c.Assert(s.path, Equals, s.d.GetPath())
fsInfo := s.d.GetFSInfo()
c.Assert(fsInfo.FSType, Not(Equals), "UNKNOWN")
}
func (s *MyDiskSuite) TestDiskCreateDir(c *C) {
c.Assert(s.d.MakeDir("hello"), IsNil)
}
func (s *MyDiskSuite) TestDiskCreateFile(c *C) {
f, err := s.d.CreateFile("hello1")
c.Assert(err, IsNil)
c.Assert(f.Name(), Not(Equals), filepath.Join(s.path, "hello1"))
// close renames the file
f.Close()
// Open should be a success
_, err = s.d.Open("hello1")
c.Assert(err, IsNil)
}
func (s *MyDiskSuite) TestDiskOpen(c *C) {
f1, err := s.d.CreateFile("hello2")
c.Assert(err, IsNil)
c.Assert(f1.Name(), Not(Equals), filepath.Join(s.path, "hello2"))
// close renames the file
f1.Close()
f2, err := s.d.Open("hello2")
c.Assert(err, IsNil)
c.Assert(f2.Name(), Equals, filepath.Join(s.path, "hello2"))
defer f2.Close()
}

@ -1,641 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"bytes"
"fmt"
"hash"
"io"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"crypto/md5"
"encoding/hex"
"encoding/json"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/signature4"
"github.com/minio/minio/pkg/xl/block"
)
const (
blockSize = 10 * 1024 * 1024
)
// internal struct carrying bucket specific information
type bucket struct {
name string
acl string
time time.Time
xlName string
nodes map[string]node
lock *sync.Mutex
}
// newBucket - instantiate a new bucket
func newBucket(bucketName, aclType, xlName string, nodes map[string]node) (bucket, BucketMetadata, *probe.Error) {
if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(xlName) == "" {
return bucket{}, BucketMetadata{}, probe.NewError(InvalidArgument{})
}
b := bucket{}
t := time.Now().UTC()
b.name = bucketName
b.acl = aclType
b.time = t
b.xlName = xlName
b.nodes = nodes
b.lock = new(sync.Mutex)
metadata := BucketMetadata{}
metadata.Version = bucketMetadataVersion
metadata.Name = bucketName
metadata.ACL = BucketACL(aclType)
metadata.Created = t
metadata.Metadata = make(map[string]string)
metadata.BucketObjects = make(map[string]struct{})
return b, metadata, nil
}
// getBucketName -
func (b bucket) getBucketName() string {
return b.name
}
// getBucketMetadataReaders - open the bucket metadata config on every listed disk for reading
func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
readers := make(map[int]io.ReadCloser)
var disks map[int]block.Block
var err *probe.Error
for _, node := range b.nodes {
disks, err = node.ListDisks()
if err != nil {
return nil, err.Trace()
}
}
var bucketMetaDataReader io.ReadCloser
for order, disk := range disks {
bucketMetaDataReader, err = disk.Open(filepath.Join(b.xlName, bucketMetadataConfig))
if err != nil {
continue
}
readers[order] = bucketMetaDataReader
}
if err != nil {
return nil, err.Trace()
}
return readers, nil
}
// getBucketMetadata - read and decode bucket metadata from the first reader that parses cleanly
func (b bucket) getBucketMetadata() (*AllBuckets, *probe.Error) {
metadata := new(AllBuckets)
var readers map[int]io.ReadCloser
{
var err *probe.Error
readers, err = b.getBucketMetadataReaders()
if err != nil {
return nil, err.Trace()
}
}
for _, reader := range readers {
defer reader.Close()
}
var err error
for _, reader := range readers {
jenc := json.NewDecoder(reader)
if err = jenc.Decode(metadata); err == nil {
return metadata, nil
}
}
return nil, probe.NewError(err)
}
// GetObjectMetadata - get metadata for an object
func (b bucket) GetObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
return b.readObjectMetadata(normalizeObjectName(objectName))
}
// ListObjects - list all objects
func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
if maxkeys <= 0 {
maxkeys = 1000
}
var isTruncated bool
var objects []string
bucketMetadata, err := b.getBucketMetadata()
if err != nil {
return ListObjectsResults{}, err.Trace()
}
for objectName := range bucketMetadata.Buckets[b.getBucketName()].Multiparts {
if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
if objectName > marker {
objects = append(objects, objectName)
}
}
}
for objectName := range bucketMetadata.Buckets[b.getBucketName()].BucketObjects {
if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
if objectName > marker {
objects = append(objects, objectName)
}
}
}
if strings.TrimSpace(prefix) != "" {
objects = TrimPrefix(objects, prefix)
}
var prefixes []string
var filteredObjects []string
filteredObjects = objects
if strings.TrimSpace(delimiter) != "" {
filteredObjects = HasNoDelimiter(objects, delimiter)
prefixes = HasDelimiter(objects, delimiter)
prefixes = SplitDelimiter(prefixes, delimiter)
prefixes = SortUnique(prefixes)
}
var results []string
var commonPrefixes []string
for _, commonPrefix := range prefixes {
commonPrefixes = append(commonPrefixes, prefix+commonPrefix)
}
filteredObjects = RemoveDuplicates(filteredObjects)
sort.Strings(filteredObjects)
for _, objectName := range filteredObjects {
if len(results) >= maxkeys {
isTruncated = true
break
}
results = append(results, prefix+objectName)
}
results = RemoveDuplicates(results)
commonPrefixes = RemoveDuplicates(commonPrefixes)
sort.Strings(commonPrefixes)
listObjects := ListObjectsResults{}
listObjects.Objects = make(map[string]ObjectMetadata)
listObjects.CommonPrefixes = commonPrefixes
listObjects.IsTruncated = isTruncated
for _, objectName := range results {
objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName))
if err != nil {
return ListObjectsResults{}, err.Trace()
}
listObjects.Objects[objectName] = objMetadata
}
return listObjects, nil
}
// ReadObject - open an object to read
func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
reader, writer := io.Pipe()
// get list of objects
bucketMetadata, err := b.getBucketMetadata()
if err != nil {
return nil, 0, err.Trace()
}
// check if object exists
if _, ok := bucketMetadata.Buckets[b.getBucketName()].BucketObjects[objectName]; !ok {
return nil, 0, probe.NewError(ObjectNotFound{Object: objectName})
}
objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName))
if err != nil {
return nil, 0, err.Trace()
}
// read and reply back to GetObject() request in a go-routine
go b.readObjectData(normalizeObjectName(objectName), writer, objMetadata)
return reader, objMetadata.Size, nil
}
// WriteObject - write a new object into bucket
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *signature4.Sign) (ObjectMetadata, *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
if objectName == "" || objectData == nil {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
writers, err := b.getObjectWriters(normalizeObjectName(objectName), "data")
if err != nil {
return ObjectMetadata{}, err.Trace()
}
sumMD5 := md5.New()
sum512 := sha512.New()
var sum256 hash.Hash
var mwriter io.Writer
if signature != nil {
sum256 = sha256.New()
mwriter = io.MultiWriter(sumMD5, sum256, sum512)
} else {
mwriter = io.MultiWriter(sumMD5, sum512)
}
objMetadata := ObjectMetadata{}
objMetadata.Version = objectMetadataVersion
objMetadata.Created = time.Now().UTC()
// if there is only one writer, do not erasure code
switch len(writers) == 1 {
case true:
mw := io.MultiWriter(writers[0], mwriter)
totalLength, err := io.Copy(mw, objectData)
if err != nil {
CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.NewError(err)
}
objMetadata.Size = totalLength
case false:
// calculate data and parity dictated by total number of writers
k, m, err := b.getDataAndParity(len(writers))
if err != nil {
CleanupWritersOnError(writers)
return ObjectMetadata{}, err.Trace()
}
// write encoded data with k, m and writers
chunkCount, totalLength, err := b.writeObjectData(k, m, writers, objectData, size, mwriter)
if err != nil {
CleanupWritersOnError(writers)
return ObjectMetadata{}, err.Trace()
}
/// xlMetadata section
objMetadata.BlockSize = blockSize
objMetadata.ChunkCount = chunkCount
objMetadata.DataDisks = k
objMetadata.ParityDisks = m
objMetadata.Size = int64(totalLength)
}
objMetadata.Bucket = b.getBucketName()
objMetadata.Object = objectName
dataMD5sum := sumMD5.Sum(nil)
dataSHA512sum := sum512.Sum(nil)
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sum256.Sum(nil)))
if err != nil {
// error occurred while doing signature calculation, we return and also cleanup any temporary writers.
CleanupWritersOnError(writers)
return ObjectMetadata{}, err.Trace()
}
if !ok {
// purge all writers, when control flow reaches here
//
// Signature mismatch occurred; all temp files are removed and all data purged.
CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
}
}
objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
objMetadata.SHA512Sum = hex.EncodeToString(dataSHA512sum)
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objMetadata.MD5Sum); err != nil {
return ObjectMetadata{}, err.Trace()
}
}
objMetadata.Metadata = metadata
// write object specific metadata
if err := b.writeObjectMetadata(normalizeObjectName(objectName), objMetadata); err != nil {
// purge all writers, when control flow reaches here
CleanupWritersOnError(writers)
return ObjectMetadata{}, err.Trace()
}
// close all writers, when control flow reaches here
for _, writer := range writers {
writer.Close()
}
return objMetadata, nil
}
// isMD5SumEqual - returns error if md5sum mismatches, otherwise returns `nil`
func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
if err != nil {
return probe.NewError(err)
}
actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
if err != nil {
return probe.NewError(err)
}
if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
return probe.NewError(BadDigest{})
}
return nil
}
return probe.NewError(InvalidArgument{})
}
// writeObjectMetadata - write additional object metadata
func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadata) *probe.Error {
if objMetadata.Object == "" {
return probe.NewError(InvalidArgument{})
}
objMetadataWriters, err := b.getObjectWriters(objectName, objectMetadataConfig)
if err != nil {
return err.Trace()
}
for _, objMetadataWriter := range objMetadataWriters {
jenc := json.NewEncoder(objMetadataWriter)
if err := jenc.Encode(&objMetadata); err != nil {
// Close writers and purge all temporary entries
CleanupWritersOnError(objMetadataWriters)
return probe.NewError(err)
}
}
for _, objMetadataWriter := range objMetadataWriters {
objMetadataWriter.Close()
}
return nil
}
// readObjectMetadata - read object metadata
func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) {
if objectName == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
objMetadata := ObjectMetadata{}
objMetadataReaders, err := b.getObjectReaders(objectName, objectMetadataConfig)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
for _, objMetadataReader := range objMetadataReaders {
defer objMetadataReader.Close()
}
{
var err error
for _, objMetadataReader := range objMetadataReaders {
jdec := json.NewDecoder(objMetadataReader)
if err = jdec.Decode(&objMetadata); err == nil {
return objMetadata, nil
}
}
return ObjectMetadata{}, probe.NewError(err)
}
}
// TODO - This a temporary normalization of objectNames, need to find a better way
//
// normalizeObjectName - all objectNames with "/" get normalized to a simple objectName
//
// example:
// user provided value - "this/is/my/deep/directory/structure"
// xl normalized value - "this-is-my-deep-directory-structure"
//
func normalizeObjectName(objectName string) string {
// replace every '/' with '-'
return strings.Replace(objectName, "/", "-", -1)
}
// getDataAndParity - calculate k, m (data and parity) values from number of disks
func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err *probe.Error) {
if totalWriters <= 1 {
return 0, 0, probe.NewError(InvalidArgument{})
}
quotient := totalWriters / 2 // not using float or abs to let integer round off to lower value
// quotient cannot be bigger than (255 / 2) = 127
if quotient > 127 {
return 0, 0, probe.NewError(ParityOverflow{})
}
remainder := totalWriters % 2 // will be 1 for odd and 0 for even numbers
k = uint8(quotient + remainder)
m = uint8(quotient)
return k, m, nil
}
// writeObjectData - erasure encode incoming data in chunks and fan the encoded blocks out to the writers
func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, hashWriter io.Writer) (int, int, *probe.Error) {
encoder, err := newEncoder(k, m)
if err != nil {
return 0, 0, err.Trace()
}
chunkSize := int64(10 * 1024 * 1024)
chunkCount := 0
totalLength := 0
var e error
for e == nil {
var length int
inputData := make([]byte, chunkSize)
length, e = objectData.Read(inputData)
if length != 0 {
encodedBlocks, err := encoder.Encode(inputData[0:length])
if err != nil {
return 0, 0, err.Trace()
}
if _, err := hashWriter.Write(inputData[0:length]); err != nil {
return 0, 0, probe.NewError(err)
}
for blockIndex, block := range encodedBlocks {
errCh := make(chan error, 1)
go func(writer io.Writer, reader io.Reader, errCh chan<- error) {
defer close(errCh)
_, err := io.Copy(writer, reader)
errCh <- err
}(writers[blockIndex], bytes.NewReader(block), errCh)
if err := <-errCh; err != nil {
// Returning the error is fine here; the caller's CleanupWritersOnError() will clean up the writers
return 0, 0, probe.NewError(err)
}
}
totalLength += length
chunkCount = chunkCount + 1
}
}
if e != io.EOF {
return 0, 0, probe.NewError(e)
}
return chunkCount, totalLength, nil
}
// readObjectData - read the object back from its readers, decode if erasure coded, verify checksums and stream it to the pipe writer
func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
readers, err := b.getObjectReaders(objectName, "data")
if err != nil {
writer.CloseWithError(probe.WrapError(err))
return
}
for _, reader := range readers {
defer reader.Close()
}
var expected512Sum, expectedMd5sum []byte
{
var err error
expectedMd5sum, err = hex.DecodeString(objMetadata.MD5Sum)
if err != nil {
writer.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
expected512Sum, err = hex.DecodeString(objMetadata.SHA512Sum)
if err != nil {
writer.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
}
hasher := md5.New()
sum512hasher := sha512.New() // must match the SHA512 sum stored in object metadata
mwriter := io.MultiWriter(writer, hasher, sum512hasher)
switch len(readers) > 1 {
case true:
encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks)
if err != nil {
writer.CloseWithError(probe.WrapError(err))
return
}
totalLeft := objMetadata.Size
for i := 0; i < objMetadata.ChunkCount; i++ {
decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer)
if err != nil {
writer.CloseWithError(probe.WrapError(err))
return
}
if _, err := io.Copy(mwriter, bytes.NewReader(decodedData)); err != nil {
writer.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
totalLeft = totalLeft - int64(objMetadata.BlockSize)
}
case false:
_, err := io.Copy(writer, readers[0])
if err != nil {
writer.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
}
// check if decodedData md5sum matches
if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
writer.CloseWithError(probe.WrapError(probe.NewError(ChecksumMismatch{})))
return
}
if !bytes.Equal(expected512Sum, sum512hasher.Sum(nil)) {
writer.CloseWithError(probe.WrapError(probe.NewError(ChecksumMismatch{})))
return
}
writer.Close()
return
}
// decodeEncodedData - read one encoded chunk from all readers and erasure decode it back to plain data
func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, *probe.Error) {
var curBlockSize int64
if blockSize < totalLeft {
curBlockSize = blockSize
} else {
curBlockSize = totalLeft
}
curChunkSize, err := encoder.GetEncodedBlockLen(int(curBlockSize))
if err != nil {
return nil, err.Trace()
}
encodedBytes := make([][]byte, encoder.k+encoder.m)
errCh := make(chan error, len(readers))
var errRet error
var readCnt int
for i, reader := range readers {
go func(reader io.Reader, i int) {
encodedBytes[i] = make([]byte, curChunkSize)
_, err := io.ReadFull(reader, encodedBytes[i])
if err != nil {
encodedBytes[i] = nil
errCh <- err
return
}
errCh <- nil
}(reader, i)
// read through errCh for any errors
err := <-errCh
if err != nil {
errRet = err
} else {
readCnt++
}
}
if readCnt < int(encoder.k) {
return nil, probe.NewError(errRet)
}
decodedData, err := encoder.Decode(encodedBytes, int(curBlockSize))
if err != nil {
return nil, err.Trace()
}
return decodedData, nil
}
// getObjectReaders - open the per-disk slices of an object (or its metadata file) for reading
func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, *probe.Error) {
readers := make(map[int]io.ReadCloser)
var disks map[int]block.Block
var err *probe.Error
nodeSlice := 0
for _, node := range b.nodes {
disks, err = node.ListDisks()
if err != nil {
return nil, err.Trace()
}
for order, disk := range disks {
var objectSlice io.ReadCloser
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
objectPath := filepath.Join(b.xlName, bucketSlice, objectName, objectMeta)
objectSlice, err = disk.Open(objectPath)
if err == nil {
readers[order] = objectSlice
}
}
nodeSlice = nodeSlice + 1
}
if err != nil {
return nil, err.Trace()
}
return readers, nil
}
// getObjectWriters - create the per-disk slices of an object (or its metadata file) for writing
func (b bucket) getObjectWriters(objectName, objectMeta string) ([]io.WriteCloser, *probe.Error) {
var writers []io.WriteCloser
nodeSlice := 0
for _, node := range b.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, err.Trace()
}
writers = make([]io.WriteCloser, len(disks))
for order, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
objectPath := filepath.Join(b.xlName, bucketSlice, objectName, objectMeta)
objectSlice, err := disk.CreateFile(objectPath)
if err != nil {
return nil, err.Trace()
}
writers[order] = objectSlice
}
nodeSlice = nodeSlice + 1
}
return writers, nil
}

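The data/parity split in getDataAndParity above is plain quotient/remainder arithmetic on the number of writers; a small standalone sketch (not part of the removed code) that mirrors it can help sanity-check the expected k and m values.

package main

import "fmt"

// dataAndParity mirrors the removed bucket.getDataAndParity:
// k = ceil(totalWriters/2), m = floor(totalWriters/2).
func dataAndParity(totalWriters int) (k, m uint8) {
    quotient := totalWriters / 2
    remainder := totalWriters % 2
    return uint8(quotient + remainder), uint8(quotient)
}

func main() {
    for _, disks := range []int{4, 8, 9, 16} {
        k, m := dataAndParity(disks)
        fmt.Printf("disks=%d -> data=%d parity=%d\n", disks, k, m)
    }
    // disks=4  -> data=2 parity=2
    // disks=8  -> data=4 parity=4
    // disks=9  -> data=5 parity=4
    // disks=16 -> data=8 parity=8
}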
@ -1,204 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package data implements in memory caching methods for data
package data
import (
"container/list"
"sync"
"time"
)
var noExpiration = time.Duration(0)
// Cache holds the required variables to compose an in memory cache system
// which also provides a maxSize based eviction mechanism
type Cache struct {
// Mutex is used for handling the concurrent
// read/write requests for cache
sync.Mutex
// items hold the cached objects
items *list.List
// reverseItems maps a key to its element in the items list
reverseItems map[interface{}]*list.Element
// maxSize is a total size for overall cache
maxSize uint64
// currentSize is a current size in memory
currentSize uint64
// OnEvicted - callback function for eviction
OnEvicted func(a ...interface{})
// totalEvicted counter to keep track of total expirations
totalEvicted int
}
// Stats current cache statistics
type Stats struct {
Bytes uint64
Items int
Evicted int
}
type element struct {
key interface{}
value []byte
}
// NewCache creates an inmemory cache
//
// maxSize is used for evicting the oldest entries before we run out of memory
func NewCache(maxSize uint64) *Cache {
return &Cache{
items: list.New(),
reverseItems: make(map[interface{}]*list.Element),
maxSize: maxSize,
}
}
// SetMaxSize set a new max size
func (r *Cache) SetMaxSize(maxSize uint64) {
r.Lock()
defer r.Unlock()
r.maxSize = maxSize
return
}
// Stats get current cache statistics
func (r *Cache) Stats() Stats {
return Stats{
Bytes: r.currentSize,
Items: r.items.Len(),
Evicted: r.totalEvicted,
}
}
// Get returns a value of a given key if it exists
func (r *Cache) Get(key interface{}) ([]byte, bool) {
r.Lock()
defer r.Unlock()
ele, hit := r.reverseItems[key]
if !hit {
return nil, false
}
r.items.MoveToFront(ele)
return ele.Value.(*element).value, true
}
// Len returns length of the value of a given key, returns zero if key doesn't exist
func (r *Cache) Len(key interface{}) int {
r.Lock()
defer r.Unlock()
_, ok := r.reverseItems[key]
if !ok {
return 0
}
return len(r.reverseItems[key].Value.(*element).value)
}
// Append will append new data to an existing key,
// if key doesn't exist it behaves like Set()
func (r *Cache) Append(key interface{}, value []byte) bool {
r.Lock()
defer r.Unlock()
valueLen := uint64(len(value))
if r.maxSize > 0 {
// check if the size of the object is not bigger than the
// capacity of the cache
if valueLen > r.maxSize {
return false
}
// evict the oldest entry only if adding this value would exceed the maxSize threshold
for (r.currentSize + valueLen) > r.maxSize {
r.doDeleteOldest()
break
}
}
ele, hit := r.reverseItems[key]
if !hit {
ele = r.items.PushFront(&element{key, value})
r.currentSize += valueLen
r.reverseItems[key] = ele
return true
}
r.items.MoveToFront(ele)
r.currentSize += valueLen
ele.Value.(*element).value = append(ele.Value.(*element).value, value...)
return true
}
// Set will persist a value to the cache
func (r *Cache) Set(key interface{}, value []byte) bool {
r.Lock()
defer r.Unlock()
valueLen := uint64(len(value))
if r.maxSize > 0 {
// check if the size of the object is not bigger than the
// capacity of the cache
if valueLen > r.maxSize {
return false
}
// evict the oldest entries until adding this value no longer exceeds the maxSize threshold
for (r.currentSize + valueLen) > r.maxSize {
r.doDeleteOldest()
}
}
if _, hit := r.reverseItems[key]; hit {
return false
}
ele := r.items.PushFront(&element{key, value})
r.currentSize += valueLen
r.reverseItems[key] = ele
return true
}
// Delete deletes a given key if exists
func (r *Cache) Delete(key interface{}) {
r.Lock()
defer r.Unlock()
ele, ok := r.reverseItems[key]
if !ok {
return
}
if ele != nil {
r.currentSize -= uint64(len(r.reverseItems[key].Value.(*element).value))
r.items.Remove(ele)
delete(r.reverseItems, key)
r.totalEvicted++
if r.OnEvicted != nil {
r.OnEvicted(key)
}
}
}
func (r *Cache) doDeleteOldest() {
ele := r.items.Back()
if ele != nil {
r.currentSize -= uint64(len(r.reverseItems[ele.Value.(*element).key].Value.(*element).value))
delete(r.reverseItems, ele.Value.(*element).key)
r.items.Remove(ele)
r.totalEvicted++
if r.OnEvicted != nil {
r.OnEvicted(ele.Value.(*element).key)
}
}
}

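A minimal sketch exercising the removed data.Cache beyond what the test that follows covers, in particular Append and maxSize based eviction; the cache size, keys and payloads are illustrative and the pre-removal import path is assumed.

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/xl/cache/data"
)

func main() {
    // A tiny cache so eviction is easy to observe (sizes are illustrative).
    cache := data.NewCache(16)
    cache.OnEvicted = func(a ...interface{}) { fmt.Println("evicted:", a[0]) }

    cache.Set("a", []byte("0123456789")) // 10 bytes
    cache.Append("a", []byte("xyz"))     // grows the same entry to 13 bytes
    cache.Set("b", []byte("0123456789")) // pushes the total past 16, evicting the oldest entry ("a")

    _, ok := cache.Get("a")
    fmt.Println("a present:", ok, "stats:", cache.Stats())
}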
@ -1,45 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package data
import (
"testing"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) TestCache(c *C) {
cache := NewCache(1000)
data := []byte("Hello, world!")
ok := cache.Set("filename", data)
c.Assert(ok, Equals, true)
storedata, ok := cache.Get("filename")
c.Assert(ok, Equals, true)
c.Assert(data, DeepEquals, storedata)
cache.Delete("filename")
_, ok = cache.Get("filename")
c.Assert(ok, Equals, false)
}

@ -1,110 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package metadata implements in memory caching methods for metadata information
package metadata
import (
"sync"
"time"
)
var noExpiration = time.Duration(0)
// Cache holds the required variables to compose an in memory cache system
// for metadata information
type Cache struct {
// Mutex is used for handling the concurrent
// read/write requests for cache
sync.Mutex
// items hold the cached objects
items map[string]interface{}
// updatedAt holds the time at which the related item was last updated
updatedAt map[string]time.Time
}
// Stats current cache statistics
type Stats struct {
Items int
}
// NewCache creates an inmemory cache
//
func NewCache() *Cache {
return &Cache{
items: make(map[string]interface{}),
updatedAt: map[string]time.Time{},
}
}
// Stats get current cache statistics
func (r *Cache) Stats() Stats {
return Stats{
Items: len(r.items),
}
}
// GetAll returns all the items
func (r *Cache) GetAll() map[string]interface{} {
r.Lock()
defer r.Unlock()
// return a copy so callers cannot mutate the cache's internal map
items := make(map[string]interface{}, len(r.items))
for k, v := range r.items {
items[k] = v
}
return items
}
// Get returns a value of a given key if it exists
func (r *Cache) Get(key string) interface{} {
r.Lock()
defer r.Unlock()
value, ok := r.items[key]
if !ok {
return nil
}
return value
}
// Exists returns true if key exists
func (r *Cache) Exists(key string) bool {
r.Lock()
defer r.Unlock()
_, ok := r.items[key]
return ok
}
// Set will persist a value to the cache
func (r *Cache) Set(key string, value interface{}) bool {
r.Lock()
defer r.Unlock()
r.items[key] = value
return true
}
// Delete deletes a given key if exists
func (r *Cache) Delete(key string) {
r.Lock()
defer r.Unlock()
r.doDelete(key)
}
func (r *Cache) doDelete(key string) {
if _, ok := r.items[key]; ok {
delete(r.items, key)
delete(r.updatedAt, key)
}
}

@ -1,46 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package metadata
import (
"testing"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) TestCache(c *C) {
cache := NewCache()
data := []byte("Hello, world!")
ok := cache.Set("filename", data)
c.Assert(ok, Equals, true)
storedata := cache.Get("filename")
c.Assert(storedata, NotNil)
c.Assert(data, DeepEquals, storedata)
cache.Delete("filename")
ok = cache.Exists("filename")
c.Assert(ok, Equals, false)
}

@ -1,192 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"bufio"
"bytes"
"io"
"regexp"
"sort"
"strings"
"unicode/utf8"
"github.com/minio/minio/pkg/atomic"
)
// IsValidXL - verify xl name is correct
func IsValidXL(xlName string) bool {
if len(xlName) < 3 || len(xlName) > 63 {
return false
}
if xlName[0] == '.' || xlName[len(xlName)-1] == '.' {
return false
}
if match, _ := regexp.MatchString("\\.\\.", xlName); match == true {
return false
}
// We don't support xlNames with '.' in them
match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", xlName)
return match
}
// IsValidBucket - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func IsValidBucket(bucket string) bool {
if len(bucket) < 3 || len(bucket) > 63 {
return false
}
if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
return false
}
if match, _ := regexp.MatchString("\\.\\.", bucket); match == true {
return false
}
// We don't support buckets with '.' in them
match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
return match
}
// IsValidObjectName - verify object name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func IsValidObjectName(object string) bool {
if strings.TrimSpace(object) == "" {
return false
}
if len(object) > 1024 || len(object) == 0 {
return false
}
if !utf8.ValidString(object) {
return false
}
return true
}
// IsValidPrefix - verify prefix name is correct, an empty prefix is valid
func IsValidPrefix(prefix string) bool {
if strings.TrimSpace(prefix) == "" {
return true
}
return IsValidObjectName(prefix)
}
// ProxyWriter implements io.Writer to trap written bytes
type ProxyWriter struct {
writer io.Writer
writtenBytes []byte
}
func (r *ProxyWriter) Write(p []byte) (n int, err error) {
n, err = r.writer.Write(p)
if err != nil {
return
}
r.writtenBytes = append(r.writtenBytes, p[0:n]...)
return
}
// NewProxyWriter - wrap around a given writer with ProxyWriter
func NewProxyWriter(w io.Writer) *ProxyWriter {
return &ProxyWriter{writer: w, writtenBytes: nil}
}
// Delimiter delims the string at delimiter
func Delimiter(object, delimiter string) string {
readBuffer := bytes.NewBufferString(object)
reader := bufio.NewReader(readBuffer)
stringReader := strings.NewReader(delimiter)
delimited, _ := stringReader.ReadByte()
delimitedStr, _ := reader.ReadString(delimited)
return delimitedStr
}
// RemoveDuplicates removes duplicate elements from a slice
func RemoveDuplicates(slice []string) []string {
newSlice := []string{}
seen := make(map[string]struct{})
for _, val := range slice {
if _, ok := seen[val]; !ok {
newSlice = append(newSlice, val)
seen[val] = struct{}{} // avoiding byte allocation
}
}
return newSlice
}
// TrimPrefix trims off a prefix string from all the elements in a given slice
func TrimPrefix(objects []string, prefix string) []string {
var results []string
for _, object := range objects {
results = append(results, strings.TrimPrefix(object, prefix))
}
return results
}
// HasNoDelimiter provides a new slice from an input slice which has elements without delimiter
func HasNoDelimiter(objects []string, delim string) []string {
var results []string
for _, object := range objects {
if !strings.Contains(object, delim) {
results = append(results, object)
}
}
return results
}
// HasDelimiter provides a new slice from an input slice which has elements with a delimiter
func HasDelimiter(objects []string, delim string) []string {
var results []string
for _, object := range objects {
if strings.Contains(object, delim) {
results = append(results, object)
}
}
return results
}
// SplitDelimiter provides a new slice from an input slice by splitting a delimiter
func SplitDelimiter(objects []string, delim string) []string {
var results []string
for _, object := range objects {
parts := strings.Split(object, delim)
results = append(results, parts[0]+delim)
}
return results
}
// SortUnique sort a slice in lexical order, removing duplicate elements
func SortUnique(objects []string) []string {
objectMap := make(map[string]string)
for _, v := range objects {
objectMap[v] = v
}
var results []string
for k := range objectMap {
results = append(results, k)
}
sort.Strings(results)
return results
}
// CleanupWritersOnError purge writers on error
func CleanupWritersOnError(writers []io.WriteCloser) {
for _, writer := range writers {
writer.(*atomic.File).CloseAndPurge()
}
}

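A minimal sketch of how the removed ListObjects path (bucket.go above) combined these helpers to separate leaf objects from common prefixes; the object names are illustrative, the pre-removal import path is assumed, and the file's +build ignore tag means this assumes it is actually built.

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/xl"
)

func main() {
    objects := []string{"photos/2015/jan.png", "photos/2015/feb.png", "docs/readme.md", "notes.txt"}
    delimiter := "/"

    // Split into leaf objects and delimited groups, as ListObjects did.
    leaves := xl.HasNoDelimiter(objects, delimiter)
    prefixes := xl.SortUnique(xl.SplitDelimiter(xl.HasDelimiter(objects, delimiter), delimiter))

    fmt.Println("objects:", xl.RemoveDuplicates(leaves)) // [notes.txt]
    fmt.Println("common prefixes:", prefixes)            // [docs/ photos/]
}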
@ -1,82 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"path/filepath"
"github.com/minio/go-homedir"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick"
)
// getXLConfigPath get xl config file path
func getXLConfigPath() (string, *probe.Error) {
if customConfigPath != "" {
return customConfigPath, nil
}
homeDir, err := homedir.Dir()
if err != nil {
return "", probe.NewError(err)
}
xlConfigPath := filepath.Join(homeDir, ".minio", "xl.json")
return xlConfigPath, nil
}
// internal variable only accessed via get/set methods
var customConfigPath string
// SetXLConfigPath - set custom xl config path
func SetXLConfigPath(configPath string) {
customConfigPath = configPath
}
// SaveConfig save xl config
func SaveConfig(a *Config) *probe.Error {
xlConfigPath, err := getXLConfigPath()
if err != nil {
return err.Trace()
}
qc, err := quick.New(a)
if err != nil {
return err.Trace()
}
if err := qc.Save(xlConfigPath); err != nil {
return err.Trace()
}
return nil
}
// LoadConfig load xl config
func LoadConfig() (*Config, *probe.Error) {
xlConfigPath, err := getXLConfigPath()
if err != nil {
return nil, err.Trace()
}
a := &Config{}
a.Version = "0.0.1"
qc, err := quick.New(a)
if err != nil {
return nil, err.Trace()
}
if err := qc.Load(xlConfigPath); err != nil {
return nil, err.Trace()
}
return qc.Data().(*Config), nil
}

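A minimal sketch of the removed config helpers; the config path is illustrative, only the Version field (set in LoadConfig above) is assumed on Config, and the file's +build ignore tag means this assumes it is actually built.

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/xl"
)

func main() {
    // Point the package at a non-default config location (path is illustrative).
    xl.SetXLConfigPath("/tmp/xl.json")

    config, err := xl.LoadConfig()
    if err != nil {
        fmt.Println("load failed:", err)
        return
    }
    fmt.Println("config version:", config.Version)

    // Persist the (possibly modified) configuration back to disk.
    if err := xl.SaveConfig(config); err != nil {
        fmt.Println("save failed:", err)
    }
}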
@ -1,159 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import "time"
// ObjectMetadata container for object on xl system
type ObjectMetadata struct {
// version
Version string `json:"version"`
// object metadata
Created time.Time `json:"created"`
Bucket string `json:"bucket"`
Object string `json:"object"`
Size int64 `json:"size"`
// erasure
DataDisks uint8 `json:"sys.erasureK"`
ParityDisks uint8 `json:"sys.erasureM"`
BlockSize int `json:"sys.blockSize"`
ChunkCount int `json:"sys.chunkCount"`
// checksums
MD5Sum string `json:"sys.md5sum"`
SHA512Sum string `json:"sys.sha512sum"`
// metadata
Metadata map[string]string `json:"metadata"`
}
// Metadata container for xl metadata
type Metadata struct {
Version string `json:"version"`
}
// AllBuckets container for all buckets
type AllBuckets struct {
Version string `json:"version"`
Buckets map[string]BucketMetadata `json:"buckets"`
}
// BucketMetadata container for bucket level metadata
type BucketMetadata struct {
Version string `json:"version"`
Name string `json:"name"`
ACL BucketACL `json:"acl"`
Created time.Time `json:"created"`
Multiparts map[string]MultiPartSession `json:"multiparts"`
Metadata map[string]string `json:"metadata"`
BucketObjects map[string]struct{} `json:"objects"`
}
// ListObjectsResults container for list objects response
type ListObjectsResults struct {
Objects map[string]ObjectMetadata `json:"objects"`
CommonPrefixes []string `json:"commonPrefixes"`
IsTruncated bool `json:"isTruncated"`
}
// MultiPartSession multipart session
type MultiPartSession struct {
UploadID string `json:"uploadId"`
Initiated time.Time `json:"initiated"`
Parts map[string]PartMetadata `json:"parts"`
TotalParts int `json:"total-parts"`
}
// PartMetadata - various types of individual part resources
type PartMetadata struct {
PartNumber int
LastModified time.Time
ETag string
Size int64
}
// CompletePart - completed part container
type CompletePart struct {
PartNumber int
ETag string
}
// completedParts is a sortable interface for Part slice
type completedParts []CompletePart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// CompleteMultipartUpload container for completing multipart upload
type CompleteMultipartUpload struct {
Part []CompletePart
}
// ObjectResourcesMetadata - various types of object resources
type ObjectResourcesMetadata struct {
Bucket string
EncodingType string
Key string
UploadID string
StorageClass string
PartNumberMarker int
NextPartNumberMarker int
MaxParts int
IsTruncated bool
Part []*PartMetadata
}
// UploadMetadata container capturing metadata on in progress multipart upload in a given bucket
type UploadMetadata struct {
Key string
UploadID string
StorageClass string
Initiated time.Time
}
// BucketMultipartResourcesMetadata - various types of bucket resources for in progress multipart uploads
type BucketMultipartResourcesMetadata struct {
KeyMarker string
UploadIDMarker string
NextKeyMarker string
NextUploadIDMarker string
EncodingType string
MaxUploads int
IsTruncated bool
Upload []*UploadMetadata
Prefix string
Delimiter string
CommonPrefixes []string
}
// BucketResourcesMetadata - various types of bucket resources
type BucketResourcesMetadata struct {
Prefix string
Marker string
NextMarker string
Maxkeys int
EncodingType string
Delimiter string
IsTruncated bool
CommonPrefixes []string
}

@ -1,3 +0,0 @@
// +build ignore
package xl

@ -1,73 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
encoding "github.com/minio/minio/pkg/erasure"
"github.com/minio/minio/pkg/probe"
)
// encoder internal struct
type encoder struct {
encoder *encoding.Erasure
k, m uint8
}
// newEncoder - instantiate a new encoder
func newEncoder(k, m uint8) (encoder, *probe.Error) {
e := encoder{}
params, err := encoding.ValidateParams(k, m)
if err != nil {
return encoder{}, probe.NewError(err)
}
e.encoder = encoding.NewErasure(params)
e.k = k
e.m = m
return e, nil
}
// TODO - think again if this is needed
// GetEncodedBlockLen - wrapper around erasure function with the same name
func (e encoder) GetEncodedBlockLen(dataLength int) (int, *probe.Error) {
if dataLength <= 0 {
return 0, probe.NewError(InvalidArgument{})
}
return encoding.GetEncodedBlockLen(dataLength, e.k), nil
}
// Encode - erasure code input bytes
func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) {
if data == nil {
return nil, probe.NewError(InvalidArgument{})
}
encodedData, err := e.encoder.Encode(data)
if err != nil {
return nil, probe.NewError(err)
}
return encodedData, nil
}
// Decode - erasure decode input encoded bytes
func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, *probe.Error) {
decodedData, err := e.encoder.Decode(encodedData, dataLength)
if err != nil {
return nil, probe.NewError(err)
}
return decodedData, nil
}

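A minimal in-package sketch (an illustrative encoder_test.go, assuming the +build ignore tagged files are actually built) that round-trips a buffer through the removed encoder; dropping up to m shards before Decode mirrors how decodeEncodedData in bucket.go above tolerated unreadable disks.

package xl

import (
    "bytes"
    "testing"
)

func TestEncoderRoundTrip(t *testing.T) {
    k, m := uint8(6), uint8(2) // illustrative data/parity split
    e, err := newEncoder(k, m)
    if err != nil {
        t.Fatal(err)
    }
    payload := bytes.Repeat([]byte("minio"), 1000)
    shards, err := e.Encode(payload)
    if err != nil {
        t.Fatal(err)
    }
    // Simulate two unreadable disks; up to m shards may be missing.
    shards[0], shards[3] = nil, nil
    decoded, err := e.Decode(shards, len(payload))
    if err != nil {
        t.Fatal(err)
    }
    if !bytes.Equal(decoded, payload) {
        t.Fatal("decoded payload does not match the original")
    }
}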
@ -1,342 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import "fmt"
// SignDoesNotMatch - signature does not match.
type SignDoesNotMatch struct{}
func (e SignDoesNotMatch) Error() string {
return "Signature does not match."
}
// InvalidArgument invalid argument
type InvalidArgument struct{}
func (e InvalidArgument) Error() string {
return "Invalid argument"
}
// UnsupportedFilesystem unsupported filesystem type
type UnsupportedFilesystem struct {
Type string
}
func (e UnsupportedFilesystem) Error() string {
return "Unsupported filesystem: " + e.Type
}
// BucketNotFound bucket does not exist
type BucketNotFound struct {
Bucket string
}
func (e BucketNotFound) Error() string {
return "Bucket not found: " + e.Bucket
}
// ObjectExists object exists
type ObjectExists struct {
Object string
}
func (e ObjectExists) Error() string {
return "Object exists: " + e.Object
}
// ObjectNotFound object does not exist
type ObjectNotFound struct {
Object string
}
func (e ObjectNotFound) Error() string {
return "Object not found: " + e.Object
}
// ObjectCorrupted object found to be corrupted
type ObjectCorrupted struct {
Object string
}
func (e ObjectCorrupted) Error() string {
return "Object found corrupted: " + e.Object
}
// BucketExists bucket exists
type BucketExists struct {
Bucket string
}
func (e BucketExists) Error() string {
return "Bucket exists: " + e.Bucket
}
// CorruptedBackend backend found to be corrupted
type CorruptedBackend struct {
Backend string
}
func (e CorruptedBackend) Error() string {
return "Corrupted backend: " + e.Backend
}
// NotImplemented function not implemented
type NotImplemented struct {
Function string
}
func (e NotImplemented) Error() string {
return "Not implemented: " + e.Function
}
// InvalidDisksArgument invalid number of disks per node
type InvalidDisksArgument struct{}
func (e InvalidDisksArgument) Error() string {
return "Invalid number of disks per node"
}
// BadDigest bad md5sum
type BadDigest struct{}
func (e BadDigest) Error() string {
return "Bad digest"
}
// ParityOverflow parity over flow
type ParityOverflow struct{}
func (e ParityOverflow) Error() string {
return "Parity overflow"
}
// ChecksumMismatch checksum mismatch
type ChecksumMismatch struct{}
func (e ChecksumMismatch) Error() string {
return "Checksum mismatch"
}
// MissingPOSTPolicy missing post policy
type MissingPOSTPolicy struct{}
func (e MissingPOSTPolicy) Error() string {
return "Missing POST policy in multipart form"
}
// InternalError - generic internal error
type InternalError struct {
}
// BackendError - generic disk backend error
type BackendError struct {
Path string
}
// BackendCorrupted - path has corrupted data
type BackendCorrupted BackendError
// APINotImplemented - generic API not implemented error
type APINotImplemented struct {
API string
}
// GenericBucketError - generic bucket error
type GenericBucketError struct {
Bucket string
}
// GenericObjectError - generic object error
type GenericObjectError struct {
Bucket string
Object string
}
// ImplementationError - generic implementation error
type ImplementationError struct {
Bucket string
Object string
Err error
}
// DigestError - Generic Md5 error
type DigestError struct {
Bucket string
Key string
Md5 string
}
/// ACL related errors
// InvalidACL - acl invalid
type InvalidACL struct {
ACL string
}
func (e InvalidACL) Error() string {
return "Requested ACL " + e.ACL + " is invalid"
}
/// Bucket related errors
// BucketNameInvalid - bucketname provided is invalid
type BucketNameInvalid GenericBucketError
// TooManyBuckets - total buckets exceeded
type TooManyBuckets GenericBucketError
/// Object related errors
// EntityTooLarge - object size exceeds maximum limit
type EntityTooLarge struct {
GenericObjectError
Size string
MaxSize string
}
// ObjectNameInvalid - object name provided is invalid
type ObjectNameInvalid GenericObjectError
// InvalidDigest - md5 in request header invalid
type InvalidDigest DigestError
// Error - returns a human readable error message
func (e ImplementationError) Error() string {
error := ""
if e.Bucket != "" {
error = error + "Bucket: " + e.Bucket + " "
}
if e.Object != "" {
error = error + "Object: " + e.Object + " "
}
error = error + "Error: " + e.Err.Error()
return error
}
// EmbedError - wrapper function for error object
func EmbedError(bucket, object string, err error) ImplementationError {
return ImplementationError{
Bucket: bucket,
Object: object,
Err: err,
}
}
// Error - returns a human readable error message
func (e InternalError) Error() string {
return "Internal error occurred"
}
// Error - returns a human readable error message
func (e APINotImplemented) Error() string {
return "Api not implemented: " + e.API
}
// Error - returns a human readable error message
func (e BucketNameInvalid) Error() string {
return "Bucket name invalid: " + e.Bucket
}
// Error - returns a human readable error message
func (e TooManyBuckets) Error() string {
return "Bucket limit of 100 exceeded, cannot create bucket: " + e.Bucket
}
// Error - returns a human readable error message
func (e ObjectNameInvalid) Error() string {
return "Object name invalid: " + e.Bucket + "#" + e.Object
}
// Error - returns a human readable error message
func (e EntityTooLarge) Error() string {
return e.Bucket + "#" + e.Object + " with " + e.Size + " reached maximum allowed size limit " + e.MaxSize
}
// IncompleteBody You did not provide the number of bytes specified by the Content-Length HTTP header
type IncompleteBody GenericObjectError
// Error - returns a human readable error message
func (e IncompleteBody) Error() string {
return e.Bucket + "#" + e.Object + " has incomplete body"
}
// Error - returns a human readable error message
func (e BackendCorrupted) Error() string {
return "Backend corrupted: " + e.Path
}
// Error - returns a human readable error message
func (e InvalidDigest) Error() string {
return "Md5 provided " + e.Md5 + " is invalid"
}
// OperationNotPermitted - operation not permitted
type OperationNotPermitted struct {
Op string
Reason string
}
func (e OperationNotPermitted) Error() string {
return "Operation " + e.Op + " not permitted for reason: " + e.Reason
}
// InvalidRange - invalid range
type InvalidRange struct {
Start int64
Length int64
}
func (e InvalidRange) Error() string {
return fmt.Sprintf("Invalid range start:%d length:%d", e.Start, e.Length)
}
/// Multipart related errors
// InvalidUploadID invalid upload id
type InvalidUploadID struct {
UploadID string
}
func (e InvalidUploadID) Error() string {
return "Invalid upload id " + e.UploadID
}
// InvalidPart One or more of the specified parts could not be found
type InvalidPart struct{}
func (e InvalidPart) Error() string {
return "One or more of the specified parts could not be found"
}
// InvalidPartOrder parts are not ordered as Requested
type InvalidPartOrder struct {
UploadID string
}
func (e InvalidPartOrder) Error() string {
return "Invalid part order sent for " + e.UploadID
}
// MalformedXML invalid xml format
type MalformedXML struct{}
func (e MalformedXML) Error() string {
return "Malformed XML"
}

@ -1,71 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"encoding/json"
"fmt"
"path/filepath"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/xl/block"
)
// healBuckets heal bucket slices
func (xl API) healBuckets() *probe.Error {
if err := xl.listXLBuckets(); err != nil {
return err.Trace()
}
bucketMetadata, err := xl.getXLBucketMetadata()
if err != nil {
return err.Trace()
}
disks := make(map[int]block.Block)
for _, node := range xl.nodes {
nDisks, err := node.ListDisks()
if err != nil {
return err.Trace()
}
for k, v := range nDisks {
disks[k] = v
}
}
for order, disk := range disks {
if disk.IsUsable() {
disk.MakeDir(xl.config.XLName)
bucketMetadataWriter, err := disk.CreateFile(filepath.Join(xl.config.XLName, bucketMetadataConfig))
if err != nil {
return err.Trace()
}
defer bucketMetadataWriter.Close()
jenc := json.NewEncoder(bucketMetadataWriter)
if err := jenc.Encode(bucketMetadata); err != nil {
return probe.NewError(err)
}
for bucket := range bucketMetadata.Buckets {
bucketSlice := fmt.Sprintf("%s$0$%d", bucket, order) // TODO handle node slices
err := disk.MakeDir(filepath.Join(xl.config.XLName, bucketSlice))
if err != nil {
return err.Trace()
}
}
}
}
return nil
}

@ -1,74 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"io"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/signature4"
)
// Collection of XL specification interfaces
// Interface is a collection of cloud storage and management interface
type Interface interface {
CloudStorage
Management
}
// CloudStorage is a xl cloud storage interface
type CloudStorage interface {
// Storage service operations
GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error)
SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error
ListBuckets() ([]BucketMetadata, *probe.Error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *signature4.Sign) *probe.Error
// Bucket operations
ListObjects(string, BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)
// Object operations
GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error)
GetObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error)
// bucket, object, expectedMD5Sum, size, reader, metadata, signature
CreateObject(string, string, string, int64, io.Reader, map[string]string, *signature4.Sign) (ObjectMetadata, *probe.Error)
Multipart
}
// Multipart API
type Multipart interface {
NewMultipartUpload(bucket, key, contentType string) (string, *probe.Error)
AbortMultipartUpload(bucket, key, uploadID string) *probe.Error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *signature4.Sign) (string, *probe.Error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signature4.Sign) (ObjectMetadata, *probe.Error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error)
ListObjectParts(string, string, ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error)
}
// Management is a xl management system interface
type Management interface {
Heal() *probe.Error
Rebalance() *probe.Error
Info() (map[string][]string, *probe.Error)
AttachNode(hostname string, disks []string) *probe.Error
DetachNode(hostname string) *probe.Error
}

@ -1,83 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/xl/block"
)
// Info - return info about xl configuration
func (xl API) Info() (nodeDiskMap map[string][]string, err *probe.Error) {
nodeDiskMap = make(map[string][]string)
for nodeName, n := range xl.nodes {
disks, err := n.ListDisks()
if err != nil {
return nil, err.Trace()
}
diskList := make([]string, len(disks))
for diskOrder, disk := range disks {
diskList[diskOrder] = disk.GetPath()
}
nodeDiskMap[nodeName] = diskList
}
return nodeDiskMap, nil
}
// AttachNode - attach node
func (xl API) AttachNode(hostname string, disks []string) *probe.Error {
if hostname == "" || len(disks) == 0 {
return probe.NewError(InvalidArgument{})
}
n, err := newNode(hostname)
if err != nil {
return err.Trace()
}
xl.nodes[hostname] = n
for i, d := range disks {
newDisk, err := block.New(d)
if err != nil {
continue
}
if err := newDisk.MakeDir(xl.config.XLName); err != nil {
return err.Trace()
}
if err := n.AttachDisk(newDisk, i); err != nil {
return err.Trace()
}
}
return nil
}
// DetachNode - detach node
func (xl API) DetachNode(hostname string) *probe.Error {
delete(xl.nodes, hostname)
return nil
}
// Rebalance - rebalance an existing xl with new disks and nodes
func (xl API) Rebalance() *probe.Error {
return probe.NewError(APINotImplemented{API: "management.Rebalance"})
}
// Heal - heal your xls
func (xl API) Heal() *probe.Error {
// TODO handle data heal
return xl.healBuckets()
}

@ -1,516 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"bytes"
"crypto/md5"
"crypto/sha512"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
"math/rand"
"runtime/debug"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/signature4"
"github.com/minio/minio/pkg/xl/cache/data"
)
/// V2 API functions
// NewMultipartUpload - initiate a new multipart session
func (xl API) NewMultipartUpload(bucket, key, contentType string) (string, *probe.Error) {
xl.lock.Lock()
defer xl.lock.Unlock()
if !IsValidBucket(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return "", probe.NewError(ObjectNameInvalid{Object: key})
}
// if len(xl.config.NodeDiskMap) > 0 {
// return xl.newMultipartUpload(bucket, key, contentType)
// }
if !xl.storedBuckets.Exists(bucket) {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
return "", probe.NewError(ObjectExists{Object: key})
}
id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String())
uploadIDSum := sha512.Sum512(id)
uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]
storedBucket.multiPartSession[key] = MultiPartSession{
UploadID: uploadID,
Initiated: time.Now().UTC(),
TotalParts: 0,
}
storedBucket.partMetadata[key] = make(map[int]PartMetadata)
multiPartCache := data.NewCache(0)
multiPartCache.OnEvicted = xl.evictedPart
xl.multiPartObjects[uploadID] = multiPartCache
xl.storedBuckets.Set(bucket, storedBucket)
return uploadID, nil
}
// AbortMultipartUpload - abort an incomplete multipart session
func (xl API) AbortMultipartUpload(bucket, key, uploadID string) *probe.Error {
xl.lock.Lock()
defer xl.lock.Unlock()
if !IsValidBucket(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return probe.NewError(ObjectNameInvalid{Object: key})
}
// TODO: multipart support for xl is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
// if len(xl.config.NodeDiskMap) > 0 {
// return xl.abortMultipartUpload(bucket, key, uploadID)
// }
if !xl.storedBuckets.Exists(bucket) {
return probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
if storedBucket.multiPartSession[key].UploadID != uploadID {
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
xl.cleanupMultipartSession(bucket, key, uploadID)
return nil
}
// CreateObjectPart - create a part in a multipart session
func (xl API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signature4.Sign) (string, *probe.Error) {
xl.lock.Lock()
etag, err := xl.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data, signature)
xl.lock.Unlock()
// possible free
debug.FreeOSMemory()
return etag, err.Trace()
}
// createObjectPart - internal function called by CreateObjectPart
func (xl API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signature4.Sign) (string, *probe.Error) {
if !IsValidBucket(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return "", probe.NewError(ObjectNameInvalid{Object: key})
}
// TODO: multipart support for xl is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
/*
if len(xl.config.NodeDiskMap) > 0 {
metadata := make(map[string]string)
if contentType == "" {
contentType = "application/octet-stream"
}
contentType = strings.TrimSpace(contentType)
metadata["contentType"] = contentType
if strings.TrimSpace(expectedMD5Sum) != "" {
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
partMetadata, err := xl.putObjectPart(bucket, key, expectedMD5Sum, uploadID, partID, data, size, metadata, signature)
if err != nil {
return "", err.Trace()
}
return partMetadata.ETag, nil
}
*/
if !xl.storedBuckets.Exists(bucket) {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
strBucket := xl.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if strBucket.multiPartSession[key].UploadID != uploadID {
return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
}
// get object key
parts := strBucket.partMetadata[key]
if _, ok := parts[partID]; ok {
return parts[partID].ETag, nil
}
if contentType == "" {
contentType = "application/octet-stream"
}
contentType = strings.TrimSpace(contentType)
if strings.TrimSpace(expectedMD5Sum) != "" {
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
// calculate md5
hash := md5.New()
sha256hash := sha256.New()
var totalLength int64
var err error
for err == nil {
var length int
byteBuffer := make([]byte, 1024*1024)
length, err = data.Read(byteBuffer) // do not return the read error here; it is handled after the loop
if length != 0 {
hash.Write(byteBuffer[0:length])
sha256hash.Write(byteBuffer[0:length])
ok := xl.multiPartObjects[uploadID].Append(partID, byteBuffer[0:length])
if !ok {
return "", probe.NewError(InternalError{})
}
totalLength += int64(length)
go debug.FreeOSMemory()
}
}
if totalLength != size {
xl.multiPartObjects[uploadID].Delete(partID)
return "", probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
}
if err != io.EOF {
return "", probe.NewError(err)
}
md5SumBytes := hash.Sum(nil)
md5Sum := hex.EncodeToString(md5SumBytes)
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
return "", err.Trace()
}
}
if signature != nil {
{
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
if err != nil {
return "", err.Trace()
}
if !ok {
return "", probe.NewError(SignDoesNotMatch{})
}
}
}
newPart := PartMetadata{
PartNumber: partID,
LastModified: time.Now().UTC(),
ETag: md5Sum,
Size: totalLength,
}
parts[partID] = newPart
strBucket.partMetadata[key] = parts
multiPartSession := strBucket.multiPartSession[key]
multiPartSession.TotalParts++
strBucket.multiPartSession[key] = multiPartSession
xl.storedBuckets.Set(bucket, strBucket)
return md5Sum, nil
}
// cleanupMultipartSession invoked during an abort or complete multipart session to cleanup session from memory
func (xl API) cleanupMultipartSession(bucket, key, uploadID string) {
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
for i := 1; i <= storedBucket.multiPartSession[key].TotalParts; i++ {
xl.multiPartObjects[uploadID].Delete(i)
}
delete(storedBucket.multiPartSession, key)
delete(storedBucket.partMetadata, key)
xl.storedBuckets.Set(bucket, storedBucket)
}
func (xl API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string, fullObjectWriter *io.PipeWriter) {
for _, part := range parts.Part {
recvMD5 := part.ETag
object, ok := xl.multiPartObjects[uploadID].Get(part.PartNumber)
if ok == false {
fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidPart{})))
return
}
calcMD5Bytes := md5.Sum(object)
// in the complete multipart request header, each part's md5sum is hex encoded
recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
if err != nil {
fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidDigest{Md5: recvMD5})))
return
}
if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(BadDigest{})))
return
}
if _, err := io.Copy(fullObjectWriter, bytes.NewReader(object)); err != nil {
fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
object = nil
}
fullObjectWriter.Close()
return
}
// CompleteMultipartUpload - complete a multipart upload and persist the data
func (xl API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signature4.Sign) (ObjectMetadata, *probe.Error) {
xl.lock.Lock()
defer xl.lock.Unlock()
size := int64(xl.multiPartObjects[uploadID].Stats().Bytes)
fullObjectReader, err := xl.completeMultipartUploadV2(bucket, key, uploadID, data, signature)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
objectMetadata, err := xl.createObject(bucket, key, "", "", size, fullObjectReader, nil)
if err != nil {
// No need to call internal cleanup functions here; the caller should call AbortMultipartUpload(),
// which in turn cleans up properly in accordance with the S3 spec
return ObjectMetadata{}, err.Trace()
}
xl.cleanupMultipartSession(bucket, key, uploadID)
return objectMetadata, nil
}
func (xl API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *signature4.Sign) (io.Reader, *probe.Error) {
if !IsValidBucket(bucket) {
return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return nil, probe.NewError(ObjectNameInvalid{Object: key})
}
// TODO: multipart support for xl is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
// if len(xl.config.NodeDiskMap) > 0 {
// xl.lock.Unlock()
// return xl.completeMultipartUpload(bucket, key, uploadID, data, signature)
// }
if !xl.storedBuckets.Exists(bucket) {
return nil, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if storedBucket.multiPartSession[key].UploadID != uploadID {
return nil, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
partBytes, err := ioutil.ReadAll(data)
if err != nil {
return nil, probe.NewError(err)
}
if signature != nil {
partHashBytes := sha256.Sum256(partBytes)
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(partHashBytes[:]))
if err != nil {
return nil, err.Trace()
}
if !ok {
return nil, probe.NewError(SignDoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
if err := xml.Unmarshal(partBytes, parts); err != nil {
return nil, probe.NewError(MalformedXML{})
}
if !sort.IsSorted(completedParts(parts.Part)) {
return nil, probe.NewError(InvalidPartOrder{})
}
fullObjectReader, fullObjectWriter := io.Pipe()
go xl.mergeMultipart(parts, uploadID, fullObjectWriter)
return fullObjectReader, nil
}
// byKey is a sortable interface for UploadMetadata slice
type byKey []*UploadMetadata
func (a byKey) Len() int { return len(a) }
func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
// ListMultipartUploads - list incomplete multipart sessions for a given bucket
func (xl API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
// TODO handle delimiter, low priority
xl.lock.Lock()
defer xl.lock.Unlock()
if !IsValidBucket(bucket) {
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
// TODO: multipart support for xl is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
// if len(xl.config.NodeDiskMap) > 0 {
// return xl.listMultipartUploads(bucket, resources)
// }
if !xl.storedBuckets.Exists(bucket) {
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
var uploads []*UploadMetadata
for key, session := range storedBucket.multiPartSession {
if strings.HasPrefix(key, resources.Prefix) {
if len(uploads) > resources.MaxUploads {
sort.Sort(byKey(uploads))
resources.Upload = uploads
resources.NextKeyMarker = key
resources.NextUploadIDMarker = session.UploadID
resources.IsTruncated = true
return resources, nil
}
// uploadIDMarker is ignored if KeyMarker is empty
switch {
case resources.KeyMarker != "" && resources.UploadIDMarker == "":
if key > resources.KeyMarker {
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
case resources.KeyMarker != "" && resources.UploadIDMarker != "":
if session.UploadID > resources.UploadIDMarker {
if key >= resources.KeyMarker {
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
default:
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
}
sort.Sort(byKey(uploads))
resources.Upload = uploads
return resources, nil
}
// partNumber is a sortable interface for Part slice
type partNumber []*PartMetadata
func (a partNumber) Len() int { return len(a) }
func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// ListObjectParts - list parts from incomplete multipart session for a given object
func (xl API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
// Verify upload id
xl.lock.Lock()
defer xl.lock.Unlock()
if !IsValidBucket(bucket) {
return ObjectResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectResourcesMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
// TODO: multipart support for xl is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
// if len(xl.config.NodeDiskMap) > 0 {
// return xl.listObjectParts(bucket, key, resources)
// }
if !xl.storedBuckets.Exists(bucket) {
return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
if _, ok := storedBucket.multiPartSession[key]; ok == false {
return ObjectResourcesMetadata{}, probe.NewError(ObjectNotFound{Object: key})
}
if storedBucket.multiPartSession[key].UploadID != resources.UploadID {
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
storedParts := storedBucket.partMetadata[key]
objectResourcesMetadata := resources
objectResourcesMetadata.Bucket = bucket
objectResourcesMetadata.Key = key
var parts []*PartMetadata
var startPartNumber int
switch {
case objectResourcesMetadata.PartNumberMarker == 0:
startPartNumber = 1
default:
startPartNumber = objectResourcesMetadata.PartNumberMarker
}
for i := startPartNumber; i <= storedBucket.multiPartSession[key].TotalParts; i++ {
if len(parts) > objectResourcesMetadata.MaxParts {
sort.Sort(partNumber(parts))
objectResourcesMetadata.IsTruncated = true
objectResourcesMetadata.Part = parts
objectResourcesMetadata.NextPartNumberMarker = i
return objectResourcesMetadata, nil
}
part, ok := storedParts[i]
if !ok {
return ObjectResourcesMetadata{}, probe.NewError(InvalidPart{})
}
parts = append(parts, &part)
}
sort.Sort(partNumber(parts))
objectResourcesMetadata.Part = parts
return objectResourcesMetadata, nil
}
// evictedPart - call back function called by caching module during individual cache evictions
func (xl API) evictedPart(a ...interface{}) {
// loop through all buckets
buckets := xl.storedBuckets.GetAll()
for bucketName, bucket := range buckets {
b := bucket.(storedBucket)
xl.storedBuckets.Set(bucketName, b)
}
debug.FreeOSMemory()
}

@ -1,78 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/xl/block"
)
// node struct internal
type node struct {
hostname string
disks map[int]block.Block
}
// newNode - instantiates a new node
func newNode(hostname string) (node, *probe.Error) {
if hostname == "" {
return node{}, probe.NewError(InvalidArgument{})
}
disks := make(map[int]block.Block)
n := node{
hostname: hostname,
disks: disks,
}
return n, nil
}
// GetHostname - return hostname
func (n node) GetHostname() string {
return n.hostname
}
// ListDisks - return the map of attached disks
func (n node) ListDisks() (map[int]block.Block, *probe.Error) {
return n.disks, nil
}
// AttachDisk - attach a disk
func (n node) AttachDisk(disk block.Block, diskOrder int) *probe.Error {
if diskOrder < 0 {
return probe.NewError(InvalidArgument{})
}
n.disks[diskOrder] = disk
return nil
}
// DetachDisk - detach a disk
func (n node) DetachDisk(diskOrder int) *probe.Error {
delete(n.disks, diskOrder)
return nil
}
// SaveConfig - save node configuration
func (n node) SaveConfig() *probe.Error {
return probe.NewError(NotImplemented{Function: "SaveConfig"})
}
// LoadConfig - load node configuration from saved configs
func (n node) LoadConfig() *probe.Error {
return probe.NewError(NotImplemented{Function: "LoadConfig"})
}

@ -1,55 +0,0 @@
##### Users Collection
```js
"minio": {
"version": 1,
"users": [{
"secretAccessKey": String,
"accessKeyId": String,
"status": String // enum: ok, disabled, deleted
}],
"hosts": [{
"address": String,
"uuid": String,
"status": String, // enum: ok, disabled, deleted, busy, offline.
"disks": [{
"disk": String,
"uuid": String,
"status": String // ok, offline, disabled, busy.
}]
}]
}
```
##### Bucket Collection
```js
"buckets": {
"bucket": String, // index
"deleted": Boolean,
"permissions": String
}
```
##### Object Collection
```js
"objects": {
"key": String, // index
"createdAt": Date,
"hosts[16]": [{
"host": String,
"disk": String,
}],
"deleted": Boolean
}
```
```js
"meta": {
"key": String, // index
"type": String // content-type
// type-specific meta
}
```
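To make the schema above concrete, here is a minimal Go sketch of those collections; the struct and field names are assumptions derived from the JSON keys in the removed document, not types that existed in the deleted code.

```go
// Package xlmeta is a hypothetical Go mirror of the removed metadata schema;
// names are taken from the JSON keys above, not from the deleted sources.
package xlmeta

import "time"

type Disk struct {
	Disk   string `json:"disk"`
	UUID   string `json:"uuid"`
	Status string `json:"status"` // ok, offline, disabled, busy
}

type Host struct {
	Address string `json:"address"`
	UUID    string `json:"uuid"`
	Status  string `json:"status"` // ok, disabled, deleted, busy, offline
	Disks   []Disk `json:"disks"`
}

type User struct {
	SecretAccessKey string `json:"secretAccessKey"`
	AccessKeyID     string `json:"accessKeyId"`
	Status          string `json:"status"` // ok, disabled, deleted
}

type Bucket struct {
	Bucket      string `json:"bucket"` // index
	Deleted     bool   `json:"deleted"`
	Permissions string `json:"permissions"`
}

type ObjectHost struct {
	Host string `json:"host"`
	Disk string `json:"disk"`
}

type Object struct {
	Key       string       `json:"key"` // index
	CreatedAt time.Time    `json:"createdAt"`
	Hosts     []ObjectHost `json:"hosts"` // up to 16 host/disk pairs
	Deleted   bool         `json:"deleted"`
}

type Meta struct {
	Key  string `json:"key"`  // index
	Type string `json:"type"` // content-type; additional type-specific fields omitted
}
```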

@ -1,683 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/signature4"
"github.com/minio/minio/pkg/xl/block"
)
// config files used inside XL
const (
// bucket, object metadata
bucketMetadataConfig = "bucketMetadata.json"
objectMetadataConfig = "objectMetadata.json"
// versions
objectMetadataVersion = "1.0.0"
bucketMetadataVersion = "1.0.0"
)
/// v1 API functions
// makeBucket - make a new bucket
func (xl API) makeBucket(bucket string, acl BucketACL) *probe.Error {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return probe.NewError(InvalidArgument{})
}
return xl.makeXLBucket(bucket, acl.String())
}
// getBucketMetadata - get bucket metadata
func (xl API) getBucketMetadata(bucketName string) (BucketMetadata, *probe.Error) {
if err := xl.listXLBuckets(); err != nil {
return BucketMetadata{}, err.Trace()
}
if _, ok := xl.buckets[bucketName]; !ok {
return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucketName})
}
metadata, err := xl.getXLBucketMetadata()
if err != nil {
return BucketMetadata{}, err.Trace()
}
return metadata.Buckets[bucketName], nil
}
// setBucketMetadata - set bucket metadata
func (xl API) setBucketMetadata(bucketName string, bucketMetadata map[string]string) *probe.Error {
if err := xl.listXLBuckets(); err != nil {
return err.Trace()
}
metadata, err := xl.getXLBucketMetadata()
if err != nil {
return err.Trace()
}
oldBucketMetadata := metadata.Buckets[bucketName]
acl, ok := bucketMetadata["acl"]
if !ok {
return probe.NewError(InvalidArgument{})
}
oldBucketMetadata.ACL = BucketACL(acl)
metadata.Buckets[bucketName] = oldBucketMetadata
return xl.setXLBucketMetadata(metadata)
}
// listBuckets - return list of buckets
func (xl API) listBuckets() (map[string]BucketMetadata, *probe.Error) {
if err := xl.listXLBuckets(); err != nil {
return nil, err.Trace()
}
metadata, err := xl.getXLBucketMetadata()
if err != nil {
// intentionally ignore the error when XL is empty;
// we need to revisit this area in the future once we can
// distinguish acceptable from unacceptable errors
return make(map[string]BucketMetadata), nil
}
if metadata == nil {
return make(map[string]BucketMetadata), nil
}
return metadata.Buckets, nil
}
// listObjects - return list of objects
func (xl API) listObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) {
if err := xl.listXLBuckets(); err != nil {
return ListObjectsResults{}, err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return ListObjectsResults{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
listObjects, err := xl.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys)
if err != nil {
return ListObjectsResults{}, err.Trace()
}
return listObjects, nil
}
// putObject - put object
func (xl API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *signature4.Sign) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if err := xl.listXLBuckets(); err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
bucketMeta, err := xl.getXLBucketMetadata()
if err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok {
return ObjectMetadata{}, probe.NewError(ObjectExists{Object: object})
}
objMetadata, err := xl.buckets[bucket].WriteObject(object, reader, size, expectedMD5Sum, metadata, signature)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
bucketMeta.Buckets[bucket].BucketObjects[object] = struct{}{}
if err := xl.setXLBucketMetadata(bucketMeta); err != nil {
return ObjectMetadata{}, err.Trace()
}
return objMetadata, nil
}
// putObjectPart - put object part
func (xl API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *signature4.Sign) (PartMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return PartMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return PartMetadata{}, probe.NewError(InvalidArgument{})
}
if err := xl.listXLBuckets(); err != nil {
return PartMetadata{}, err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return PartMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
bucketMeta, err := xl.getXLBucketMetadata()
if err != nil {
return PartMetadata{}, err.Trace()
}
if _, ok := bucketMeta.Buckets[bucket].Multiparts[object]; !ok {
return PartMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok {
return PartMetadata{}, probe.NewError(ObjectExists{Object: object})
}
objectPart := object + "/" + "multipart" + "/" + strconv.Itoa(partID)
objmetadata, err := xl.buckets[bucket].WriteObject(objectPart, reader, size, expectedMD5Sum, metadata, signature)
if err != nil {
return PartMetadata{}, err.Trace()
}
partMetadata := PartMetadata{
PartNumber: partID,
LastModified: objmetadata.Created,
ETag: objmetadata.MD5Sum,
Size: objmetadata.Size,
}
multipartSession := bucketMeta.Buckets[bucket].Multiparts[object]
multipartSession.Parts[strconv.Itoa(partID)] = partMetadata
bucketMeta.Buckets[bucket].Multiparts[object] = multipartSession
if err := xl.setXLBucketMetadata(bucketMeta); err != nil {
return PartMetadata{}, err.Trace()
}
return partMetadata, nil
}
// getObject - get object
func (xl API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return nil, 0, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return nil, 0, probe.NewError(InvalidArgument{})
}
if err := xl.listXLBuckets(); err != nil {
return nil, 0, err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return nil, 0, probe.NewError(BucketNotFound{Bucket: bucket})
}
return xl.buckets[bucket].ReadObject(object)
}
// getObjectMetadata - get object metadata
func (xl API) getObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) {
if err := xl.listXLBuckets(); err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
bucketMeta, err := xl.getXLBucketMetadata()
if err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; !ok {
return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: object})
}
objectMetadata, err := xl.buckets[bucket].GetObjectMetadata(object)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
return objectMetadata, nil
}
// newMultipartUpload - new multipart upload request
func (xl API) newMultipartUpload(bucket, object, contentType string) (string, *probe.Error) {
if err := xl.listXLBuckets(); err != nil {
return "", err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
allbuckets, err := xl.getXLBucketMetadata()
if err != nil {
return "", err.Trace()
}
bucketMetadata := allbuckets.Buckets[bucket]
multiparts := make(map[string]MultiPartSession)
if len(bucketMetadata.Multiparts) > 0 {
multiparts = bucketMetadata.Multiparts
}
id := []byte(strconv.Itoa(rand.Int()) + bucket + object + time.Now().String())
uploadIDSum := sha512.Sum512(id)
uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]
multipartSession := MultiPartSession{
UploadID: uploadID,
Initiated: time.Now().UTC(),
Parts: make(map[string]PartMetadata),
TotalParts: 0,
}
multiparts[object] = multipartSession
bucketMetadata.Multiparts = multiparts
allbuckets.Buckets[bucket] = bucketMetadata
if err := xl.setXLBucketMetadata(allbuckets); err != nil {
return "", err.Trace()
}
return uploadID, nil
}
// listObjectParts list all object parts
func (xl API) listObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectResourcesMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return ObjectResourcesMetadata{}, probe.NewError(InvalidArgument{})
}
if err := xl.listXLBuckets(); err != nil {
return ObjectResourcesMetadata{}, err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
allBuckets, err := xl.getXLBucketMetadata()
if err != nil {
return ObjectResourcesMetadata{}, err.Trace()
}
bucketMetadata := allBuckets.Buckets[bucket]
if _, ok := bucketMetadata.Multiparts[object]; !ok {
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
if bucketMetadata.Multiparts[object].UploadID != resources.UploadID {
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
objectResourcesMetadata := resources
objectResourcesMetadata.Bucket = bucket
objectResourcesMetadata.Key = object
var parts []*PartMetadata
var startPartNumber int
switch {
case objectResourcesMetadata.PartNumberMarker == 0:
startPartNumber = 1
default:
startPartNumber = objectResourcesMetadata.PartNumberMarker
}
for i := startPartNumber; i <= bucketMetadata.Multiparts[object].TotalParts; i++ {
if len(parts) > objectResourcesMetadata.MaxParts {
sort.Sort(partNumber(parts))
objectResourcesMetadata.IsTruncated = true
objectResourcesMetadata.Part = parts
objectResourcesMetadata.NextPartNumberMarker = i
return objectResourcesMetadata, nil
}
part, ok := bucketMetadata.Multiparts[object].Parts[strconv.Itoa(i)]
if !ok {
return ObjectResourcesMetadata{}, probe.NewError(InvalidPart{})
}
parts = append(parts, &part)
}
sort.Sort(partNumber(parts))
objectResourcesMetadata.Part = parts
return objectResourcesMetadata, nil
}
// completeMultipartUpload complete an incomplete multipart upload
func (xl API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signature4.Sign) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if err := xl.listXLBuckets(); err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
allBuckets, err := xl.getXLBucketMetadata()
if err != nil {
return ObjectMetadata{}, err.Trace()
}
bucketMetadata := allBuckets.Buckets[bucket]
if _, ok := bucketMetadata.Multiparts[object]; !ok {
return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if bucketMetadata.Multiparts[object].UploadID != uploadID {
return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
var partBytes []byte
{
var err error
partBytes, err = ioutil.ReadAll(data)
if err != nil {
return ObjectMetadata{}, probe.NewError(err)
}
}
if signature != nil {
partHashBytes := sha256.Sum256(partBytes)
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(partHashBytes[:]))
if err != nil {
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
if err := xml.Unmarshal(partBytes, parts); err != nil {
return ObjectMetadata{}, probe.NewError(MalformedXML{})
}
if !sort.IsSorted(completedParts(parts.Part)) {
return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
}
for _, part := range parts.Part {
if strings.Trim(part.ETag, "\"") != bucketMetadata.Multiparts[object].Parts[strconv.Itoa(part.PartNumber)].ETag {
return ObjectMetadata{}, probe.NewError(InvalidPart{})
}
}
var finalETagBytes []byte
var finalSize int64
totalParts := strconv.Itoa(bucketMetadata.Multiparts[object].TotalParts)
for _, part := range bucketMetadata.Multiparts[object].Parts {
partETagBytes, err := hex.DecodeString(part.ETag)
if err != nil {
return ObjectMetadata{}, probe.NewError(err)
}
finalETagBytes = append(finalETagBytes, partETagBytes...)
finalSize += part.Size
}
finalETag := hex.EncodeToString(finalETagBytes)
objMetadata := ObjectMetadata{}
objMetadata.MD5Sum = finalETag + "-" + totalParts
objMetadata.Object = object
objMetadata.Bucket = bucket
objMetadata.Size = finalSize
objMetadata.Created = bucketMetadata.Multiparts[object].Parts[totalParts].LastModified
return objMetadata, nil
}
// listMultipartUploads list all multipart uploads
func (xl API) listMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
if err := xl.listXLBuckets(); err != nil {
return BucketMultipartResourcesMetadata{}, err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
allbuckets, err := xl.getXLBucketMetadata()
if err != nil {
return BucketMultipartResourcesMetadata{}, err.Trace()
}
bucketMetadata := allbuckets.Buckets[bucket]
var uploads []*UploadMetadata
for key, session := range bucketMetadata.Multiparts {
if strings.HasPrefix(key, resources.Prefix) {
if len(uploads) > resources.MaxUploads {
sort.Sort(byKey(uploads))
resources.Upload = uploads
resources.NextKeyMarker = key
resources.NextUploadIDMarker = session.UploadID
resources.IsTruncated = true
return resources, nil
}
// uploadIDMarker is ignored if KeyMarker is empty
switch {
case resources.KeyMarker != "" && resources.UploadIDMarker == "":
if key > resources.KeyMarker {
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
case resources.KeyMarker != "" && resources.UploadIDMarker != "":
if session.UploadID > resources.UploadIDMarker {
if key >= resources.KeyMarker {
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
default:
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
}
sort.Sort(byKey(uploads))
resources.Upload = uploads
return resources, nil
}
// abortMultipartUpload - abort an incomplete multipart upload
func (xl API) abortMultipartUpload(bucket, object, uploadID string) *probe.Error {
if err := xl.listXLBuckets(); err != nil {
return err.Trace()
}
if _, ok := xl.buckets[bucket]; !ok {
return probe.NewError(BucketNotFound{Bucket: bucket})
}
allbuckets, err := xl.getXLBucketMetadata()
if err != nil {
return err.Trace()
}
bucketMetadata := allbuckets.Buckets[bucket]
if _, ok := bucketMetadata.Multiparts[object]; !ok {
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if bucketMetadata.Multiparts[object].UploadID != uploadID {
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
delete(bucketMetadata.Multiparts, object)
allbuckets.Buckets[bucket] = bucketMetadata
if err := xl.setXLBucketMetadata(allbuckets); err != nil {
return err.Trace()
}
return nil
}
//// internal functions
// getBucketMetadataWriters -
func (xl API) getBucketMetadataWriters() ([]io.WriteCloser, *probe.Error) {
var writers []io.WriteCloser
for _, node := range xl.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, err.Trace()
}
writers = make([]io.WriteCloser, len(disks))
for order, disk := range disks {
bucketMetaDataWriter, err := disk.CreateFile(filepath.Join(xl.config.XLName, bucketMetadataConfig))
if err != nil {
return nil, err.Trace()
}
writers[order] = bucketMetaDataWriter
}
}
return writers, nil
}
// getBucketMetadataReaders - readers are returned in map rather than slice
func (xl API) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
readers := make(map[int]io.ReadCloser)
disks := make(map[int]block.Block)
var err *probe.Error
for _, node := range xl.nodes {
nDisks := make(map[int]block.Block)
nDisks, err = node.ListDisks()
if err != nil {
return nil, err.Trace()
}
for k, v := range nDisks {
disks[k] = v
}
}
var bucketMetaDataReader io.ReadCloser
for order, disk := range disks {
bucketMetaDataReader, err = disk.Open(filepath.Join(xl.config.XLName, bucketMetadataConfig))
if err != nil {
continue
}
readers[order] = bucketMetaDataReader
}
if err != nil {
return nil, err.Trace()
}
return readers, nil
}
// setXLBucketMetadata -
func (xl API) setXLBucketMetadata(metadata *AllBuckets) *probe.Error {
writers, err := xl.getBucketMetadataWriters()
if err != nil {
return err.Trace()
}
for _, writer := range writers {
jenc := json.NewEncoder(writer)
if err := jenc.Encode(metadata); err != nil {
CleanupWritersOnError(writers)
return probe.NewError(err)
}
}
for _, writer := range writers {
writer.Close()
}
return nil
}
// getXLBucketMetadata -
func (xl API) getXLBucketMetadata() (*AllBuckets, *probe.Error) {
metadata := &AllBuckets{}
readers, err := xl.getBucketMetadataReaders()
if err != nil {
return nil, err.Trace()
}
for _, reader := range readers {
defer reader.Close()
}
{
var err error
for _, reader := range readers {
jenc := json.NewDecoder(reader)
if err = jenc.Decode(metadata); err == nil {
return metadata, nil
}
}
return nil, probe.NewError(err)
}
}
// makeXLBucket -
func (xl API) makeXLBucket(bucketName, acl string) *probe.Error {
if err := xl.listXLBuckets(); err != nil {
return err.Trace()
}
if _, ok := xl.buckets[bucketName]; ok {
return probe.NewError(BucketExists{Bucket: bucketName})
}
bkt, bucketMetadata, err := newBucket(bucketName, acl, xl.config.XLName, xl.nodes)
if err != nil {
return err.Trace()
}
nodeNumber := 0
xl.buckets[bucketName] = bkt
for _, node := range xl.nodes {
disks := make(map[int]block.Block)
disks, err = node.ListDisks()
if err != nil {
return err.Trace()
}
for order, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order)
err := disk.MakeDir(filepath.Join(xl.config.XLName, bucketSlice))
if err != nil {
return err.Trace()
}
}
nodeNumber = nodeNumber + 1
}
var metadata *AllBuckets
metadata, err = xl.getXLBucketMetadata()
if err != nil {
if os.IsNotExist(err.ToGoError()) {
metadata = new(AllBuckets)
metadata.Buckets = make(map[string]BucketMetadata)
metadata.Buckets[bucketName] = bucketMetadata
err = xl.setXLBucketMetadata(metadata)
if err != nil {
return err.Trace()
}
return nil
}
return err.Trace()
}
metadata.Buckets[bucketName] = bucketMetadata
err = xl.setXLBucketMetadata(metadata)
if err != nil {
return err.Trace()
}
return nil
}
// listXLBuckets -
func (xl API) listXLBuckets() *probe.Error {
var disks map[int]block.Block
var err *probe.Error
for _, node := range xl.nodes {
disks, err = node.ListDisks()
if err != nil {
return err.Trace()
}
}
var dirs []os.FileInfo
for _, disk := range disks {
dirs, err = disk.ListDir(xl.config.XLName)
if err == nil {
break
}
}
// if all disks are missing then return error
if err != nil {
return err.Trace()
}
for _, dir := range dirs {
splitDir := strings.Split(dir.Name(), "$")
if len(splitDir) < 3 {
return probe.NewError(CorruptedBackend{Backend: dir.Name()})
}
bucketName := splitDir[0]
// we don't need this once we cache from makeXLBucket()
bkt, _, err := newBucket(bucketName, "private", xl.config.XLName, xl.nodes)
if err != nil {
return err.Trace()
}
xl.buckets[bucketName] = bkt
}
return nil
}

@ -1,292 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
. "gopkg.in/check.v1"
)
func TestXL(t *testing.T) { TestingT(t) }
type MyXLSuite struct {
root string
}
var _ = Suite(&MyXLSuite{})
// create a dummy TestNodeDiskMap
func createTestNodeDiskMap(p string) map[string][]string {
nodes := make(map[string][]string)
nodes["localhost"] = make([]string, 16)
for i := 0; i < len(nodes["localhost"]); i++ {
diskPath := filepath.Join(p, strconv.Itoa(i))
if _, err := os.Stat(diskPath); err != nil {
if os.IsNotExist(err) {
os.MkdirAll(diskPath, 0700)
}
}
nodes["localhost"][i] = diskPath
}
return nodes
}
var dd Interface
func (s *MyXLSuite) SetUpSuite(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "xl-")
c.Assert(err, IsNil)
s.root = root
conf := new(Config)
conf.Version = "0.0.1"
conf.XLName = "test"
conf.NodeDiskMap = createTestNodeDiskMap(root)
conf.MaxSize = 100000
SetXLConfigPath(filepath.Join(root, "xl.json"))
perr := SaveConfig(conf)
c.Assert(perr, IsNil)
dd, perr = New()
c.Assert(perr, IsNil)
// testing empty xl
buckets, perr := dd.ListBuckets()
c.Assert(perr, IsNil)
c.Assert(len(buckets), Equals, 0)
}
func (s *MyXLSuite) TearDownSuite(c *C) {
os.RemoveAll(s.root)
}
// test make bucket without name
func (s *MyXLSuite) TestBucketWithoutNameFails(c *C) {
// fail to create new bucket without a name
err := dd.MakeBucket("", "private", nil, nil)
c.Assert(err, Not(IsNil))
err = dd.MakeBucket(" ", "private", nil, nil)
c.Assert(err, Not(IsNil))
}
// test empty bucket
func (s *MyXLSuite) TestEmptyBucket(c *C) {
c.Assert(dd.MakeBucket("foo1", "private", nil, nil), IsNil)
// check if bucket is empty
var resources BucketResourcesMetadata
resources.Maxkeys = 1
objectsMetadata, resources, err := dd.ListObjects("foo1", resources)
c.Assert(err, IsNil)
c.Assert(len(objectsMetadata), Equals, 0)
c.Assert(resources.CommonPrefixes, DeepEquals, []string{})
c.Assert(resources.IsTruncated, Equals, false)
}
// test bucket list
func (s *MyXLSuite) TestMakeBucketAndList(c *C) {
// create bucket
err := dd.MakeBucket("foo2", "private", nil, nil)
c.Assert(err, IsNil)
// check bucket exists
buckets, err := dd.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 5)
c.Assert(buckets[0].ACL, Equals, BucketACL("private"))
}
// test re-create bucket
func (s *MyXLSuite) TestMakeBucketWithSameNameFails(c *C) {
err := dd.MakeBucket("foo3", "private", nil, nil)
c.Assert(err, IsNil)
err = dd.MakeBucket("foo3", "private", nil, nil)
c.Assert(err, Not(IsNil))
}
// test make multiple buckets
func (s *MyXLSuite) TestCreateMultipleBucketsAndList(c *C) {
// add a second bucket
err := dd.MakeBucket("foo4", "private", nil, nil)
c.Assert(err, IsNil)
err = dd.MakeBucket("bar1", "private", nil, nil)
c.Assert(err, IsNil)
buckets, err := dd.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 2)
c.Assert(buckets[0].Name, Equals, "bar1")
c.Assert(buckets[1].Name, Equals, "foo4")
err = dd.MakeBucket("foobar1", "private", nil, nil)
c.Assert(err, IsNil)
buckets, err = dd.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 3)
c.Assert(buckets[2].Name, Equals, "foobar1")
}
// test object create without bucket
func (s *MyXLSuite) TestNewObjectFailsWithoutBucket(c *C) {
_, err := dd.CreateObject("unknown", "obj", "", 0, nil, nil, nil)
c.Assert(err, Not(IsNil))
}
// test create object metadata
func (s *MyXLSuite) TestNewObjectMetadata(c *C) {
data := "Hello World"
hasher := md5.New()
hasher.Write([]byte(data))
expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
err := dd.MakeBucket("foo6", "private", nil, nil)
c.Assert(err, IsNil)
objectMetadata, err := dd.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"}, nil)
c.Assert(err, IsNil)
c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json")
}
// test create object fails without name
func (s *MyXLSuite) TestNewObjectFailsWithEmptyName(c *C) {
_, err := dd.CreateObject("foo", "", "", 0, nil, nil, nil)
c.Assert(err, Not(IsNil))
}
// test create object
func (s *MyXLSuite) TestNewObjectCanBeWritten(c *C) {
err := dd.MakeBucket("foo", "private", nil, nil)
c.Assert(err, IsNil)
data := "Hello World"
hasher := md5.New()
hasher.Write([]byte(data))
expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
actualMetadata, err := dd.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"}, nil)
c.Assert(err, IsNil)
c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
var buffer bytes.Buffer
size, err := dd.GetObject(&buffer, "foo", "obj", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len(data)))
c.Assert(buffer.Bytes(), DeepEquals, []byte(data))
actualMetadata, err = dd.GetObjectMetadata("foo", "obj")
c.Assert(err, IsNil)
c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum)
c.Assert(int64(len(data)), Equals, actualMetadata.Size)
}
// test list objects
func (s *MyXLSuite) TestMultipleNewObjects(c *C) {
c.Assert(dd.MakeBucket("foo5", "private", nil, nil), IsNil)
one := ioutil.NopCloser(bytes.NewReader([]byte("one")))
_, err := dd.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil, nil)
c.Assert(err, IsNil)
two := ioutil.NopCloser(bytes.NewReader([]byte("two")))
_, err = dd.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil, nil)
c.Assert(err, IsNil)
var buffer1 bytes.Buffer
size, err := dd.GetObject(&buffer1, "foo5", "obj1", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("one"))))
c.Assert(buffer1.Bytes(), DeepEquals, []byte("one"))
var buffer2 bytes.Buffer
size, err = dd.GetObject(&buffer2, "foo5", "obj2", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("two"))))
c.Assert(buffer2.Bytes(), DeepEquals, []byte("two"))
/// test list of objects
// test list objects with prefix and delimiter
var resources BucketResourcesMetadata
resources.Prefix = "o"
resources.Delimiter = "1"
resources.Maxkeys = 10
objectsMetadata, resources, err := dd.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(resources.CommonPrefixes[0], Equals, "obj1")
// test list objects with only delimiter
resources.Prefix = ""
resources.Delimiter = "1"
resources.Maxkeys = 10
objectsMetadata, resources, err = dd.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(objectsMetadata[0].Object, Equals, "obj2")
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(resources.CommonPrefixes[0], Equals, "obj1")
// test list objects with only prefix
resources.Prefix = "o"
resources.Delimiter = ""
resources.Maxkeys = 10
objectsMetadata, resources, err = dd.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(objectsMetadata[0].Object, Equals, "obj1")
c.Assert(objectsMetadata[1].Object, Equals, "obj2")
three := ioutil.NopCloser(bytes.NewReader([]byte("three")))
_, err = dd.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil, nil)
c.Assert(err, IsNil)
var buffer bytes.Buffer
size, err = dd.GetObject(&buffer, "foo5", "obj3", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("three"))))
c.Assert(buffer.Bytes(), DeepEquals, []byte("three"))
// test list objects with maxkeys
resources.Prefix = "o"
resources.Delimiter = ""
resources.Maxkeys = 2
objectsMetadata, resources, err = dd.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, true)
c.Assert(len(objectsMetadata), Equals, 2)
}

@ -1,639 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"io"
"io/ioutil"
"log"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick"
"github.com/minio/minio/pkg/s3/signature4"
"github.com/minio/minio/pkg/xl/cache/data"
"github.com/minio/minio/pkg/xl/cache/metadata"
)
// total Number of buckets allowed
const (
totalBuckets = 100
)
// Config xl config
type Config struct {
Version string `json:"version"`
MaxSize uint64 `json:"max-size"`
XLName string `json:"xl-name"`
NodeDiskMap map[string][]string `json:"node-disk-map"`
}
// API - local variables
type API struct {
config *Config
lock *sync.Mutex
objects *data.Cache
multiPartObjects map[string]*data.Cache
storedBuckets *metadata.Cache
nodes map[string]node
buckets map[string]bucket
}
// storedBucket saved bucket
type storedBucket struct {
bucketMetadata BucketMetadata
objectMetadata map[string]ObjectMetadata
partMetadata map[string]map[int]PartMetadata
multiPartSession map[string]MultiPartSession
}
// New - instantiate a new xl
func New() (Interface, *probe.Error) {
var conf *Config
var err *probe.Error
conf, err = LoadConfig()
if err != nil {
conf = &Config{
Version: "0.0.1",
MaxSize: 512000000,
NodeDiskMap: nil,
XLName: "",
}
if err := quick.CheckData(conf); err != nil {
return nil, err.Trace()
}
}
a := API{config: conf}
a.storedBuckets = metadata.NewCache()
a.nodes = make(map[string]node)
a.buckets = make(map[string]bucket)
a.objects = data.NewCache(a.config.MaxSize)
a.multiPartObjects = make(map[string]*data.Cache)
a.objects.OnEvicted = a.evictedObject
a.lock = new(sync.Mutex)
if len(a.config.NodeDiskMap) > 0 {
for k, v := range a.config.NodeDiskMap {
if len(v) == 0 {
return nil, probe.NewError(InvalidDisksArgument{})
}
err := a.AttachNode(k, v)
if err != nil {
return nil, err.Trace()
}
}
/// Initialization, populate all buckets into memory
buckets, err := a.listBuckets()
if err != nil {
return nil, err.Trace()
}
for k, v := range buckets {
var newBucket = storedBucket{}
newBucket.bucketMetadata = v
newBucket.objectMetadata = make(map[string]ObjectMetadata)
newBucket.multiPartSession = make(map[string]MultiPartSession)
newBucket.partMetadata = make(map[string]map[int]PartMetadata)
a.storedBuckets.Set(k, newBucket)
}
a.Heal()
}
return a, nil
}
/// V2 API functions
// GetObject - GET object from cache buffer
func (xl API) GetObject(w io.Writer, bucket string, object string, start, length int64) (int64, *probe.Error) {
xl.lock.Lock()
defer xl.lock.Unlock()
if !IsValidBucket(bucket) {
return 0, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return 0, probe.NewError(ObjectNameInvalid{Object: object})
}
if start < 0 {
return 0, probe.NewError(InvalidRange{
Start: start,
Length: length,
})
}
if !xl.storedBuckets.Exists(bucket) {
return 0, probe.NewError(BucketNotFound{Bucket: bucket})
}
objectKey := bucket + "/" + object
data, ok := xl.objects.Get(objectKey)
var written int64
if !ok {
if len(xl.config.NodeDiskMap) > 0 {
reader, size, err := xl.getObject(bucket, object)
if err != nil {
return 0, err.Trace()
}
if start > 0 {
if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil {
return 0, probe.NewError(err)
}
}
// new proxy writer to capture data read from disk
pw := NewProxyWriter(w)
{
var err error
if length > 0 {
written, err = io.CopyN(pw, reader, length)
if err != nil {
return 0, probe.NewError(err)
}
} else {
written, err = io.CopyN(pw, reader, size)
if err != nil {
return 0, probe.NewError(err)
}
}
}
/// cache object read from disk
ok := xl.objects.Append(objectKey, pw.writtenBytes)
pw.writtenBytes = nil
go debug.FreeOSMemory()
if !ok {
return 0, probe.NewError(InternalError{})
}
return written, nil
}
return 0, probe.NewError(ObjectNotFound{Object: object})
}
var err error
if start == 0 && length == 0 {
written, err = io.CopyN(w, bytes.NewBuffer(data), int64(xl.objects.Len(objectKey)))
if err != nil {
return 0, probe.NewError(err)
}
return written, nil
}
written, err = io.CopyN(w, bytes.NewBuffer(data[start:]), length)
if err != nil {
return 0, probe.NewError(err)
}
return written, nil
}
// GetBucketMetadata -
func (xl API) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) {
xl.lock.Lock()
defer xl.lock.Unlock()
if !IsValidBucket(bucket) {
return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !xl.storedBuckets.Exists(bucket) {
if len(xl.config.NodeDiskMap) > 0 {
bucketMetadata, err := xl.getBucketMetadata(bucket)
if err != nil {
return BucketMetadata{}, err.Trace()
}
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
storedBucket.bucketMetadata = bucketMetadata
xl.storedBuckets.Set(bucket, storedBucket)
}
return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
return xl.storedBuckets.Get(bucket).(storedBucket).bucketMetadata, nil
}
// SetBucketMetadata -
func (xl API) SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error {
xl.lock.Lock()
defer xl.lock.Unlock()
if !IsValidBucket(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !xl.storedBuckets.Exists(bucket) {
return probe.NewError(BucketNotFound{Bucket: bucket})
}
if len(xl.config.NodeDiskMap) > 0 {
if err := xl.setBucketMetadata(bucket, metadata); err != nil {
return err.Trace()
}
}
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
storedBucket.bucketMetadata.ACL = BucketACL(metadata["acl"])
xl.storedBuckets.Set(bucket, storedBucket)
return nil
}
// isMD5SumEqual - returns an error if the md5sums mismatch; on success it returns `nil`
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
if err != nil {
return probe.NewError(err)
}
actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
if err != nil {
return probe.NewError(err)
}
if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
return probe.NewError(BadDigest{})
}
return nil
}
return probe.NewError(InvalidArgument{})
}
// CreateObject - create an object
func (xl API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *signature4.Sign) (ObjectMetadata, *probe.Error) {
xl.lock.Lock()
defer xl.lock.Unlock()
contentType := metadata["contentType"]
objectMetadata, err := xl.createObject(bucket, key, contentType, expectedMD5Sum, size, data, signature)
// free
debug.FreeOSMemory()
return objectMetadata, err.Trace()
}
// createObject - PUT object to cache buffer
func (xl API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signature4.Sign) (ObjectMetadata, *probe.Error) {
if len(xl.config.NodeDiskMap) == 0 {
if size > int64(xl.config.MaxSize) {
generic := GenericObjectError{Bucket: bucket, Object: key}
return ObjectMetadata{}, probe.NewError(EntityTooLarge{
GenericObjectError: generic,
Size: strconv.FormatInt(size, 10),
MaxSize: strconv.FormatUint(xl.config.MaxSize, 10),
})
}
}
if !IsValidBucket(bucket) {
return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
if !xl.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
// get object key
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok {
return ObjectMetadata{}, probe.NewError(ObjectExists{Object: key})
}
if contentType == "" {
contentType = "application/octet-stream"
}
contentType = strings.TrimSpace(contentType)
if strings.TrimSpace(expectedMD5Sum) != "" {
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// not a valid base64 digest; reject the request so the connection can be pro-actively closed
return ObjectMetadata{}, probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
if len(xl.config.NodeDiskMap) > 0 {
objMetadata, err := xl.putObject(
bucket,
key,
expectedMD5Sum,
data,
size,
map[string]string{
"contentType": contentType,
"contentLength": strconv.FormatInt(size, 10),
},
signature,
)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
storedBucket.objectMetadata[objectKey] = objMetadata
xl.storedBuckets.Set(bucket, storedBucket)
return objMetadata, nil
}
// compute md5 and sha256 of the payload while streaming it into the cache
hash := md5.New()
sha256hash := sha256.New()
var err error
var totalLength int64
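// read the payload in 1MiB chunks: feed both hashers and append each chunk to the object cache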
for err == nil {
var length int
byteBuffer := make([]byte, 1024*1024)
length, err = data.Read(byteBuffer)
if length != 0 {
hash.Write(byteBuffer[0:length])
sha256hash.Write(byteBuffer[0:length])
ok := xl.objects.Append(objectKey, byteBuffer[0:length])
if !ok {
return ObjectMetadata{}, probe.NewError(InternalError{})
}
totalLength += int64(length)
go debug.FreeOSMemory()
}
}
if size != 0 && totalLength != size {
// delete the partially cached object; chunks may already have been stored by Append()
xl.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
}
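// a clean read terminates with io.EOF; any other error is a genuine read failure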
if err != io.EOF {
return ObjectMetadata{}, probe.NewError(err)
}
md5SumBytes := hash.Sum(nil)
md5Sum := hex.EncodeToString(md5SumBytes)
// verify that the written object matches the expected md5sum, but only when one was supplied
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
// delete the partially cached object; chunks may already have been stored by Append()
xl.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(BadDigest{})
}
}
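// when a v4 signature is supplied, verify it against the sha256 of the streamed payload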
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
if err != nil {
// delete the partially cached object; chunks may already have been stored by Append()
xl.objects.Delete(objectKey)
return ObjectMetadata{}, err.Trace()
}
if !ok {
// delete the partially cached object; chunks may already have been stored by Append()
xl.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
}
}
m := make(map[string]string)
m["contentType"] = contentType
newObject := ObjectMetadata{
Bucket: bucket,
Object: key,
Metadata: m,
Created: time.Now().UTC(),
MD5Sum: md5Sum,
Size: int64(totalLength),
}
storedBucket.objectMetadata[objectKey] = newObject
xl.storedBuckets.Set(bucket, storedBucket)
return newObject, nil
}
// MakeBucket - create bucket in cache
func (xl API) MakeBucket(bucketName, acl string, location io.Reader, signature *signature4.Sign) *probe.Error {
xl.lock.Lock()
defer xl.lock.Unlock()
// the location constraint is not parsed; it is read only for signature verification
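// sha256 of an empty payload, used when no location constraint body is supplied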
locationSum := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
if location != nil {
locationConstraintBytes, err := ioutil.ReadAll(location)
if err != nil {
return probe.NewError(InternalError{})
}
locationConstraintHashBytes := sha256.Sum256(locationConstraintBytes)
locationSum = hex.EncodeToString(locationConstraintHashBytes[:])
}
if signature != nil {
ok, err := signature.DoesSignatureMatch(locationSum)
if err != nil {
return err.Trace()
}
if !ok {
return probe.NewError(SignDoesNotMatch{})
}
}
if xl.storedBuckets.Stats().Items == totalBuckets {
return probe.NewError(TooManyBuckets{Bucket: bucketName})
}
if !IsValidBucket(bucketName) {
return probe.NewError(BucketNameInvalid{Bucket: bucketName})
}
if !IsValidBucketACL(acl) {
return probe.NewError(InvalidACL{ACL: acl})
}
if xl.storedBuckets.Exists(bucketName) {
return probe.NewError(BucketExists{Bucket: bucketName})
}
if strings.TrimSpace(acl) == "" {
// default is private
acl = "private"
}
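// when backend disks are configured, create the bucket on disk before registering it in the cache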
if len(xl.config.NodeDiskMap) > 0 {
if err := xl.makeBucket(bucketName, BucketACL(acl)); err != nil {
return err.Trace()
}
}
var newBucket = storedBucket{}
newBucket.objectMetadata = make(map[string]ObjectMetadata)
newBucket.multiPartSession = make(map[string]MultiPartSession)
newBucket.partMetadata = make(map[string]map[int]PartMetadata)
newBucket.bucketMetadata = BucketMetadata{}
newBucket.bucketMetadata.Name = bucketName
newBucket.bucketMetadata.Created = time.Now().UTC()
newBucket.bucketMetadata.ACL = BucketACL(acl)
xl.storedBuckets.Set(bucketName, newBucket)
return nil
}
// ListObjects - list objects from cache
func (xl API) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
xl.lock.Lock()
defer xl.lock.Unlock()
if !IsValidBucket(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidPrefix(resources.Prefix) {
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(ObjectNameInvalid{Object: resources.Prefix})
}
if !xl.storedBuckets.Exists(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(BucketNotFound{Bucket: bucket})
}
var results []ObjectMetadata
var keys []string
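// backend path: delegate the listing to the disks and return the keys in sorted order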
if len(xl.config.NodeDiskMap) > 0 {
listObjects, err := xl.listObjects(
bucket,
resources.Prefix,
resources.Marker,
resources.Delimiter,
resources.Maxkeys,
)
if err != nil {
return nil, BucketResourcesMetadata{IsTruncated: false}, err.Trace()
}
resources.CommonPrefixes = listObjects.CommonPrefixes
resources.IsTruncated = listObjects.IsTruncated
for key := range listObjects.Objects {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
results = append(results, listObjects.Objects[key])
}
if resources.IsTruncated && resources.Delimiter != "" {
resources.NextMarker = results[len(results)-1].Object
}
return results, resources, nil
}
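// cache path: collect cached keys under this bucket that match the prefix and fall after the marker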
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
for key := range storedBucket.objectMetadata {
if strings.HasPrefix(key, bucket+"/") {
key = key[len(bucket)+1:]
if strings.HasPrefix(key, resources.Prefix) {
if key > resources.Marker {
keys = append(keys, key)
}
}
}
}
if strings.TrimSpace(resources.Prefix) != "" {
keys = TrimPrefix(keys, resources.Prefix)
}
var prefixes []string
var filteredKeys []string
filteredKeys = keys
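// with a delimiter, split matching keys into leaf objects and common prefixes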
if strings.TrimSpace(resources.Delimiter) != "" {
filteredKeys = HasNoDelimiter(keys, resources.Delimiter)
prefixes = HasDelimiter(keys, resources.Delimiter)
prefixes = SplitDelimiter(prefixes, resources.Delimiter)
prefixes = SortUnique(prefixes)
}
for _, commonPrefix := range prefixes {
resources.CommonPrefixes = append(resources.CommonPrefixes, resources.Prefix+commonPrefix)
}
filteredKeys = RemoveDuplicates(filteredKeys)
sort.Strings(filteredKeys)
for _, key := range filteredKeys {
if len(results) == resources.Maxkeys {
resources.IsTruncated = true
if resources.IsTruncated && resources.Delimiter != "" {
resources.NextMarker = results[len(results)-1].Object
}
return results, resources, nil
}
object := storedBucket.objectMetadata[bucket+"/"+resources.Prefix+key]
results = append(results, object)
}
resources.CommonPrefixes = RemoveDuplicates(resources.CommonPrefixes)
sort.Strings(resources.CommonPrefixes)
return results, resources, nil
}
// byBucketName is a type for sorting bucket metadata by bucket name
type byBucketName []BucketMetadata
func (b byBucketName) Len() int { return len(b) }
func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// ListBuckets - List buckets from cache
func (xl API) ListBuckets() ([]BucketMetadata, *probe.Error) {
xl.lock.Lock()
defer xl.lock.Unlock()
var results []BucketMetadata
if len(xl.config.NodeDiskMap) > 0 {
buckets, err := xl.listBuckets()
if err != nil {
return nil, err.Trace()
}
for _, bucketMetadata := range buckets {
results = append(results, bucketMetadata)
}
sort.Sort(byBucketName(results))
return results, nil
}
for _, bucket := range xl.storedBuckets.GetAll() {
results = append(results, bucket.(storedBucket).bucketMetadata)
}
sort.Sort(byBucketName(results))
return results, nil
}
// GetObjectMetadata - get object metadata from cache
func (xl API) GetObjectMetadata(bucket, key string) (ObjectMetadata, *probe.Error) {
xl.lock.Lock()
defer xl.lock.Unlock()
// check if bucket exists
if !IsValidBucket(bucket) {
return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
if !xl.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok {
return objMetadata, nil
}
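// cache miss: fetch the metadata from the backend disks and cache it for later lookups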
if len(xl.config.NodeDiskMap) > 0 {
objMetadata, err := xl.getObjectMetadata(bucket, key)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
// update
storedBucket.objectMetadata[objectKey] = objMetadata
xl.storedBuckets.Set(bucket, storedBucket)
return objMetadata, nil
}
return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: key})
}
// evictedObject callback function called when an item is evicted from memory
func (xl API) evictedObject(a ...interface{}) {
cacheStats := xl.objects.Stats()
log.Printf("CurrentSize: %d, CurrentItems: %d, TotalEvicted: %d",
cacheStats.Bytes, cacheStats.Items, cacheStats.Evicted)
key := a[0].(string)
// loop through all buckets
for _, bucket := range xl.storedBuckets.GetAll() {
delete(bucket.(storedBucket).objectMetadata, key)
}
debug.FreeOSMemory()
}

@ -1,267 +0,0 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xl
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"io/ioutil"
"os"
"path/filepath"
"testing"
. "gopkg.in/check.v1"
)
func TestCache(t *testing.T) { TestingT(t) }
type MyCacheSuite struct {
root string
}
var _ = Suite(&MyCacheSuite{})
var dc Interface
func (s *MyCacheSuite) SetUpSuite(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "xl-")
c.Assert(err, IsNil)
s.root = root
SetXLConfigPath(filepath.Join(root, "xl.json"))
dc, _ = New()
// testing empty cache
var buckets []BucketMetadata
buckets, perr := dc.ListBuckets()
c.Assert(perr, IsNil)
c.Assert(len(buckets), Equals, 0)
}
func (s *MyCacheSuite) TearDownSuite(c *C) {
os.RemoveAll(s.root)
}
// test make bucket without name
func (s *MyCacheSuite) TestBucketWithoutNameFails(c *C) {
// fail to create new bucket without a name
err := dc.MakeBucket("", "private", nil, nil)
c.Assert(err, Not(IsNil))
err = dc.MakeBucket(" ", "private", nil, nil)
c.Assert(err, Not(IsNil))
}
// test empty bucket
func (s *MyCacheSuite) TestEmptyBucket(c *C) {
c.Assert(dc.MakeBucket("foo1", "private", nil, nil), IsNil)
// check if bucket is empty
var resources BucketResourcesMetadata
resources.Maxkeys = 1
objectsMetadata, resources, err := dc.ListObjects("foo1", resources)
c.Assert(err, IsNil)
c.Assert(len(objectsMetadata), Equals, 0)
c.Assert(resources.CommonPrefixes, DeepEquals, []string{})
c.Assert(resources.IsTruncated, Equals, false)
}
// test bucket list
func (s *MyCacheSuite) TestMakeBucketAndList(c *C) {
// create bucket
err := dc.MakeBucket("foo2", "private", nil, nil)
c.Assert(err, IsNil)
// check bucket exists
buckets, err := dc.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 5)
c.Assert(buckets[0].ACL, Equals, BucketACL("private"))
}
// test re-create bucket
func (s *MyCacheSuite) TestMakeBucketWithSameNameFails(c *C) {
err := dc.MakeBucket("foo3", "private", nil, nil)
c.Assert(err, IsNil)
err = dc.MakeBucket("foo3", "private", nil, nil)
c.Assert(err, Not(IsNil))
}
// test make multiple buckets
func (s *MyCacheSuite) TestCreateMultipleBucketsAndList(c *C) {
// add a second bucket
err := dc.MakeBucket("foo4", "private", nil, nil)
c.Assert(err, IsNil)
err = dc.MakeBucket("bar1", "private", nil, nil)
c.Assert(err, IsNil)
buckets, err := dc.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 2)
c.Assert(buckets[0].Name, Equals, "bar1")
c.Assert(buckets[1].Name, Equals, "foo4")
err = dc.MakeBucket("foobar1", "private", nil, nil)
c.Assert(err, IsNil)
buckets, err = dc.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 3)
c.Assert(buckets[2].Name, Equals, "foobar1")
}
// test object create without bucket
func (s *MyCacheSuite) TestNewObjectFailsWithoutBucket(c *C) {
_, err := dc.CreateObject("unknown", "obj", "", 0, nil, nil, nil)
c.Assert(err, Not(IsNil))
}
// test create object metadata
func (s *MyCacheSuite) TestNewObjectMetadata(c *C) {
data := "Hello World"
hasher := md5.New()
hasher.Write([]byte(data))
expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
err := dc.MakeBucket("foo6", "private", nil, nil)
c.Assert(err, IsNil)
objectMetadata, err := dc.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"}, nil)
c.Assert(err, IsNil)
c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json")
}
// test create object fails without name
func (s *MyCacheSuite) TestNewObjectFailsWithEmptyName(c *C) {
_, err := dc.CreateObject("foo", "", "", 0, nil, nil, nil)
c.Assert(err, Not(IsNil))
}
// test create object
func (s *MyCacheSuite) TestNewObjectCanBeWritten(c *C) {
err := dc.MakeBucket("foo", "private", nil, nil)
c.Assert(err, IsNil)
data := "Hello World"
hasher := md5.New()
hasher.Write([]byte(data))
expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
actualMetadata, err := dc.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"}, nil)
c.Assert(err, IsNil)
c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
var buffer bytes.Buffer
size, err := dc.GetObject(&buffer, "foo", "obj", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len(data)))
c.Assert(buffer.Bytes(), DeepEquals, []byte(data))
actualMetadata, err = dc.GetObjectMetadata("foo", "obj")
c.Assert(err, IsNil)
c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum)
c.Assert(int64(len(data)), Equals, actualMetadata.Size)
}
// test list objects
func (s *MyCacheSuite) TestMultipleNewObjects(c *C) {
c.Assert(dc.MakeBucket("foo5", "private", nil, nil), IsNil)
one := ioutil.NopCloser(bytes.NewReader([]byte("one")))
_, err := dc.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil, nil)
c.Assert(err, IsNil)
two := ioutil.NopCloser(bytes.NewReader([]byte("two")))
_, err = dc.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil, nil)
c.Assert(err, IsNil)
var buffer1 bytes.Buffer
size, err := dc.GetObject(&buffer1, "foo5", "obj1", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("one"))))
c.Assert(buffer1.Bytes(), DeepEquals, []byte("one"))
var buffer2 bytes.Buffer
size, err = dc.GetObject(&buffer2, "foo5", "obj2", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("two"))))
c.Assert(buffer2.Bytes(), DeepEquals, []byte("two"))
/// test list of objects
// test list objects with prefix and delimiter
var resources BucketResourcesMetadata
resources.Prefix = "o"
resources.Delimiter = "1"
resources.Maxkeys = 10
objectsMetadata, resources, err := dc.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(resources.CommonPrefixes[0], Equals, "obj1")
// test list objects with only delimiter
resources.Prefix = ""
resources.Delimiter = "1"
resources.Maxkeys = 10
objectsMetadata, resources, err = dc.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(objectsMetadata[0].Object, Equals, "obj2")
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(resources.CommonPrefixes[0], Equals, "obj1")
// test list objects with only prefix
resources.Prefix = "o"
resources.Delimiter = ""
resources.Maxkeys = 10
objectsMetadata, resources, err = dc.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(objectsMetadata[0].Object, Equals, "obj1")
c.Assert(objectsMetadata[1].Object, Equals, "obj2")
three := ioutil.NopCloser(bytes.NewReader([]byte("three")))
_, err = dc.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil, nil)
c.Assert(err, IsNil)
var buffer bytes.Buffer
size, err = dc.GetObject(&buffer, "foo5", "obj3", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("three"))))
c.Assert(buffer.Bytes(), DeepEquals, []byte("three"))
// test list objects with maxkeys
resources.Prefix = "o"
resources.Delimiter = ""
resources.Maxkeys = 2
objectsMetadata, resources, err = dc.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, true)
c.Assert(len(objectsMetadata), Equals, 2)
}