list/xl: Fix the way marker is handled in leafDirectory verification.

Branch: master
Author: Harshavardhana, committed by Harshavardhana (9 years ago)
Parent: c302875774
Commit: c7bf471c9e
8 changed files:
1. api-router.go (2 lines changed)
2. object-api.go (4 lines changed)
3. routers.go (19 lines changed)
4. storage-rpc-server.go (10 lines changed)
5. web-router.go (2 lines changed)
6. xl-v1-healfile.go (256 lines changed)
7. xl-v1-readfile.go (10 lines changed)
8. xl-v1.go (240 lines changed)

api-router.go
@@ -20,7 +20,7 @@ import router "github.com/gorilla/mux"
 // objectAPIHandler implements and provides http handlers for S3 API.
 type objectAPIHandlers struct {
-	ObjectAPI *objectAPI
+	ObjectAPI objectAPI
 }

 // registerAPIRouter - registers S3 compatible APIs.

object-api.go
@@ -33,8 +33,8 @@ type objectAPI struct {
 	storage StorageAPI
 }

-func newObjectLayer(storage StorageAPI) *objectAPI {
-	return &objectAPI{storage}
+func newObjectLayer(storage StorageAPI) objectAPI {
+	return objectAPI{storage}
 }

 /// Bucket operations

routers.go
@@ -26,7 +26,7 @@ import (
 // configureServer handler returns final handler for the http server.
 func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
-	var storageHandlers StorageAPI
+	var storageAPI StorageAPI
 	var e error
 	if len(srvCmdConfig.exportPaths) == 1 {
 		// Verify if export path is a local file system path.
@@ -34,37 +34,40 @@ func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
 		st, e = os.Stat(srvCmdConfig.exportPaths[0])
 		if e == nil && st.Mode().IsDir() {
 			// Initialize storage API.
-			storageHandlers, e = newFS(srvCmdConfig.exportPaths[0])
+			storageAPI, e = newFS(srvCmdConfig.exportPaths[0])
 			fatalIf(probe.NewError(e), "Initializing fs failed.", nil)
 		} else {
 			// Initialize network storage API.
-			storageHandlers, e = newNetworkFS(srvCmdConfig.exportPaths[0])
+			storageAPI, e = newNetworkFS(srvCmdConfig.exportPaths[0])
 			fatalIf(probe.NewError(e), "Initializing network fs failed.", nil)
 		}
 	} else {
 		// Initialize XL storage API.
-		storageHandlers, e = newXL(srvCmdConfig.exportPaths...)
+		storageAPI, e = newXL(srvCmdConfig.exportPaths...)
 		fatalIf(probe.NewError(e), "Initializing XL failed.", nil)
 	}

 	// Initialize object layer.
-	objectAPI := newObjectLayer(storageHandlers)
+	objAPI := newObjectLayer(storageAPI)
+
+	// Initialize storage rpc.
+	storageRPC := newStorageRPC(storageAPI)

 	// Initialize API.
 	apiHandlers := objectAPIHandlers{
-		ObjectAPI: objectAPI,
+		ObjectAPI: objAPI,
 	}

 	// Initialize Web.
 	webHandlers := &webAPIHandlers{
-		ObjectAPI: objectAPI,
+		ObjectAPI: objAPI,
 	}

 	// Initialize router.
 	mux := router.NewRouter()

 	// Register all routers.
-	registerStorageRPCRouter(mux, storageHandlers)
+	registerStorageRPCRouter(mux, storageRPC)
 	registerWebRouter(mux, webHandlers)
 	registerAPIRouter(mux, apiHandlers)

 	// Add new routers here.

storage-rpc-server.go
@@ -99,11 +99,15 @@ func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericRep
 	return nil
 }

-// registerStorageRPCRouter - register storage rpc router.
-func registerStorageRPCRouter(mux *router.Router, storageAPI StorageAPI) {
-	stServer := &storageServer{
+// Initialize new storage rpc.
+func newStorageRPC(storageAPI StorageAPI) *storageServer {
+	return &storageServer{
 		storage: storageAPI,
 	}
+}
+
+// registerStorageRPCRouter - register storage rpc router.
+func registerStorageRPCRouter(mux *router.Router, stServer *storageServer) {
 	storageRPCServer := rpc.NewServer()
 	storageRPCServer.RegisterName("Storage", stServer)
 	storageRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter()

web-router.go
@@ -30,7 +30,7 @@ import (
 // webAPI container for Web API.
 type webAPIHandlers struct {
-	ObjectAPI *objectAPI
+	ObjectAPI objectAPI
 }

 // indexHandler - Handler to serve index.html

xl-v1-healfile.go (new file)
@@ -0,0 +1,256 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"encoding/json"
"errors"
"fmt"
"io"
slashpath "path"
"strconv"
)
func (xl XL) selfHeal(volume string, path string) error {
totalShards := xl.DataBlocks + xl.ParityBlocks
needsSelfHeal := make([]bool, totalShards)
var metadata = make(map[string]string)
var readers = make([]io.Reader, totalShards)
var writers = make([]io.WriteCloser, totalShards)
for index, disk := range xl.storageDisks {
metadataFile := slashpath.Join(path, metadataFile)
// Start from the beginning, we are not reading partial metadata files.
offset := int64(0)
metadataReader, err := disk.ReadFile(volume, metadataFile, offset)
if err != nil {
if err != errFileNotFound {
continue
}
// Needs healing if part.json is not found
needsSelfHeal[index] = true
continue
}
defer metadataReader.Close()
decoder := json.NewDecoder(metadataReader)
if err = decoder.Decode(&metadata); err != nil {
// Needs healing if part.json is not parsable.
needsSelfHeal[index] = true
}
erasurePart := slashpath.Join(path, fmt.Sprintf("part.%d", index))
erasuredPartReader, err := disk.ReadFile(volume, erasurePart, offset)
if err != nil {
if err == errFileNotFound {
// Needs healing if part file not found
needsSelfHeal[index] = true
}
return err
}
readers[index] = erasuredPartReader
defer erasuredPartReader.Close()
}
// Check if there is at least one part that needs to be healed.
atleastOneSelfHeal := false
for _, shNeeded := range needsSelfHeal {
if shNeeded {
atleastOneSelfHeal = true
break
}
}
if !atleastOneSelfHeal {
// Return if healing not needed anywhere.
return nil
}
// create writers for parts where healing is needed.
for index, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
var err error
erasurePart := slashpath.Join(path, fmt.Sprintf("part.%d", index))
writers[index], err = xl.storageDisks[index].CreateFile(volume, erasurePart)
if err != nil {
// Unexpected error
closeAndRemoveWriters(writers...)
return err
}
}
size, err := strconv.ParseInt(metadata["file.size"], 10, 64)
if err != nil {
closeAndRemoveWriters(writers...)
return err
}
var totalLeft = size
for totalLeft > 0 {
// Figure out the right blockSize.
var curBlockSize int
if erasureBlockSize < totalLeft {
curBlockSize = erasureBlockSize
} else {
curBlockSize = int(totalLeft)
}
// Calculate the current shard size.
curShardSize := getEncodedBlockLen(curBlockSize, xl.DataBlocks)
enShards := make([][]byte, totalShards)
// Loop through all readers and read.
for index, reader := range readers {
// Initialize the shard slice and fill it with data from each part.
// ReedSolomon.Verify() expects the slice to be non-nil even if the
// particular part needs healing.
enShards[index] = make([]byte, curShardSize)
if needsSelfHeal[index] {
// Skip reading if the part needs healing.
continue
}
_, e := io.ReadFull(reader, enShards[index])
if e != nil && e != io.ErrUnexpectedEOF {
enShards[index] = nil
}
}
// Check blocks if they are all zero in length.
if checkBlockSize(enShards) == 0 {
err = errors.New("Data likely corrupted, all blocks are zero in length.")
return err
}
// Verify the shards.
ok, e := xl.ReedSolomon.Verify(enShards)
if e != nil {
closeAndRemoveWriters(writers...)
return e
}
// Verification failed, shards require reconstruction.
if !ok {
for index, shNeeded := range needsSelfHeal {
if shNeeded {
// Reconstruct() rebuilds a part when its slice is set to nil.
enShards[index] = nil
}
}
e = xl.ReedSolomon.Reconstruct(enShards)
if e != nil {
closeAndRemoveWriters(writers...)
return e
}
// Verify reconstructed shards again.
ok, e = xl.ReedSolomon.Verify(enShards)
if e != nil {
closeAndRemoveWriters(writers...)
return e
}
if !ok {
// Shards cannot be reconstructed, corrupted data.
e = errors.New("Verification failed after reconstruction, data likely corrupted.")
closeAndRemoveWriters(writers...)
return e
}
}
for index, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
_, e := writers[index].Write(enShards[index])
if e != nil {
closeAndRemoveWriters(writers...)
return e
}
}
totalLeft = totalLeft - erasureBlockSize
}
// After successful healing Close() the writer so that the temp
// files are committed to their location.
for index, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
writers[index].Close()
}
// Write part.json wherever healing was done.
var metadataWriters = make([]io.WriteCloser, len(xl.storageDisks))
for index, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
metadataFile := slashpath.Join(path, metadataFile)
metadataWriters[index], err = xl.storageDisks[index].CreateFile(volume, metadataFile)
if err != nil {
closeAndRemoveWriters(writers...)
return err
}
}
metadataBytes, err := json.Marshal(metadata)
if err != nil {
closeAndRemoveWriters(metadataWriters...)
return err
}
for index, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
_, err = metadataWriters[index].Write(metadataBytes)
if err != nil {
closeAndRemoveWriters(metadataWriters...)
return err
}
}
// Metadata written for all the healed parts hence Close() so that
// temp files can be committed.
for index := range xl.storageDisks {
if !needsSelfHeal[index] {
continue
}
metadataWriters[index].Close()
}
return nil
}
// self heal.
type selfHeal struct {
volume string
path string
errCh chan<- error
}
// selfHealRoutine - starts a go routine and listens on a channel for healing requests.
func (xl *XL) selfHealRoutine() {
xl.selfHealCh = make(chan selfHeal)
// Healing request can be made like this:
// errCh := make(chan error)
// xl.selfHealCh <- selfHeal{"testbucket", "testobject", errCh}
// fmt.Println(<-errCh)
go func() {
for sh := range xl.selfHealCh {
if sh.volume == "" || sh.path == "" {
sh.errCh <- errors.New("volume or path can not be empty")
continue
}
xl.selfHeal(sh.volume, sh.path)
sh.errCh <- nil
}
}()
}
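
The selfHeal routine above follows a verify-then-reconstruct pattern on top of the erasure coder: shards that need healing are skipped while reading, left zero-filled (or nil) for the coder, and rebuilt with Reconstruct() before being written back out. Below is a minimal, self-contained sketch of that pattern using the klauspost/reedsolomon library that provides the reedsolomon.Encoder interface referenced in the XL struct; the 4+2 shard layout and the sample payload are assumptions for illustration only, the real counts come from xl.DataBlocks and xl.ParityBlocks.

package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// Hypothetical layout: 4 data shards + 2 parity shards.
	enc, err := reedsolomon.New(4, 2)
	if err != nil {
		panic(err)
	}

	// Split a payload into data shards and compute the parity shards.
	shards, err := enc.Split([]byte("payload that will be erasure coded for this sketch"))
	if err != nil {
		panic(err)
	}
	if err = enc.Encode(shards); err != nil {
		panic(err)
	}

	// Simulate a disk that needs healing: drop one shard entirely.
	shards[1] = nil

	// Reconstruct rebuilds every nil shard from the surviving data and
	// parity shards, which is what selfHeal relies on for missing parts.
	if err = enc.Reconstruct(shards); err != nil {
		panic(err)
	}

	// Verify confirms the parity is consistent again after reconstruction.
	ok, err := enc.Verify(shards)
	fmt.Println("verified after reconstruction:", ok, err)
}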

xl-v1-readfile.go
@@ -90,7 +90,7 @@ func (xl XL) getReadFileQuorumDisks(volume, path string) (quorumDisks []quorumDi
 	for disk, version := range diskVersionMap {
 		if version > higherVersion {
 			higherVersion = version
-			quorumDisks = []quorumDisk{quorumDisk{disk, i}}
+			quorumDisks = []quorumDisk{{disk, i}}
 		} else if version == higherVersion {
 			quorumDisks = append(quorumDisks, quorumDisk{disk, i})
 		}
@@ -133,10 +133,10 @@ func (xl XL) ReadFile(volume, path string, offset int64) (io.ReadCloser, error)
 		return nil, errInvalidArgument
 	}

-	// Acquire a read lock.
-	readLock := true
-	xl.lockNS(volume, path, readLock)
-	defer xl.unlockNS(volume, path, readLock)
+	// Acquire a read lock. - TODO - disable this due to stack overflow bug.
+	// readLock := true
+	// xl.lockNS(volume, path, readLock)
+	// defer xl.unlockNS(volume, path, readLock)

 	// Check read quorum.
 	quorumDisks := xl.getReadFileQuorumDisks(volume, path)

xl-v1.go
@@ -21,7 +21,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io"
 	"os"
 	slashpath "path"
 	"sort"
@@ -40,13 +39,6 @@ const (
 	maxErasureBlocks = 16
 )

-// Self Heal data
-type selfHeal struct {
-	volume string
-	fsPath string
-	errCh  chan error
-}
-
 // XL layer structure.
 type XL struct {
 	ReedSolomon reedsolomon.Encoder // Erasure encoder/decoder.
@@ -57,7 +49,9 @@ type XL struct {
 	nameSpaceLockMapMutex *sync.Mutex
 	readQuorum int
 	writeQuorum int
-	selfHealCh chan selfHeal
+
+	// Heal input/output channel.
+	selfHealCh chan selfHeal
 }

 // lockNS - locks the given resource, using a previously allocated
@@ -163,7 +157,7 @@ func newXL(disks ...string) (StorageAPI, error) {
 		xl.writeQuorum = len(xl.storageDisks)
 	}

-	// Start self heal go routine.
+	// Start self heal go routine, taking inputs over self heal channel.
 	xl.selfHealRoutine()

 	// Return successfully initialized.
@@ -227,8 +221,9 @@ func (xl XL) StatVol(volume string) (volInfo VolInfo, err error) {
 // format it means that the parent directory is the actual object name.
 func (xl XL) isLeafDirectory(volume, leafPath string) (isLeaf bool) {
 	var allFileInfos []FileInfo
+	var markerPath string
 	for {
-		fileInfos, eof, e := xl.storageDisks[0].ListFiles(volume, leafPath, "", false, 1000)
+		fileInfos, eof, e := xl.storageDisks[0].ListFiles(volume, leafPath, markerPath, false, 1000)
 		if e != nil {
 			break
 		}
@@ -236,6 +231,8 @@ func (xl XL) isLeafDirectory(volume, leafPath string) (isLeaf bool) {
 		if eof {
 			break
 		}
+		// MarkerPath to get the next set of files.
+		markerPath = allFileInfos[len(allFileInfos)-1].Name
 	}
 	for _, fileInfo := range allFileInfos {
 		if fileInfo.Mode.IsDir() {
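
The change above is the heart of this commit: isLeafDirectory previously passed an empty marker on every iteration, so ListFiles kept returning the same first page and the loop could never advance past the first 1000 entries; carrying the last returned name forward lets each call resume where the previous one stopped. Below is a minimal sketch of that marker-driven pagination pattern, written against the ListFiles signature visible in the diff (volume, prefix, marker, recursive, count). The helper name listAllEntries and the disk parameter are illustrative only and are not part of this commit.

// listAllEntries - hypothetical helper illustrating marker-based listing.
func listAllEntries(disk StorageAPI, volume, leafPath string) (allFileInfos []FileInfo) {
	// An empty marker starts the listing from the beginning.
	var markerPath string
	for {
		fileInfos, eof, err := disk.ListFiles(volume, leafPath, markerPath, false, 1000)
		if err != nil {
			break
		}
		allFileInfos = append(allFileInfos, fileInfos...)
		if eof {
			break
		}
		// Resume the next call after the last entry seen so far.
		markerPath = allFileInfos[len(allFileInfos)-1].Name
	}
	return allFileInfos
}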
@@ -477,224 +474,3 @@ func (xl XL) DeleteFile(volume, path string) error {
 	}
 	return nil
 }
// selfHeal - called by the healing go-routine, heals using erasure coding.
func (xl XL) selfHeal(volume string, fsPath string) error {
totalShards := xl.DataBlocks + xl.ParityBlocks
needsSelfHeal := make([]bool, totalShards)
var metadata = make(map[string]string)
var readers = make([]io.Reader, totalShards)
var writers = make([]io.WriteCloser, totalShards)
for index, disk := range xl.storageDisks {
metadataFile := slashpath.Join(fsPath, metadataFile)
// Start from the beginning, we are not reading partial metadata files.
offset := int64(0)
metadataReader, err := disk.ReadFile(volume, metadataFile, offset)
if err != nil {
if err != errFileNotFound {
continue
}
// Needs healing if part.json is not found
needsSelfHeal[index] = true
continue
}
defer metadataReader.Close()
decoder := json.NewDecoder(metadataReader)
if err = decoder.Decode(&metadata); err != nil {
// needs healing if parts.json is not parsable
needsSelfHeal[index] = true
}
erasurePart := slashpath.Join(fsPath, fmt.Sprintf("part.%d", index))
erasuredPartReader, err := disk.ReadFile(volume, erasurePart, offset)
if err != nil {
if err != errFileNotFound {
continue
}
// needs healing if part file not found
needsSelfHeal[index] = true
} else {
readers[index] = erasuredPartReader
defer erasuredPartReader.Close()
}
}
// Check if there is at least one part that needs to be healed.
atleastOneSelfHeal := false
for _, shNeeded := range needsSelfHeal {
if shNeeded {
atleastOneSelfHeal = true
break
}
}
if !atleastOneSelfHeal {
// return if healing not needed anywhere.
return nil
}
// create writers for parts where healing is needed.
for i, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
var err error
erasurePart := slashpath.Join(fsPath, fmt.Sprintf("part.%d", i))
writers[i], err = xl.storageDisks[i].CreateFile(volume, erasurePart)
if err != nil {
// Unexpected error
closeAndRemoveWriters(writers...)
return err
}
}
size, err := strconv.ParseInt(metadata["file.size"], 10, 64)
if err != nil {
closeAndRemoveWriters(writers...)
return err
}
var totalLeft = size
for totalLeft > 0 {
// Figure out the right blockSize.
var curBlockSize int
if erasureBlockSize < totalLeft {
curBlockSize = erasureBlockSize
} else {
curBlockSize = int(totalLeft)
}
// Calculate the current shard size.
curShardSize := getEncodedBlockLen(curBlockSize, xl.DataBlocks)
enShards := make([][]byte, totalShards)
// Loop through all readers and read.
for i, reader := range readers {
// Initialize shard slice and fill the data from each parts.
// ReedSolomon.Verify() expects that slice is not nil even if the particular
// part needs healing.
enShards[i] = make([]byte, curShardSize)
if needsSelfHeal[i] {
// Skip reading if the part needs healing.
continue
}
_, e := io.ReadFull(reader, enShards[i])
if e != nil && e != io.ErrUnexpectedEOF {
enShards[i] = nil
}
}
// Check blocks if they are all zero in length.
if checkBlockSize(enShards) == 0 {
err = errors.New("Data likely corrupted, all blocks are zero in length.")
return err
}
// Verify the shards.
ok, e := xl.ReedSolomon.Verify(enShards)
if e != nil {
closeAndRemoveWriters(writers...)
return e
}
// Verification failed, shards require reconstruction.
if !ok {
for i, shNeeded := range needsSelfHeal {
if shNeeded {
// Reconstructs() reconstructs the parts if the array is nil.
enShards[i] = nil
}
}
e = xl.ReedSolomon.Reconstruct(enShards)
if e != nil {
closeAndRemoveWriters(writers...)
return e
}
// Verify reconstructed shards again.
ok, e = xl.ReedSolomon.Verify(enShards)
if e != nil {
closeAndRemoveWriters(writers...)
return e
}
if !ok {
// Shards cannot be reconstructed, corrupted data.
e = errors.New("Verification failed after reconstruction, data likely corrupted.")
closeAndRemoveWriters(writers...)
return e
}
}
for i, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
_, e := writers[i].Write(enShards[i])
if e != nil {
closeAndRemoveWriters(writers...)
return e
}
}
totalLeft = totalLeft - erasureBlockSize
}
// After successful healing Close() the writer so that the temp files are renamed.
for i, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
writers[i].Close()
}
// Write part.json where ever healing was done.
var metadataWriters = make([]io.WriteCloser, len(xl.storageDisks))
for i, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
metadataFile := slashpath.Join(fsPath, metadataFile)
metadataWriters[i], err = xl.storageDisks[i].CreateFile(volume, metadataFile)
if err != nil {
closeAndRemoveWriters(writers...)
return err
}
}
metadataBytes, err := json.Marshal(metadata)
if err != nil {
closeAndRemoveWriters(metadataWriters...)
return err
}
for i, shNeeded := range needsSelfHeal {
if !shNeeded {
continue
}
_, err := metadataWriters[i].Write(metadataBytes)
if err != nil {
closeAndRemoveWriters(metadataWriters...)
return err
}
}
// part.json written for all the healed parts hence Close() so that temp files can be renamed.
for index := range xl.storageDisks {
if !needsSelfHeal[index] {
continue
}
metadataWriters[index].Close()
}
return nil
}
// selfHealRoutine - starts a go routine and listens on a channel for healing requests.
func (xl *XL) selfHealRoutine() {
xl.selfHealCh = make(chan selfHeal)
// Healing request can be made like this:
// errCh := make(chan error)
// xl.selfHealCh <- selfHeal{"testbucket", "testobject", errCh}
// fmt.Println(<-errCh)
go func() {
for sh := range xl.selfHealCh {
if sh.volume == "" || sh.fsPath == "" {
sh.errCh <- errors.New("volume or path can not be empty")
continue
}
xl.selfHeal(sh.volume, sh.fsPath)
sh.errCh <- nil
}
}()
}
