diff --git a/common/common.go b/common/common.go
index b27f986..a59f917 100644
--- a/common/common.go
+++ b/common/common.go
@@ -6,11 +6,16 @@ import (
 	"crypto/sha256"
 	"fmt"
 	"hash"
+	"net/http"
 	"os"
 	"regexp"
+	"runtime"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
+
+	"golang.org/x/sync/semaphore"
 )
 
 // SyncServiceError is a common error type used in the sync service
@@ -258,6 +263,79 @@ func GetHash(hashAlgo string) (hash.Hash, crypto.Hash, SyncServiceError) {
 	}
 }
 
+func GetStartAndEndRangeFromRangeHeader(request *http.Request) (int64, int64, SyncServiceError) {
+	// Get the range from the "Range: bytes={startOffset}-{endOffset}" header
+	requestRangeAll := request.Header.Get("Range")
+	if requestRangeAll == "" {
+		return -1, -1, nil
+	}
+	requestRange := requestRangeAll[6:]
+	ranges := strings.Split(requestRange, "-")
+
+	if len(ranges) != 2 {
+		return -1, -1, &InvalidRequest{Message: "Failed to parse Range header: " + requestRangeAll}
+	}
+
+	beginOffset, err := strconv.ParseInt(ranges[0], 10, 64)
+	if err != nil {
+		return -1, -1, &InvalidRequest{Message: "Failed to get begin offset from Range header: " + err.Error()}
+	}
+
+	endOffset, err := strconv.ParseInt(ranges[1], 10, 64)
+	if err != nil {
+		return -1, -1, &InvalidRequest{Message: "Failed to get end offset from Range header: " + err.Error()}
+	}
+
+	if beginOffset > endOffset {
+		return -1, -1, &InvalidRequest{Message: "Begin offset cannot be greater than end offset"}
+	}
+
+	return beginOffset, endOffset, nil
+}
+
+// GetStartAndEndRangeFromContentRangeHeader parses a "Content-Range: bytes {startOffset}-{endOffset}/{totalSize}" header (for example "Content-Range: bytes 1-2/30")
+// Returns totalSize, startOffset, endOffset, err
+func GetStartAndEndRangeFromContentRangeHeader(request *http.Request) (int64, int64, int64, SyncServiceError) {
+	// Get the range from the "Content-Range: bytes {startOffset}-{endOffset}/{totalSize}" header
+	requestContentRange := request.Header.Get("Content-Range")
+	if requestContentRange == "" {
+		return 0, -1, -1, nil
+	}
+	contentRange := strings.Replace(requestContentRange, "bytes ", "", -1)
+	// 1-2/30
+	ranges := strings.Split(contentRange, "/")
+
+	if len(ranges) != 2 {
+		return 0, -1, -1, &InvalidRequest{Message: "Failed to parse Content-Range header: " + requestContentRange}
+	}
+	// [1-2, 30]
+	totalSize, err := strconv.ParseInt(ranges[1], 10, 64)
+	if err != nil {
+		return 0, -1, -1, &InvalidRequest{Message: "Failed to get total size from Content-Range header: " + err.Error()}
+	}
+
+	offsets := strings.Split(ranges[0], "-")
+	if len(offsets) != 2 {
+		return 0, -1, -1, &InvalidRequest{Message: "Failed to get offsets from Content-Range header: " + requestContentRange}
+	}
+
+	startOffset, err := strconv.ParseInt(offsets[0], 10, 64)
+	if err != nil {
+		return 0, -1, -1, &InvalidRequest{Message: "Failed to get start offset from Content-Range header: " + err.Error()}
+	}
+
+	endOffset, err := strconv.ParseInt(offsets[1], 10, 64)
+	if err != nil {
+		return 0, -1, -1, &InvalidRequest{Message: "Failed to get end offset from Content-Range header: " + err.Error()}
+	}
+
+	if startOffset > endOffset {
+		return 0, -1, -1, &InvalidRequest{Message: "Start offset cannot be greater than end offset"}
+	}
+
+	return totalSize, startOffset, endOffset, nil
+}
+
 // MetaData is the metadata that identifies and defines the sync service object.
 // Every object includes metadata (mandatory) and data (optional). The metadata and data can be updated independently.
 // Each sync service node (ESS) has an address that is composed of the node's ID, Type, and Organization.
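For readers skimming the hunk above: the two new helpers accept the standard `Range: bytes={start}-{end}` and `Content-Range: bytes {start}-{end}/{total}` header formats. The following stand-alone sketch (stdlib only) mirrors the Content-Range parsing so the expected inputs and outputs are easy to see; `parseContentRange`, the example URL, and the sample sizes are illustrative and are not part of the package.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
	"strings"
)

// parseContentRange is a simplified stand-alone copy of what the new
// GetStartAndEndRangeFromContentRangeHeader helper does: it expects a header of
// the form "Content-Range: bytes {start}-{end}/{total}" and returns total, start, end.
func parseContentRange(r *http.Request) (total, start, end int64, err error) {
	h := r.Header.Get("Content-Range")
	if h == "" {
		return 0, -1, -1, nil // no header: the caller treats the body as the whole object
	}
	parts := strings.Split(strings.TrimPrefix(h, "bytes "), "/") // e.g. ["0-1048575", "5242880"]
	if len(parts) != 2 {
		return 0, -1, -1, fmt.Errorf("malformed Content-Range %q", h)
	}
	if total, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
		return 0, -1, -1, err
	}
	offsets := strings.Split(parts[0], "-")
	if len(offsets) != 2 {
		return 0, -1, -1, fmt.Errorf("malformed offsets in %q", h)
	}
	if start, err = strconv.ParseInt(offsets[0], 10, 64); err != nil {
		return 0, -1, -1, err
	}
	if end, err = strconv.ParseInt(offsets[1], 10, 64); err != nil {
		return 0, -1, -1, err
	}
	return total, start, end, nil
}

func main() {
	// Hypothetical object-data URL; the real path is built by the sync-service API layer.
	req := httptest.NewRequest("PUT", "/api/v1/objects/myorg/type1/obj1/data", nil)
	req.Header.Set("Content-Range", "bytes 0-1048575/5242880")
	fmt.Println(parseContentRange(req)) // 5242880 0 1048575 <nil>
}
```

An empty header yields (0, -1, -1, nil), which the handlers added later in this PR interpret as "the request body carries the whole object".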
@@ -403,6 +481,10 @@ type MetaData struct {
 	// Optional field, default is false (not visiable to all users)
 	Public bool `json:"public" bson:"public"`
 
+	// DataVerified is an internal field set by the ESS after it downloads data from the CSS, or by the CSS after the ESS uploads data
+	// Data can be obtained only when the DataVerified field is true
+	DataVerified bool `json:"dataVerified" bson:"data-verified"`
+
 	// OwnerID is an internal field indicating who creates the object
 	// This field should not be set by users
 	OwnerID string `json:"ownerID" bson:"owner-id"`
@@ -598,6 +680,7 @@ const (
 const (
 	Update                = "update"
 	Updated               = "updated"
+	HandleUpdate          = "handleUpdate"
 	Consumed              = "consumed"
 	AckConsumed           = "ackconsumed"
 	ConsumedByDestination = "consumedByDest"
@@ -776,6 +859,15 @@ func NewLocks(name string) *Locks {
 	return &locks
 }
 
+// ObjectDownloadSemaphore limits the number of concurrent SPI object downloads
+var ObjectDownloadSemaphore *semaphore.Weighted
+
+// InitObjectDownloadSemaphore initializes ObjectDownloadSemaphore
+func InitObjectDownloadSemaphore() {
+	maxWorkers := runtime.GOMAXPROCS(-1) * Configuration.HTTPCSSObjDownloadConcurrencyMultiplier
+	ObjectDownloadSemaphore = semaphore.NewWeighted(int64(maxWorkers))
+}
+
 // ObjectLocks are locks for object and notification changes
 var ObjectLocks Locks
 
@@ -942,6 +1034,13 @@ func IsValidHashAlgorithm(hashAlgorithm string) bool {
 	return false
 }
 
+func NeedDataVerification(metaData MetaData) bool {
+	if IsValidHashAlgorithm(metaData.HashAlgorithm) && metaData.PublicKey != "" && metaData.Signature != "" {
+		return true
+	}
+	return false
+}
+
 // IsValidName checks if the string only contains letters, digits, and !@#%^*-_.~
 var IsValidName = regexp.MustCompile(`^[a-zA-Z0-9|!|@|#|$|^|*|\-|_|.|~|\pL|\pN]+$`).MatchString
 
diff --git a/common/config.go b/common/config.go
index d0a1afc..9a100ef 100644
--- a/common/config.go
+++ b/common/config.go
@@ -204,6 +204,14 @@ type Config struct {
 	// default is 120s
 	HTTPESSClientTimeout int `env:"HTTPESSClientTimeout"`
 
+	// HTTPESSObjClientTimeout specifies the HTTP client timeout, in seconds, used by the ESS when downloading models (objects)
+	// default is 600s
+	HTTPESSObjClientTimeout int `env:"HTTPESSObjClientTimeout"`
+
+	// HTTPCSSObjDownloadConcurrencyMultiplier specifies the number by which to multiply the number of threads to set the allowed concurrent object downloads per CSS
+	// default is 1
+	HTTPCSSObjDownloadConcurrencyMultiplier int `env:"HTTPCSSObjDownloadConcurrencyMultiplier"`
+
 	// LogLevel specifies the logging level in string format
 	LogLevel string `env:"LOG_LEVEL"`
 
@@ -257,6 +265,10 @@ type Config struct {
 	// A value of zero means ESSs are never removed
 	RemoveESSRegistrationTime int16 `env:"REMOVE_ESS_REGISTRATION_TIME"`
 
+	// EnableDataChunk specifies whether or not to transfer data in chunks between the CSS and the ESS
+	// It is always true for MQTT
+	EnableDataChunk bool `env:"ENABLE_DATA_CHUNK"`
+
 	// Maximum size of data that can be sent in one message
 	MaxDataChunkSize int `env:"MAX_DATA_CHUNK_SIZE"`
 
@@ -493,6 +505,7 @@ func ValidateConfig() error {
 		}
 		if mqtt {
 			Configuration.CommunicationProtocol = MQTTProtocol
+			Configuration.EnableDataChunk = true
 		} else if wiotp {
 			Configuration.CommunicationProtocol = WIoTP
 		} else {
@@ -505,6 +518,7 @@
 		if http {
 			if mqtt {
 				Configuration.CommunicationProtocol = HybridMQTT
+				Configuration.EnableDataChunk = true
 			} else if wiotp {
 				Configuration.CommunicationProtocol = HybridWIoTP
 			} else {
@@ -513,6 +527,7 @@
 	} else {
 		if mqtt {
Configuration.CommunicationProtocol = MQTTProtocol + Configuration.EnableDataChunk = true } else if wiotp { Configuration.CommunicationProtocol = WIoTP } @@ -713,7 +728,8 @@ func SetDefaultConfig(config *Config) { config.ESSCallSPIRetryInterval = 2 config.ESSPingInterval = 1 config.RemoveESSRegistrationTime = 30 - config.MaxDataChunkSize = 120 * 1024 + config.EnableDataChunk = true + config.MaxDataChunkSize = 5120 * 1024 config.MaxInflightChunks = 1 config.MongoAddressCsv = "localhost:27017" config.MongoDbName = "d_edge" @@ -733,6 +749,8 @@ func SetDefaultConfig(config *Config) { config.HTTPCSSUseSSL = false config.HTTPCSSCACertificate = "" config.HTTPESSClientTimeout = 120 + config.HTTPESSObjClientTimeout = 600 + config.HTTPCSSObjDownloadConcurrencyMultiplier = 1 config.MessagingGroupCacheExpiration = 60 config.ShutdownQuiesceTime = 60 config.ESSConsumedObjectsKept = 1000 diff --git a/core/base/apiModule.go b/core/base/apiModule.go index 3d3a945..5a8b061 100644 --- a/core/base/apiModule.go +++ b/core/base/apiModule.go @@ -227,24 +227,42 @@ func UpdateObject(orgID string, objectType string, objectID string, metaData com metaData.ExpectedConsumers = math.MaxInt32 } + lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + apiObjectLocks.Lock(lockIndex) + common.ObjectLocks.Lock(lockIndex) + + existingObject, existingObjStatus, _ := store.RetrieveObjectAndStatus(orgID, objectType, objectID) + if existingObjStatus != "" && existingObjStatus != common.ReadyToSend && existingObjStatus != common.NotReadyToSend { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return &common.InvalidRequest{Message: "Can't update object of the receiving side"} + } + // Store the object in the storage module status := common.NotReadyToSend - if data != nil || metaData.Link != "" || metaData.NoData || metaData.SourceDataURI != "" { + metaData.DataVerified = false + if metaData.Link != "" || metaData.NoData || metaData.SourceDataURI != "" { status = common.ReadyToSend } else if metaData.MetaOnly { - reader, err := store.RetrieveObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + reader, err := store.RetrieveObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, false) if err != nil { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) return err } if reader != nil { status = common.ReadyToSend store.CloseDataReader(reader) } - } + // for MetaOnly, we will re-use the checksum fields + if existingObject != nil { + metaData.HashAlgorithm = existingObject.HashAlgorithm + metaData.PublicKey = existingObject.PublicKey + metaData.Signature = existingObject.Signature + } - lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) - apiObjectLocks.Lock(lockIndex) - common.ObjectLocks.Lock(lockIndex) + metaData.DataVerified = true //don't need to verify data again + } if metaData.NoData { data = nil @@ -252,43 +270,60 @@ func UpdateObject(orgID string, objectType string, objectID string, metaData com metaData.SourceDataURI = "" metaData.PublicKey = "" metaData.Signature = "" - } else if data != nil { + metaData.DataVerified = true + } + + if !metaData.DataVerified && !common.NeedDataVerification(metaData) { + metaData.DataVerified = true + } + + if data != nil && metaData.DataVerified { + metaData.ObjectSize = int64(len(data)) + status = common.ReadyToSend + } + + metaData.ChunkSize = common.Configuration.MaxDataChunkSize + + // Store metadata and data, with correct 
verified status + deletedDestinations, err := store.StoreObject(metaData, data, status) + if err != nil { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return err + } + + // Verify + if data != nil { // data signature verification if metadata has both publicKey and signature // data is nil for metaOnly object. Meta-only object will not apply data verification - if common.IsValidHashAlgorithm(metaData.HashAlgorithm) && metaData.PublicKey != "" && metaData.Signature != "" { + if common.NeedDataVerification(metaData) { // will no store data if object metadata not exist dataReader := bytes.NewReader(data) dataVf := dataVerifier.NewDataVerifier(metaData.HashAlgorithm, metaData.PublicKey, metaData.Signature) if success, err := dataVf.VerifyDataSignature(dataReader, orgID, objectType, objectID, ""); !success || err != nil { if trace.IsLogging(logger.ERROR) { - trace.Error("Failed to verify data for object %s %s, remove temp data\n", objectType, objectID) + if err != nil { + trace.Error("Failed to verify data for object %s %s, Error: %s\n", objectType, objectID, err.Error()) + + } + } - dataVf.RemoveTempData(orgID, objectType, objectID, "") + + dataVf.RemoveUnverifiedData(metaData) + store.UpdateObjectStatus(orgID, objectType, objectID, common.NotReadyToSend) common.ObjectLocks.Unlock(lockIndex) apiObjectLocks.Unlock(lockIndex) return err } - dataVf.RemoveTempData(orgID, objectType, objectID, "") + if err = store.UpdateObjectDataVerifiedStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, true); err != nil { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return err + } + status = common.ReadyToSend } - - metaData.ObjectSize = int64(len(data)) - } - metaData.ChunkSize = common.Configuration.MaxDataChunkSize - - _, existingObjStatus, _ := store.RetrieveObjectAndStatus(orgID, objectType, objectID) - if existingObjStatus != "" && existingObjStatus != common.ReadyToSend && existingObjStatus != common.NotReadyToSend { - common.ObjectLocks.Unlock(lockIndex) - apiObjectLocks.Unlock(lockIndex) - return &common.InvalidRequest{Message: "Can't update object of the receiving side"} - } - - deletedDestinations, err := store.StoreObject(metaData, data, status) - if err != nil { - common.ObjectLocks.Unlock(lockIndex) - apiObjectLocks.Unlock(lockIndex) - return err } store.DeleteNotificationRecords(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, "", "") @@ -301,6 +336,7 @@ func UpdateObject(orgID string, objectType string, objectID string, metaData com // StoreObject increments the instance id, we need to fetch the updated meta data updatedMetaData, err := store.RetrieveObject(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + if err != nil { common.ObjectLocks.Unlock(lockIndex) apiObjectLocks.Unlock(lockIndex) @@ -491,13 +527,17 @@ func GetObjectData(orgID string, objectType string, objectID string) (io.Reader, if metaData == nil || status == common.NotReadyToSend || status == common.PartiallyReceived { return nil, nil } + + if !metaData.DataVerified { + return nil, nil + } if metaData.DestinationDataURI != "" && status == common.CompletelyReceived { - return dataURI.GetData(metaData.DestinationDataURI) + return dataURI.GetData(metaData.DestinationDataURI, false) } if metaData.SourceDataURI != "" && status == common.ReadyToSend { - return dataURI.GetData(metaData.SourceDataURI) + return dataURI.GetData(metaData.SourceDataURI, false) } - return store.RetrieveObjectData(orgID, objectType, objectID) + return 
store.RetrieveObjectData(orgID, objectType, objectID, false) } // GetRemovedDestinationPolicyServicesFromESS get the removedDestinationPolicyServices list @@ -520,14 +560,14 @@ func GetRemovedDestinationPolicyServicesFromESS(orgID string, objectType string, return removedDestinationPolicyServices, err } -// PutObjectData stores an object's data +// PutObjectAllData stores an object's data // Verify data signature (if publicKey and signature both have value) // Call the storage module to store the object's data // Return true if the object was found and updated // Return false and no error if the object was not found -func PutObjectData(orgID string, objectType string, objectID string, dataReader io.Reader) (bool, common.SyncServiceError) { +func PutObjectAllData(orgID string, objectType string, objectID string, dataReader io.Reader) (bool, common.SyncServiceError) { if trace.IsLogging(logger.DEBUG) { - trace.Debug("In PutObjectData. Update data %s %s\n", objectType, objectID) + trace.Debug("In PutObjectAllData. Update data %s %s\n", objectType, objectID) } common.HealthStatus.ClientRequestReceived() @@ -559,7 +599,7 @@ func PutObjectData(orgID string, objectType string, objectID string, dataReader } var dataVf *dataVerifier.DataVerifier - if common.IsValidHashAlgorithm(metaData.HashAlgorithm) && metaData.PublicKey != "" && metaData.Signature != "" { + if common.NeedDataVerification(*metaData) { //start data verification if trace.IsLogging(logger.DEBUG) { trace.Debug("In PutObjectData. Start data verification %s %s\n", objectType, objectID) @@ -568,9 +608,9 @@ func PutObjectData(orgID string, objectType string, objectID string, dataReader dataVf = dataVerifier.NewDataVerifier(metaData.HashAlgorithm, metaData.PublicKey, metaData.Signature) if success, err := dataVf.VerifyDataSignature(dataReader, orgID, objectType, objectID, ""); !success || err != nil { if trace.IsLogging(logger.ERROR) { - trace.Error("Failed to verify data for object %s %s, remove temp data\n", objectType, objectID) + trace.Error("Failed to verify data for object %s %s, remove unverified data\n", objectType, objectID) } - dataVf.RemoveTempData(orgID, objectType, objectID, "") + dataVf.RemoveUnverifiedData(*metaData) common.ObjectLocks.Unlock(lockIndex) apiObjectLocks.Unlock(lockIndex) return false, &common.InvalidRequest{Message: "Failed to verify and store data, Error: " + err.Error()} @@ -581,10 +621,9 @@ func PutObjectData(orgID string, objectType string, objectID string, dataReader } - // If the data has been verified, then we retrieve the temp data, store in DB, and delete temp data if dataVf != nil { - if err := dataVf.StoreVerifiedData(orgID, objectType, objectID, ""); err != nil { - dataVf.RemoveTempData(orgID, objectType, objectID, "") + // If the data has been verified, then set metadata.DataVerified to true + if err = store.UpdateObjectDataVerifiedStatus(orgID, objectType, objectID, true); err != nil { common.ObjectLocks.Unlock(lockIndex) apiObjectLocks.Unlock(lockIndex) return false, err @@ -638,6 +677,153 @@ func PutObjectData(orgID string, objectType string, objectID string, dataReader return true, nil } +func PutObjectChunkData(orgID string, objectType string, objectID string, dataReader io.Reader, startOffset int64, endOffset int64, totalSize int64) (bool, common.SyncServiceError) { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In PutObjectChunkData. 
Update data %s %s %s, startOffset: %d, endOffset: %d\n", orgID, objectType, objectID, startOffset, endOffset) + } + + common.HealthStatus.ClientRequestReceived() + + lockIndex := common.HashStrings(orgID, objectType, objectID) + apiObjectLocks.Lock(lockIndex) + common.ObjectLocks.Lock(lockIndex) + + metaData, status, err := store.RetrieveObjectAndStatus(orgID, objectType, objectID) + if err != nil { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return false, err + } + if metaData == nil { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return false, nil + } + if status != common.ReadyToSend && status != common.NotReadyToSend { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return false, &common.InvalidRequest{Message: "Can't update data of the receiving side"} + } + if metaData.NoData { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return false, &common.InvalidRequest{Message: "Can't update data, the NoData flag is set to true"} + } + + isFirstChunk := startOffset == 0 + isLastChunk := false + dataSize := endOffset - startOffset + 1 + + // append Data to temp file/data + isTempData := false + if common.NeedDataVerification(*metaData) { + isTempData = true + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In PutObjectChunkData for %s %s %s, isTempData: %t, isFirstChunk: %t, dataSize to store: %d \n", orgID, objectType, objectID, isTempData, isFirstChunk, dataSize) + } + + if isLastChunk, err = store.AppendObjectData(orgID, objectType, objectID, dataReader, 0, startOffset, totalSize, + isFirstChunk, isLastChunk, isTempData); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Failed to append data for %s %s %s from offset %d to %d, Error: %s\n", orgID, objectType, objectID, startOffset, endOffset, err.Error()) + } + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return false, err + } + + if !isLastChunk { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + + return true, nil + } else { + // Is lastChunk + if isTempData { + // Verify data + if common.NeedDataVerification(*metaData) { + //start data verification + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In PutObjectData. Start data verification %s %s\n", objectType, objectID) + } + // verify data + dataVf := dataVerifier.NewDataVerifier(metaData.HashAlgorithm, metaData.PublicKey, metaData.Signature) + if dr, err := dataVf.GetTempData(*metaData); err != nil { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return false, &common.InvalidRequest{Message: "Failed to get temp data for data verify, Error: " + err.Error()} + } else if success, err := dataVf.VerifyDataSignature(dr, orgID, objectType, objectID, ""); !success || err != nil { + if trace.IsLogging(logger.ERROR) { + trace.Error("Failed to verify data for object %s %s, remove unverified data\n", objectType, objectID) + } + dataVf.RemoveUnverifiedData(*metaData) + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + errMsg := "" + if err != nil && trace.IsLogging(logger.ERROR) { + errMsg = err.Error() + trace.Error("Failed to verify data for object %s %s, Error: %s\n", objectType, objectID, errMsg) + } + + return false, &common.InvalidRequest{Message: "Failed to verify and store data, Error: " + errMsg} + } + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In PutObjectChunkData. 
data verified for object %s %s\n", objectType, objectID) + } + + // If the data has been verified, then set metadata.DataVerified to true + if err = store.UpdateObjectDataVerifiedStatus(orgID, objectType, objectID, true); err != nil { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return false, err + } + + } + } else { + // handle object info (update metadata.ObjectSize, metadata.InstanceId, metaData.DataId and object status from notReady to Ready), because Store.AppendObjectData will not modify those object info + if _, err := store.HandleObjectInfoForLastDataChunk(orgID, objectType, objectID, false, totalSize); err != nil { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return false, err + } + } + + var updatedMetaData *common.MetaData + // StoreObject increments the instance id if this is a data update, we need to fetch the updated meta data + // Also, StoreObjectData updates the ObjectSize, so we need to fetch the updated meta data + updatedMetaData, err = store.RetrieveObject(orgID, objectType, objectID) + if err != nil { + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return false, err + } + + if updatedMetaData.Inactive { + // Don't send inactive objects to the other side + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + return true, nil + } + + // Should be in antoher thread + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In PutObjectChunkData. Send object to objectQueue %s %s\n", objectType, objectID) + } + + common.ObjectLocks.Unlock(lockIndex) + apiObjectLocks.Unlock(lockIndex) + objectInQueue := common.ObjectInQueue{NotificationAction: common.Update, NotificationType: common.TypeObject, Object: *updatedMetaData, Destinations: []common.StoreDestinationStatus{}} + objectQueue.SendObjectToQueue(objectInQueue) + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In PutObjectChunkData. Return response for PutObjectChunkData %s %s\n", objectType, objectID) + } + return true, nil + } +} + // ObjectConsumed is used when an app indicates that it consumed the object // Send "consumed" notification to the object's origin // Call the storage module to mark the object as consumed diff --git a/core/base/apiModule_test.go b/core/base/apiModule_test.go index 09699ab..281725e 100644 --- a/core/base/apiModule_test.go +++ b/core/base/apiModule_test.go @@ -32,7 +32,6 @@ func setupDB(dbType string) { } else if dbType == common.Bolt { dir, _ := os.Getwd() common.Configuration.PersistenceRootPath = dir + "/persist" - fmt.Printf("common.Configuration.PersistenceRootPath: %s\n", common.Configuration.PersistenceRootPath) boltStore := &storage.BoltStorage{} boltStore.Cleanup(true) store = boltStore @@ -109,6 +108,7 @@ func testObjectAPI(store storage.Storage, t *testing.T) { dataVerifier.Store = store common.InitObjectLocks() + common.InitObjectDownloadSemaphore() dests := []string{"device:dev1", "device2:dev", "device2:dev1"} @@ -422,7 +422,7 @@ func testObjectAPI(store storage.Storage, t *testing.T) { } // Get data - dataReader, err := store.RetrieveObjectData(row.orgID, row.objectType, row.objectID) + dataReader, err := store.RetrieveObjectData(row.orgID, row.objectType, row.objectID, false) if err != nil { t.Errorf("An error occurred in data fetch (objectID = %s). 
Error: %s", row.objectID, err.Error()) } @@ -619,7 +619,7 @@ func testObjectAPI(store storage.Storage, t *testing.T) { key := fmt.Sprintf("%s/%s/%s", row.orgID, row.objectType, row.objectID) metaInstanceIdMap[key] = instance - ok, err := PutObjectData(row.orgID, row.objectType, row.objectID, bytes.NewReader(row.newData)) + ok, err := PutObjectAllData(row.orgID, row.objectType, row.objectID, bytes.NewReader(row.newData)) if err != nil { if !row.metaData.NoData { t.Errorf("Failed to update object's data (objectID = %s). Error: %s", row.objectID, err.Error()) @@ -697,7 +697,6 @@ func testObjectAPI(store storage.Storage, t *testing.T) { } } - //fmt.Printf("for object %s/%s/%s, notification.InstanceID: %d, instance: %d\n", row.orgID, row.objectType, row.objectID, notification.InstanceID, instance) if row.expectedStatus == common.ReadyToSend && notification.InstanceID <= instance { if i == MAX_RETRY-1 { t.Errorf("Wrong instance ID in notification after data update: %d should be greater than %d (objectID = %s)", @@ -807,6 +806,7 @@ func testESSObjectDeletedAPI(store storage.Storage, t *testing.T) { communications.Store = store dataVerifier.Store = store common.InitObjectLocks() + common.InitObjectDownloadSemaphore() if err := store.Init(); err != nil { t.Errorf("Failed to initialize storage driver. Error: %s\n", err.Error()) @@ -902,6 +902,7 @@ func TestObjectDestinationsAPI(t *testing.T) { func testObjectDestinationsAPI(store storage.Storage, t *testing.T) { communications.Store = store common.InitObjectLocks() + common.InitObjectDownloadSemaphore() if err := store.Init(); err != nil { t.Errorf("Failed to initialize storage driver. Error: %s\n", err.Error()) @@ -1330,6 +1331,7 @@ func testObjectWithPolicyAPI(store storage.Storage, t *testing.T) { communications.Store = store common.InitObjectLocks() + common.InitObjectDownloadSemaphore() if err := store.Init(); err != nil { t.Errorf("Failed to initialize storage driver. Error: %s\n", err.Error()) @@ -1414,6 +1416,7 @@ func testObjectWithPolicyAPI(store storage.Storage, t *testing.T) { } common.InitObjectLocks() + common.InitObjectDownloadSemaphore() for _, destination := range destinations { if err := store.StoreDestination(destination); err != nil { @@ -1426,7 +1429,6 @@ func testObjectWithPolicyAPI(store storage.Storage, t *testing.T) { err := store.DeleteStoredObject(test.metaData.DestOrgID, test.metaData.ObjectType, test.metaData.ObjectID) if err != nil { t.Errorf("Failed to delete object (objectID = %s). Error: %s\n", test.metaData.ObjectID, err.Error()) - fmt.Printf("Error: %#v\n", err) } // Insert if err := UpdateObject(test.metaData.DestOrgID, test.metaData.ObjectType, test.metaData.ObjectID, @@ -1473,7 +1475,7 @@ func testObjectWithPolicyAPI(store storage.Storage, t *testing.T) { } if test.data != nil { - ok, err := PutObjectData(test.metaData.DestOrgID, test.metaData.ObjectType, test.metaData.ObjectID, bytes.NewReader(test.data)) + ok, err := PutObjectAllData(test.metaData.DestOrgID, test.metaData.ObjectType, test.metaData.ObjectID, bytes.NewReader(test.data)) if !ok || err != nil { t.Errorf("Failed to update object's data (objectID = %s). 
Error: %s", test.metaData.ObjectID, err.Error()) } diff --git a/core/base/apiServer.go b/core/base/apiServer.go index 01a9f85..59a172c 100644 --- a/core/base/apiServer.go +++ b/core/base/apiServer.go @@ -2154,7 +2154,26 @@ func handleObjectPutData(orgID string, objectType string, objectID string, write writer.Write(unauthorizedBytes) return } - if found, err := PutObjectData(orgID, objectType, objectID, request.Body); err == nil { + + totalSize, startOffset, endOffset, err := common.GetStartAndEndRangeFromContentRangeHeader(request) + if err != nil { + reqErr := &common.InvalidRequest{Message: fmt.Sprintf("Failed to parse Content-Range header, Error: %s", err.Error())} + communications.SendErrorResponse(writer, reqErr, "", 0) + return + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In handleObjectPutData. TotalSize: %d, startOffset: %d, endOffset: %d\n", totalSize, startOffset, endOffset) + } + + var found bool + if totalSize == 0 && startOffset == -1 && endOffset == -1 { + found, err = PutObjectAllData(orgID, objectType, objectID, request.Body) + } else { + found, err = PutObjectChunkData(orgID, objectType, objectID, request.Body, startOffset, endOffset, totalSize) + } + + if err == nil { if !found { writer.WriteHeader(http.StatusNotFound) } else { diff --git a/core/base/apiServer_test.go b/core/base/apiServer_test.go index 21dd2a0..c17e34c 100644 --- a/core/base/apiServer_test.go +++ b/core/base/apiServer_test.go @@ -1403,6 +1403,7 @@ func testAPIServerSetup(nodeType string, storageType string) string { } common.InitObjectLocks() + common.InitObjectDownloadSemaphore() security.SetAuthentication(&security.TestAuthenticate{}) security.Store = store diff --git a/core/base/base.go b/core/base/base.go index b93a538..2b688fb 100644 --- a/core/base/base.go +++ b/core/base/base.go @@ -145,14 +145,12 @@ func Start(swaggerFile string, registerHandlers bool) common.SyncServiceError { if common.Configuration.NodeType == common.ESS { common.Registered = false - if common.Configuration.CommunicationProtocol == common.HTTPProtocol { - go communication.Register() - } } common.ResendAcked = true common.InitObjectLocks() + common.InitObjectDownloadSemaphore() // storage, lock should be setup before initialize objectQueue queueBufferSize := common.Configuration.ObjectQueueBufferSize diff --git a/core/communications/communicationWrapper.go b/core/communications/communicationWrapper.go index e8188b8..6d5e730 100644 --- a/core/communications/communicationWrapper.go +++ b/core/communications/communicationWrapper.go @@ -171,6 +171,15 @@ func (communication *Wrapper) GetData(metaData common.MetaData, offset int64) co return comm.GetData(metaData, offset) } +// PushData uploade data to from ESS to CSS +func (communication *Wrapper) PushData(metaData *common.MetaData, offset int64) common.SyncServiceError { + comm, err := communication.selectCommunicator("", metaData.DestOrgID, metaData.OriginType, metaData.OriginID) + if err != nil { + return err + } + return comm.PushData(metaData, offset) +} + // SendData sends data from the CSS to the ESS or from the ESS to the CSS func (communication *Wrapper) SendData(orgID string, destType string, destID string, message []byte, chunked bool) common.SyncServiceError { comm, err := communication.selectCommunicator("", orgID, destType, destID) diff --git a/core/communications/communicator.go b/core/communications/communicator.go index 06578fa..1ec47a0 100644 --- a/core/communications/communicator.go +++ b/core/communications/communicator.go @@ -2,6 +2,7 @@ package 
communications import ( "bytes" + "io" "net/http" "strings" "time" @@ -54,6 +55,9 @@ type Communicator interface { // GetData requests data to be sent from the CSS to the ESS or from the ESS to the CSS GetData(metaData common.MetaData, offset int64) common.SyncServiceError + // PushData uploade data to from ESS to CSS + PushData(metaData *common.MetaData, offset int64) common.SyncServiceError + // SendData sends data from the CSS to the ESS or from the ESS to the CSS SendData(orgID string, destType string, destID string, message []byte, chunked bool) common.SyncServiceError @@ -85,6 +89,22 @@ func (e *Error) Error() string { return e.message } +type dataTransportTimeOutError struct { + message string +} + +func (e *dataTransportTimeOutError) Error() string { + if e.message == "" { + return "Download timeout" + } + return e.message +} + +func isDataTransportTimeoutError(err error) bool { + _, ok := err.(*dataTransportTimeOutError) + return ok +} + // ignoredByHandler error is returned if a notification is ignored by the notification handler type ignoredByHandler struct { message string @@ -115,6 +135,8 @@ var DestReqQueue *DestinationRequestQueue func SendErrorResponse(writer http.ResponseWriter, err error, message string, statusCode int) { if statusCode == 0 { switch err.(type) { + case *dataTransportTimeOutError: + statusCode = http.StatusGatewayTimeout case *common.InvalidRequest: statusCode = http.StatusBadRequest case *storage.Error: @@ -147,7 +169,11 @@ func SendErrorResponse(writer http.ResponseWriter, err error, message string, st func IsTransportError(pResp *http.Response, err error) bool { if err != nil { - if strings.Contains(err.Error(), ": EOF") { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return true + } + + if strings.Contains(err.Error(), " EOF") { return true } @@ -167,7 +193,10 @@ func IsTransportError(pResp *http.Response, err error) bool { // 504: gateway timeout return true } else if pResp.StatusCode == http.StatusServiceUnavailable { - //503: service unavailable + // 503: service unavailable + return true + } else if pResp.StatusCode == http.StatusTooManyRequests { + // 429: too many requests return true } } diff --git a/core/communications/httpCommunication.go b/core/communications/httpCommunication.go index cc8b622..bf0ac0c 100644 --- a/core/communications/httpCommunication.go +++ b/core/communications/httpCommunication.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "io/ioutil" + "math" "net/http" "os" "strconv" @@ -33,11 +34,13 @@ var unauthorizedBytes = []byte("Unauthorized") // HTTP is the struct for the HTTP communications layer type HTTP struct { - httpClient http.Client - started bool - httpPollTimer *time.Timer - httpPollStopChannel chan int - requestWrapper *httpRequestWrapper + httpClient http.Client + httpObjectDownloadClient http.Client + started bool + httpPollTimer *time.Timer + httpPollStopChannel chan int + requestWrapper *httpRequestWrapper + objDownloadRequestWrapper *httpRequestWrapper } type updateMessage struct { @@ -60,6 +63,10 @@ func (communication *HTTP) StartCommunication() common.SyncServiceError { http.Handle(pingURL, http.StripPrefix(pingURL, http.HandlerFunc(communication.handlePing))) http.Handle(objectRequestURL, http.StripPrefix(objectRequestURL, http.HandlerFunc(communication.handleObjects))) } else { + communication.httpObjectDownloadClient = http.Client{ + Transport: &http.Transport{}, + Timeout: time.Second * time.Duration(common.Configuration.HTTPESSObjClientTimeout), + } communication.httpClient = http.Client{ Transport: 
&http.Transport{}, Timeout: time.Second * time.Duration(common.Configuration.HTTPESSClientTimeout), @@ -85,9 +92,11 @@ func (communication *HTTP) StartCommunication() common.SyncServiceError { caCertPool.AppendCertsFromPEM(certificate) tlsConfig := &tls.Config{RootCAs: caCertPool} communication.httpClient.Transport = &http.Transport{TLSClientConfig: tlsConfig} + communication.httpObjectDownloadClient.Transport = &http.Transport{TLSClientConfig: tlsConfig} } communication.httpPollStopChannel = make(chan int, 1) communication.requestWrapper = newHTTPRequestWrapper(communication.httpClient) + communication.objDownloadRequestWrapper = newHTTPRequestWrapper(communication.httpObjectDownloadClient) } communication.started = true @@ -136,14 +145,15 @@ func (communication *HTTP) StopCommunication() common.SyncServiceError { } communication.requestWrapper.cancel() + communication.objDownloadRequestWrapper.cancel() return nil } // HandleRegAck handles a registration acknowledgement message from the CSS func (communication *HTTP) HandleRegAck() { - if trace.IsLogging(logger.TRACE) { - trace.Trace("Received regack") + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Received regack") } communication.startPolling() } @@ -200,6 +210,9 @@ func (communication *HTTP) handleGetUpdates(writer http.ResponseWriter, request } for _, n := range notifications { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Get notification %s %s %s %s %s, status is %s\n", n.DestOrgID, n.ObjectType, n.ObjectID, n.DestType, n.DestID, n.Status) + } metaData, err := Store.RetrieveObject(n.DestOrgID, n.ObjectType, n.ObjectID) if err != nil { message := fmt.Sprintf("Error in handleGetUpdates. Error: %s\n", err) @@ -308,7 +321,7 @@ func (communication *HTTP) SendNotificationMessage(notificationTopic string, des response, err = communication.requestWrapper.do(request) if response != nil && response.Body != nil { - defer response.Body.Close() + response.Body.Close() } if IsTransportError(response, err) { @@ -335,17 +348,44 @@ func (communication *HTTP) SendNotificationMessage(notificationTopic string, des if response.StatusCode == http.StatusNoContent { switch notificationTopic { case common.Update: - // Push the data - if metaData.Link == "" && !metaData.NoData && !metaData.MetaOnly { - if err = communication.pushData(metaData); err != nil { - return err - } - } // Mark updated if err = handleObjectUpdated(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, destType, destID, instanceID, dataID); err != nil { return err } + + // Push the data + if metaData.Link == "" && !metaData.NoData && !metaData.MetaOnly { + if metaData.ChunkSize <= 0 || metaData.ObjectSize <= 0 || !common.Configuration.EnableDataChunk { + if err := communication.PushData(metaData, 0); err != nil { + return err + } + } else { + lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + communication.LockDataChunks(lockIndex, metaData) + var offset int64 + for offset < metaData.ObjectSize { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("(i=%d)pushData from offset: %d\n", i, offset) + } + if err := communication.PushData(metaData, offset); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Receive error from PushData, offset %d, Error: %s\n", offset, err.Error()) + } + + if !isDataTransportTimeoutError(err) { + communication.UnlockDataChunks(lockIndex, metaData) + return err + } + } + // If received data transport timeout error, this for loop will continue with next offset without throwing an error. 
+ // ResendNotification will push the chunks with data transport error + offset += int64(metaData.ChunkSize) + } + communication.UnlockDataChunks(lockIndex, metaData) + } + } + case common.Delete: return handleAckDelete(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, destType, destID, instanceID, dataID) @@ -638,21 +678,64 @@ func (communication *HTTP) GetData(metaData common.MetaData, offset int64) commo trace.Trace("In http.GetData %s %s", metaData.ObjectType, metaData.ObjectID) } - // For debugging + if !common.Configuration.EnableDataChunk || metaData.ChunkSize == 0 || metaData.ObjectSize == 0 || int64(metaData.ChunkSize) >= metaData.ObjectSize { + if err := communication.GetAllData(metaData, 0); err != nil { + return err + } + } else { + if err := communication.GetDataByChunk(metaData, offset); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Receive error from GetDataByChunk, offset %d, Error: %s\n", offset, err.Error()) + } + + if !isDataTransportTimeoutError(err) { + return err + } + } + } + + return nil +} + +func (communication *HTTP) GetAllData(metaData common.MetaData, offset int64) common.SyncServiceError { + lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + + common.ObjectLocks.Lock(lockIndex) if trace.IsLogging(logger.DEBUG) { - trace.Debug("In http.GetData, retrieve notification %s, %s. %s, %s, %s", metaData.DestID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID) + trace.Debug("In http.GetAllData, retrieve notification %s, %s. %s, %s, %s", metaData.DestID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID) + } + if n, err := Store.RetrieveNotificationRecord(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Error when retrieve notification record, %s", err.Error()) + } + common.ObjectLocks.Unlock(lockIndex) + return err + } else if n != nil && metaData.InstanceID < n.InstanceID { + trace.Debug("In GetAllData: metaData instance ID (%d) < notification instance ID (%d), ignore...", metaData.InstanceID, n.InstanceID) + common.ObjectLocks.Unlock(lockIndex) + return nil + } else if n != nil { + trace.Debug("In GetAllData: notification status %s", n.Status) + } - if n, err := Store.RetrieveNotificationRecord(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID); err != nil { - trace.Debug("Error when retrieve notification record, %s", err.Error()) - } else if n == nil { - trace.Debug("In GetData: nil notifications") - } else { - trace.Debug("In GetData: notification status %s", n.Status) + if obj, objStatus, err := Store.RetrieveObjectAndStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Error when retrieve object, %s", err.Error()) } - trace.Debug("In http.GetData, updating notification %s, %s. 
%s, %s, %s to getdata status", metaData.DestID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID) + common.ObjectLocks.Unlock(lockIndex) + return err + } else if obj != nil && objStatus == common.CompletelyReceived { + trace.Debug("In GetAllData: object (%s %s %s) is already completely received, ignore...", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + common.ObjectLocks.Unlock(lockIndex) + return nil + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In http.GetAllData, updating notification %s, %s. %s, %s, %s to getdata status", metaData.DestID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID) } if err := updateGetDataNotification(metaData, metaData.OriginType, metaData.OriginID, offset); err != nil { + common.ObjectLocks.Unlock(lockIndex) return err } @@ -667,6 +750,7 @@ func (communication *HTTP) GetData(metaData common.MetaData, offset int64) commo trace.Debug("Notification status is %s after updating", n.Status) } } + common.ObjectLocks.Unlock(lockIndex) url := buildObjectURL(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.InstanceID, metaData.DataID, common.Data) request, err := http.NewRequest("GET", url, nil) @@ -676,10 +760,27 @@ func (communication *HTTP) GetData(metaData common.MetaData, offset int64) commo security.AddIdentityToSPIRequest(request, url) request.Close = true - response, err := communication.requestWrapper.do(request) + response, err := communication.objDownloadRequestWrapper.do(request) if response != nil && response.Body != nil { defer response.Body.Close() } + + if IsTransportError(response, err) { + msg := "Timeout in GetAllData: failed to receive data from the other side" + if err != nil { + msg = fmt.Sprintf("%s. Error: %s", msg, err.Error()) + } + + if response != nil { + msg = fmt.Sprintf("%s. Response code: %d", msg, response.StatusCode) + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("%s", msg) + } + return &dataTransportTimeOutError{msg} + } + if err != nil { return &Error{"Error in GetData: failed to get data. 
Error: " + err.Error()} } @@ -691,46 +792,50 @@ func (communication *HTTP) GetData(metaData common.MetaData, offset int64) commo return ¬ificationHandlerError{msg} } - lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) common.ObjectLocks.Lock(lockIndex) var dataVf *dataVerifier.DataVerifier - if common.IsValidHashAlgorithm(metaData.HashAlgorithm) && metaData.PublicKey != "" && metaData.Signature != "" { + if common.NeedDataVerification(metaData) { dataVf = dataVerifier.NewDataVerifier(metaData.HashAlgorithm, metaData.PublicKey, metaData.Signature) if dataVerified, err := dataVf.VerifyDataSignature(response.Body, metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI); !dataVerified || err != nil { if log.IsLogging(logger.ERROR) { log.Error("Failed to verify data for object %s %s, remove temp data\n", metaData.ObjectType, metaData.ObjectID) } - dataVf.RemoveTempData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI) + dataVf.RemoveUnverifiedData(metaData) common.ObjectLocks.Unlock(lockIndex) return err } + } else { + // Directly store the data + if metaData.DestinationDataURI != "" { + if _, err := dataURI.StoreData(metaData.DestinationDataURI, response.Body, 0); err != nil { + common.ObjectLocks.Unlock(lockIndex) + return err + } + } else { + found, err := Store.StoreObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, response.Body) + if err != nil { + common.ObjectLocks.Unlock(lockIndex) + return err + } else if !found { + common.ObjectLocks.Unlock(lockIndex) + return &Error{"Failed to store object's data."} + } + } } - if dataVf != nil { - if err := dataVf.StoreVerifiedData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI); err != nil { - dataVf.RemoveTempData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI) - common.ObjectLocks.Unlock(lockIndex) - return err - } - } else if metaData.DestinationDataURI != "" { - if _, err := dataURI.StoreData(metaData.DestinationDataURI, response.Body, 0); err != nil { - common.ObjectLocks.Unlock(lockIndex) - return err - } - } else { - found, err := Store.StoreObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, response.Body) - if err != nil { - common.ObjectLocks.Unlock(lockIndex) - return err - } else if !found { - common.ObjectLocks.Unlock(lockIndex) - return &Error{"Failed to store object's data."} + // set metadata.DataVerified = true + if err = Store.UpdateObjectDataVerifiedStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, true); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Failed to update metadata.DataVerified to true for object %s %s\n", metaData.ObjectType, metaData.ObjectID) } + common.ObjectLocks.Unlock(lockIndex) + return err } if trace.IsLogging(logger.DEBUG) { - trace.Debug("Updating ESS object status to completelyReceived for %s %s %s", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + trace.Debug("Updated object DataVerified to true for %s %s %s", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + trace.Debug("Updating ESS object status to completelyReceived for %s %s %s...", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) } if err := Store.UpdateObjectStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, common.CompletelyReceived); err != nil { common.ObjectLocks.Unlock(lockIndex) @@ -744,6 +849,10 @@ func (communication *HTTP) 
GetData(metaData common.MetaData, offset int64) commo common.ObjectLocks.Unlock(lockIndex) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Updating ESS object status to completelyReceived for %s %s %s", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + } + notificationsInfo, err := PrepareObjectStatusNotification(metaData, common.Received) if err != nil { return err @@ -758,6 +867,255 @@ func (communication *HTTP) GetData(metaData common.MetaData, offset int64) commo return nil } +func (communication *HTTP) GetDataByChunk(metaData common.MetaData, offset int64) common.SyncServiceError { + lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + common.ObjectLocks.Lock(lockIndex) + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In http.GetDataByChunk for %s %s %s, offset: %d, object size: %d, chunk size: %d\n", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, offset, metaData.ObjectSize, metaData.ChunkSize) + } + if n, err := Store.RetrieveNotificationRecord(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Error when retrieve notification record, %s", err.Error()) + } + common.ObjectLocks.Unlock(lockIndex) + return err + } else if n != nil && metaData.InstanceID < n.InstanceID { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In GetDataByChunk: metaData instance ID (%d) < notification instance ID (%d), ignore...", metaData.InstanceID, n.InstanceID) + } + common.ObjectLocks.Unlock(lockIndex) + return nil + } else if n != nil && n.Status == common.ReceiverError && metaData.InstanceID <= n.InstanceID { + if trace.IsLogging(logger.DEBUG) { + // If object notification is already "receiverError", only the new metaData can overrite the notification status to getdata + trace.Debug("In GetDataByChunk: notification status is %s, and metaData instance ID (%d) <= notification instance ID (%d), ignore...", n.Status, metaData.InstanceID, n.InstanceID) + } + common.ObjectLocks.Unlock(lockIndex) + return nil + } else if n != nil { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In GetDataByChunk: notification status %s", n.Status) + } + } + + if obj, objStatus, err := Store.RetrieveObjectAndStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Error when retrieve object, %s", err.Error()) + } + common.ObjectLocks.Unlock(lockIndex) + return err + } else if obj != nil && obj.InstanceID == metaData.InstanceID && objStatus == common.CompletelyReceived { + trace.Debug("In GetDataByChunk: object (%s %s %s) is already completely received, ignore...", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + common.ObjectLocks.Unlock(lockIndex) + return nil + } + + if err := updateGetDataNotification(metaData, metaData.OriginType, metaData.OriginID, offset); err != nil { + common.ObjectLocks.Unlock(lockIndex) + return err + } + + // now the ESS notification status is "getdata" + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Checking notifications after updating notification status") + if n, err := Store.RetrieveNotificationRecord(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID); err != nil { + trace.Debug("Error when retrieve notification record, %s\n", err.Error()) + } else if n == nil { + trace.Debug("Nil notifications") + } else { + trace.Debug("Notification status is %s after updating", 
n.Status) + } + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In http.GetDataByChunk, for %s %s %s, check if current chunk with offset %d will be the last chunk\n", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, offset) + } + + // Now check if this is going to be the last chunk + total, chunkAlreadyReceived, err := checkNotificationRecord(metaData, metaData.OriginType, metaData.OriginID, metaData.InstanceID, + common.Getdata, offset) + if err != nil { + // This notification doesn't match the existing notification record, ignore + if trace.IsLogging(logger.INFO) { + trace.Info("Ignoring data of %s %s (%s)\n", metaData.ObjectType, metaData.ObjectID, err.Error()) + } + common.ObjectLocks.Unlock(lockIndex) + return ¬ificationHandlerError{fmt.Sprintf("Error in handleData: checkNotificationRecord failed. Error: %s\n", err.Error())} + } + common.ObjectLocks.Unlock(lockIndex) + + isFirstChunk := total == 0 + isLastChunk := false + if !chunkAlreadyReceived && total+int64(metaData.ChunkSize) >= metaData.ObjectSize { + isLastChunk = true + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In http.GetDataByChunk, for %s %s %s with offset %d, isFirstChunk: %t, isLastCHunk: %t\n", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, offset, isFirstChunk, isLastChunk) + } + + url := buildObjectURL(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.InstanceID, metaData.DataID, common.Data) + request, err := http.NewRequest("GET", url, nil) + if err != nil { + return &Error{"Failed to create data request. Error: " + err.Error()} + } + security.AddIdentityToSPIRequest(request, url) + + // add offset to header + var rangeHeader string + if offset+int64(metaData.ChunkSize)-1 > metaData.ObjectSize { + rangeHeader = fmt.Sprintf("bytes=%s-%s", strconv.FormatInt(offset, 10), strconv.FormatInt(metaData.ObjectSize-1, 10)) + } else { + rangeHeader = fmt.Sprintf("bytes=%s-%s", strconv.FormatInt(offset, 10), strconv.FormatInt(offset+int64(metaData.ChunkSize)-1, 10)) + } + request.Header.Add("Range", rangeHeader) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In GetDataByChunk, Add Range header to the request: %s\n", request.Header.Get("Range")) + } + if isLastChunk { + request.Close = true + } + + response, err := communication.objDownloadRequestWrapper.do(request) + if response != nil && response.Body != nil { + defer response.Body.Close() + } + if IsTransportError(response, err) { + msg := fmt.Sprintf("In interrupted network during GetDataByChunk, for %s %s, offset: %d\n", metaData.ObjectType, metaData.ObjectID, offset) + if err != nil { + msg = fmt.Sprintf("%s. Error: %s", msg, err.Error()) + } + + if response != nil { + msg = fmt.Sprintf("%s. Response code: %d", msg, response.StatusCode) + } + if log.IsLogging(logger.ERROR) { + log.Error("%s", msg) + } + return &dataTransportTimeOutError{msg} + + } + if err != nil { + return &Error{"Error in GetDataByChunk: failed to get data. Error: " + err.Error()} + } + if response.StatusCode == http.StatusNotFound { + return &common.NotFound{} + } + + if response.StatusCode == http.StatusConflict { + // ignored by CSS + return nil + } + if response.StatusCode != http.StatusPartialContent && response.StatusCode != http.StatusOK { + msg := fmt.Sprintf("Error in GetDataByChunk: failed to receive data from the other side. 
Error code: %d, ", response.StatusCode) + return ¬ificationHandlerError{msg} + } + + common.ObjectLocks.Lock(lockIndex) + + // extract dataLengh from response header + dataLengthInString := response.Header.Get("Content-Length") + dataLength, err := strconv.ParseUint(dataLengthInString, 10, 32) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In GetDataByChunk, Content-Length header in response is %s, dataLength is %d\n", response.Header.Get("Content-Length"), dataLength) + } + + if err != nil { + common.ObjectLocks.Unlock(lockIndex) + return &Error{"Failed to extract Content-Length from response header. Error: " + err.Error()} + } + + isTempData := false + if dataLength != 0 { + if common.NeedDataVerification(metaData) { + isTempData = true + } + + if metaData.DestinationDataURI != "" { + _, err = dataURI.AppendData(metaData.DestinationDataURI, response.Body, uint32(dataLength), offset, metaData.ObjectSize, + isFirstChunk, isLastChunk, isTempData) + } else { + _, err = Store.AppendObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, response.Body, uint32(dataLength), offset, metaData.ObjectSize, + isFirstChunk, isLastChunk, isTempData) + } + + if err != nil { + common.ObjectLocks.Unlock(lockIndex) + if log.IsLogging(logger.ERROR) { + log.Error("In interrupted network while appending object data, will try again to download data for this chunk for %s %s. Error: %s\n", metaData.ObjectType, metaData.ObjectID, err.Error()) + } + msg := "Interrupted network during appending object data" + return &dataTransportTimeOutError{msg} + + } + + if isLastChunk && isTempData { + // verify data + dataVf := dataVerifier.NewDataVerifier(metaData.HashAlgorithm, metaData.PublicKey, metaData.Signature) + if dr, err := dataVf.GetTempData(metaData); err != nil { + common.ObjectLocks.Unlock(lockIndex) + return err + } else if success, err := dataVf.VerifyDataSignature(dr, metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI); !success || err != nil { + // remove temp data + dataVf.RemoveUnverifiedData(metaData) + common.ObjectLocks.Unlock(lockIndex) + return err + } + + // set metadata.DataVerified = true + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Updated object DataVerified to true for %s %s %s", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + } + + if err = Store.UpdateObjectDataVerifiedStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, true); err != nil { + common.ObjectLocks.Unlock(lockIndex) + return err + } + } + } + + if _, err := handleChunkReceived(metaData, offset, int64(dataLength), false); err != nil { + common.ObjectLocks.Unlock(lockIndex) + return ¬ificationHandlerError{"Error in handleData: handleChunkReceived failed. 
Error: " + err.Error()} + } + + if !isLastChunk { + common.ObjectLocks.Unlock(lockIndex) + } else { + removeNotificationChunksInfo(metaData, metaData.OriginType, metaData.OriginID) + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Updating ESS object status to completelyReceived for %s %s %s", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + } + if err := Store.UpdateObjectStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, common.CompletelyReceived); err != nil { + common.ObjectLocks.Unlock(lockIndex) + return &Error{fmt.Sprintf("Error in GetDataByChunk: %s\n", err)} + } + + common.ObjectLocks.Unlock(lockIndex) + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Prepare %s notification and send to CSS for object %s %s %s", common.Received, metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + } + + notificationsInfo, err := PrepareObjectStatusNotification(metaData, common.Received) + if err != nil { + return err + } + + // Send "received" notification + if err := SendNotifications(notificationsInfo); err != nil { + return err + } + + callWebhooks(&metaData) + + } + + return nil +} + // SendData sends data from the CSS to the ESS or from the ESS to the CSS func (communication *HTTP) SendData(orgID string, destType string, destID string, message []byte, chunked bool) common.SyncServiceError { return nil @@ -792,8 +1150,8 @@ func (communication *HTTP) Poll() bool { } if response.StatusCode == http.StatusNoContent { - if trace.IsLogging(logger.TRACE) { - trace.Trace("Polled the CSS, received 0 objects.\n") + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Polled the CSS, received 0 objects.\n") } return false } @@ -814,14 +1172,16 @@ func (communication *HTTP) Poll() bool { return false } - if trace.IsLogging(logger.TRACE) { - trace.Trace("Polled the CSS, received %d objects.\n", len(payload)) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Polled the CSS, received %d objects.\n", len(payload)) } for _, message := range payload { switch message.Type { case common.Update: - if err = handleUpdate(message.MetaData, 1); err != nil { + // For httpCommunication, we don't need maxInFlightChunks to control data chunk, so give it a large number + httpMaxInFlightChunks := math.MaxInt64 + if err = handleUpdate(message.MetaData, httpMaxInFlightChunks); err != nil { if isIgnoredByHandler(err) { if log.IsLogging(logger.DEBUG) { log.Error("Ignore handler error, ignore for %s %s %s %d", message.MetaData.DestOrgID, message.MetaData.ObjectType, message.MetaData.ObjectID, message.MetaData.InstanceID) @@ -965,7 +1325,8 @@ func (communication *HTTP) handleObjects(writer http.ResponseWriter, request *ht err = extractErr } else { metaData.OwnerID = orgID + "/" + destID - err = handleUpdate(*metaData, 1) + // ESS calls PUT object to update, maxInflightChunks set to 0 will not change any behavior + err = handleUpdate(*metaData, 0) } case common.Updated: err = handleObjectUpdated(orgID, objectType, objectID, destType, destID, instanceID, dataID) @@ -975,6 +1336,8 @@ func (communication *HTTP) handleObjects(writer http.ResponseWriter, request *ht err = handleAckConsumed(orgID, objectType, objectID, destType, destID, instanceID, dataID) case common.Received: err = handleObjectReceived(orgID, objectType, objectID, destType, destID, instanceID, dataID) + case common.AckReceived: + err = handleAckObjectReceived(orgID, objectType, objectID, destType, destID, instanceID, dataID) case common.Feedback: payload := feedbackMessage{} if err = 
json.NewDecoder(request.Body).Decode(&payload); err == nil { @@ -1054,78 +1417,205 @@ func (communication *HTTP) handlePutData(orgID string, objectType string, object return err } + totalSize, startOffset, endOffset, err := common.GetStartAndEndRangeFromContentRangeHeader(request) + if err != nil { + common.ObjectLocks.Unlock(lockIndex) + return &common.InvalidRequest{Message: fmt.Sprintf("Failed to parse Content-Range header, Error: %s", err.Error())} + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("totalSize: %d, startOffset: %d, endOffset: %d\n", totalSize, startOffset, endOffset) + } + + isLastChunk := false + var handlErr common.SyncServiceError + if totalSize == 0 && startOffset == -1 && endOffset == -1 { + // no Content-Range header, handle all the data at once + if isLastChunk, handlErr = communication.handlePutAllData(*metaData, request); handlErr != nil { + common.ObjectLocks.Unlock(lockIndex) + return handlErr + } + } else { + // handle the data by range + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Got Content-Range header, will handle put chunked data") + } + if isLastChunk, handlErr = communication.handlePutChunkedData(*metaData, request, startOffset, endOffset, totalSize); handlErr != nil { + common.ObjectLocks.Unlock(lockIndex) + return handlErr + } + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In handlePutData, isLastChunk is %t\n", isLastChunk) + } + + if isLastChunk { + if metaData, err := Store.RetrieveObject(orgID, objectType, objectID); err == nil && metaData != nil { + handleDataReceived(*metaData) + common.ObjectLocks.Unlock(lockIndex) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In handlePutData, prepare %s notification for %s %s %s %s %s %s\n", common.Received, orgID, objectType, objectID, metaData.DestOrgID, metaData.DestType, metaData.DestID) + } + notificationsInfo, err := PrepareObjectStatusNotification(*metaData, common.Received) + + if err != nil { + return err + } + if err := SendNotifications(notificationsInfo); err != nil { + return err + } + + callWebhooks(metaData) + } else { + common.ObjectLocks.Unlock(lockIndex) + return &common.InvalidRequest{Message: "Failed to find object to set data"} + } + } else { + common.ObjectLocks.Unlock(lockIndex) + } + return nil +} + +func (communication *HTTP) handlePutAllData(metaData common.MetaData, request *http.Request) (bool, common.SyncServiceError) { var dataVf *dataVerifier.DataVerifier - if common.IsValidHashAlgorithm(metaData.HashAlgorithm) && metaData.PublicKey != "" && metaData.Signature != "" { + if common.NeedDataVerification(metaData) { dataVf = dataVerifier.NewDataVerifier(metaData.HashAlgorithm, metaData.PublicKey, metaData.Signature) if dataVerified, err := dataVf.VerifyDataSignature(request.Body, metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI); !dataVerified || err != nil { if log.IsLogging(logger.ERROR) { - log.Error("Failed to verify data for object %s %s, remove temp data\n", metaData.ObjectType, metaData.ObjectID) + log.Error("Failed to verify data for object %s %s, remove unverified data\n", metaData.ObjectType, metaData.ObjectID) } - dataVf.RemoveTempData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI) - common.ObjectLocks.Unlock(lockIndex) - return err + dataVf.RemoveUnverifiedData(metaData) + return true, err } } if dataVf != nil { - if err := dataVf.StoreVerifiedData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI); err != nil { - 
dataVf.RemoveTempData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI) - common.ObjectLocks.Unlock(lockIndex) - return err + if err := Store.UpdateObjectDataVerifiedStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, true); err != nil { + return true, err } - } else if found, err := Store.StoreObjectData(orgID, objectType, objectID, request.Body); err != nil { // No data verification applied, then store data directly - common.ObjectLocks.Unlock(lockIndex) - return err + } else if found, err := Store.StoreObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, request.Body); err != nil { // No data verification applied, then store data directly + return true, err } else if !found { - common.ObjectLocks.Unlock(lockIndex) - return &common.InvalidRequest{Message: "Failed to find object to set data"} + return true, &common.InvalidRequest{Message: "Failed to find object to set data"} } - if err := Store.UpdateObjectStatus(orgID, objectType, objectID, common.CompletelyReceived); err != nil { - common.ObjectLocks.Unlock(lockIndex) - return err + if err := Store.UpdateObjectStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, common.CompletelyReceived); err != nil { + return true, err } - if metaData, err := Store.RetrieveObject(orgID, objectType, objectID); err == nil && metaData != nil { - handleDataReceived(*metaData) - common.ObjectLocks.Unlock(lockIndex) - notificationsInfo, err := PrepareObjectStatusNotification(*metaData, common.Received) + return true, nil - if err != nil { - return err +} + +func (communication *HTTP) handlePutChunkedData(metaData common.MetaData, request *http.Request, startOffset int64, endOffset int64, totalSize int64) (bool, common.SyncServiceError) { + + isFirstChunk := startOffset == 0 + isLastChunk := false + dataSize := endOffset - startOffset + 1 + + // append Data to temp file/data + isTempData := false + if common.NeedDataVerification(metaData) { + isTempData = true + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Inside handlePutChunkedData for %s %s %s, isTempData: %t, isFirstChunk: %t \n", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, isTempData, isFirstChunk) + } + + var err common.SyncServiceError + if metaData.DestinationDataURI != "" { + if isLastChunk, err = dataURI.AppendData(metaData.DestinationDataURI, request.Body, uint32(dataSize), startOffset, metaData.ObjectSize, + isFirstChunk, isLastChunk, isTempData); err != nil { + return isLastChunk, err } - if err := SendNotifications(notificationsInfo); err != nil { - return err + } else { + if isLastChunk, err = Store.AppendObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, request.Body, uint32(dataSize), startOffset, metaData.ObjectSize, + isFirstChunk, isLastChunk, isTempData); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Failed to apend data for %s %s %s, Error: %s\n", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, err.Error()) + } + return isLastChunk, err } + } - callWebhooks(metaData) - } else { - common.ObjectLocks.Unlock(lockIndex) - return &common.InvalidRequest{Message: "Failed to find object to set data"} + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Inside putChunkedData, isLastChunk is: %t\n", isLastChunk) } - return nil + + if isLastChunk && isTempData { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Start data verification for %s %s %s\n", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + } + + // verify + dataVf := 
dataVerifier.NewDataVerifier(metaData.HashAlgorithm, metaData.PublicKey, metaData.Signature) + if dr, err := dataVf.GetTempData(metaData); err != nil { + return isLastChunk, err + } else if success, err := dataVf.VerifyDataSignature(dr, metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.DestinationDataURI); !success || err != nil { + // remove temp data + dataVf.RemoveUnverifiedData(metaData) + return isLastChunk, err + } + + // set metadata.DataVerified = true + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Updated object DataVerified to true for %s %s %s", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + } + + if err := Store.UpdateObjectDataVerifiedStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, true); err != nil { + return isLastChunk, err + } + } + + if isLastChunk { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Updated object status to completelyReceived for %s %s %s", metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + } + if err := Store.UpdateObjectStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, common.CompletelyReceived); err != nil { + return isLastChunk, err + } + } + return isLastChunk, nil } func (communication *HTTP) handleGetData(orgID string, objectType string, objectID string, destType string, destID string, instanceID int64, dataID int64, writer http.ResponseWriter, request *http.Request) { - updateNotificationRecord := false if trace.IsLogging(logger.TRACE) { trace.Trace("Handling object get data of %s %s %s %s \n", objectType, objectID, destType, destID) } + + if common.ObjectDownloadSemaphore.TryAcquire(1) == false { + // If too many downloads are in flight, agent will get error and retry. Originally, there was a lock around the download that + // caused the downloads to be serial. It was changed to use a semaphore to allow limited concurrency. 
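	// A minimal standalone sketch of the admission pattern used here, assuming only golang.org/x/sync/semaphore
	// (which this change already imports); the handler name and the limit of 4 below are illustrative, not part of the change:
	//
	//	var downloadSem = semaphore.NewWeighted(4) // allow at most 4 concurrent downloads
	//
	//	func serveDownload(writer http.ResponseWriter, request *http.Request) {
	//		if !downloadSem.TryAcquire(1) {
	//			// Reject rather than queue; the caller is expected to retry later.
	//			writer.WriteHeader(http.StatusTooManyRequests)
	//			return
	//		}
	//		defer downloadSem.Release(1)
	//		// ... stream the object data to writer ...
	//	}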
+ if trace.IsLogging(logger.TRACE) { + trace.Trace("Failed to acquire semaphore for handleGetData of %s %s %s %s \n", objectType, objectID, destType, destID) + } + err := &Error{"Error in handleGetData: Unable to acquire object semaphore."} + SendErrorResponse(writer, err, "", http.StatusTooManyRequests) + return + } + + defer common.ObjectDownloadSemaphore.Release(1) + lockIndex := common.HashStrings(orgID, objectType, objectID) common.ObjectLocks.Lock(lockIndex) - defer common.ObjectLocks.Unlock(lockIndex) if trace.IsLogging(logger.DEBUG) { - trace.Trace("Handling object get data, retrieve notification record for %s %s %s %s %s\n", orgID, objectType, objectID, destType, destID) + trace.Debug("Handling object get data, retrieve notification record for %s %s %s %s %s\n", orgID, objectType, objectID, destType, destID) } notification, err := Store.RetrieveNotificationRecord(orgID, objectType, objectID, destType, destID) + common.ObjectLocks.Unlock(lockIndex) + if err != nil { SendErrorResponse(writer, err, "", 0) + return } else if notification == nil { err = &Error{"Error in handleGetData: no notification to update."} SendErrorResponse(writer, err, "", 0) + return } else if notification.InstanceID != instanceID { if log.IsLogging(logger.ERROR) { log.Error("Handling object get data, notification.InstanceID(%d) != metaData,InstanceID(%d), notification status(%s) for %s %s %s %s %s\n", notification.InstanceID, instanceID, notification.Status, orgID, objectType, objectID, destType, destID) @@ -1133,31 +1623,50 @@ func (communication *HTTP) handleGetData(orgID string, objectType string, object err = &ignoredByHandler{"Error in handleGetData: notification.InstanceID != instanceID or notification status is not updated."} SendErrorResponse(writer, err, "", 0) - } else if notification.Status == common.Updated || notification.Status == common.Update || notification.Status == common.UpdatePending { - // notification.InstanceID == instanceID - updateNotificationRecord = true - if trace.IsLogging(logger.DEBUG) { - trace.Debug("In handleGetData: notification (status: %s) is updated status, for %s %s %s %s %s, set updateNotificationRecord to %t \n", notification.Status, orgID, objectType, objectID, destType, destID, updateNotificationRecord) - } + return } else { - // notification status "error" cannot update notification status to "data" if trace.IsLogging(logger.DEBUG) { - trace.Debug("In handleGetData: notification (status: %s) is not in updated status, for %s %s %s %s %s, set updateNotificationRecord to %t \n", notification.Status, orgID, objectType, objectID, destType, destID, updateNotificationRecord) + trace.Debug("In handleGetData: notification status is: %s, for %s %s %s %s %s", notification.Status, orgID, objectType, objectID, destType, destID) } } - if trace.IsLogging(logger.DEBUG) { - trace.Trace("Handling object get data, retrieve object data for %s %s\n", objectType, objectID) + objectMeta, _, err := Store.RetrieveObjectAndStatus(orgID, objectType, objectID) + if err != nil { + SendErrorResponse(writer, err, "", 0) } - if dataReader, err := Store.RetrieveObjectData(orgID, objectType, objectID); err != nil { + hasRangeHeader := true + startOffset, endOffset, err := common.GetStartAndEndRangeFromRangeHeader(request) + if err != nil { SendErrorResponse(writer, err, "", 0) - } else { + } + + if startOffset == -1 && endOffset == -1 { + // Range header not specified, will get all data + startOffset = 0 + endOffset = objectMeta.ObjectSize - 1 + hasRangeHeader = false + } + + dataLength := 
int(endOffset - startOffset + 1) + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Handling object get data, retrieve object data for %s %s with range %d-%d\n", objectType, objectID, startOffset, endOffset) + } + + if dataLength == int(objectMeta.ObjectSize) || !hasRangeHeader { + dataReader, err := Store.RetrieveObjectData(orgID, objectType, objectID, false) + if err != nil { + SendErrorResponse(writer, err, "", 0) + } + if dataReader == nil { writer.WriteHeader(http.StatusNotFound) } else { writer.Header().Add("Content-Type", "application/octet-stream") writer.WriteHeader(http.StatusOK) + + // Start the download if _, err := io.Copy(writer, dataReader); err != nil { SendErrorResponse(writer, err, "", 0) } @@ -1167,32 +1676,77 @@ func (communication *HTTP) handleGetData(orgID string, objectType string, object if trace.IsLogging(logger.DEBUG) { trace.Debug("Handling object get data, update notification for %s %s %s %s, status: %s\n", objectType, objectID, destType, destID, common.Data) } - // update notification only if current notification.InstanceID == metadata.InstanceID && current notification.status == "updated" - if updateNotificationRecord { + } + } else { + // dataLength is partial && no range header + if objectData, eof, length, err := Store.ReadObjectData(orgID, objectType, objectID, dataLength, startOffset); err != nil { + SendErrorResponse(writer, err, "", 0) + } else { + if len(objectData) == 0 { if trace.IsLogging(logger.DEBUG) { - trace.Debug("Handling object get data, update notification status to data for %s %s %s %s %s\n", orgID, objectType, objectID, destType, destID) - } - notification := common.Notification{ObjectID: objectID, ObjectType: objectType, - DestOrgID: orgID, DestID: destID, DestType: destType, Status: common.Data, InstanceID: instanceID, DataID: dataID} - if err = Store.UpdateNotificationRecord(notification); err != nil { - if log.IsLogging(logger.ERROR) { - log.Error("Handling object get data, failed to update notification for %s %s %s %s with status: %s\n", objectType, objectID, destType, destID, common.Data) - } - } else { - if trace.IsLogging(logger.DEBUG) { - log.Debug("Handling object get data, update notification for %s %s %s %s with status %s is done\n", objectType, objectID, destType, destID, common.Data) - } + trace.Debug("Object data length is 0 for %s %s, return 404", objectType, objectID) } + writer.WriteHeader(http.StatusNotFound) } else { - if trace.IsLogging(logger.DEBUG) { - trace.Debug("Handling object get data, return without update notification status to data for %s %s %s %s %s, set updateNotificationRecord to %t \n", orgID, objectType, objectID, destType, destID, updateNotificationRecord) + dataReader := bytes.NewReader(objectData) + writer.Header().Add("Content-Type", "application/octet-stream") + writer.Header().Add("Content-Length", strconv.Itoa(length)) + if eof { + endOffset = objectMeta.ObjectSize - 1 } + writer.Header().Add("Content-Range", fmt.Sprintf("bytes %d-%d/%d", startOffset, endOffset, objectMeta.ObjectSize)) + writer.WriteHeader(http.StatusPartialContent) + + if _, err := io.Copy(writer, dataReader); err != nil { + SendErrorResponse(writer, err, "", 0) + } + if err := Store.CloseDataReader(dataReader); err != nil { + SendErrorResponse(writer, err, "", 0) + } + } + } + } + + /** + Removed the code with the CSS setting notificationRecord to status: common.Data since with the introduction of the semaphore and a client timeout + there are more possibilities of the agent receiving an error due to timeout but the CSS 
thinks everything completed. If the agent set the status to + an error but then the CSS set the status to common.Data, the agent would not receive the model update. The only way to guarantee that the CSS would + not overwrite the status from the agent was to eliminate the CSS setting the status at all. + **/ + +} + +func (communication *HTTP) PushData(metaData *common.MetaData, offset int64) common.SyncServiceError { + if common.Configuration.NodeType != common.ESS { + return nil + } + + if trace.IsLogging(logger.TRACE) { + trace.Trace("In http.pushData %s %s", metaData.ObjectType, metaData.ObjectID) + } + + if !common.Configuration.EnableDataChunk || metaData.ChunkSize == 0 || metaData.ObjectSize == 0 || int64(metaData.ChunkSize) >= metaData.ObjectSize { + if err := communication.pushAllData(metaData); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Failed to send all data at once. Error: %s.", err.Error()) + } + return err + } + } else { + if err := communication.pushDataByChunk(metaData, offset); err != nil { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Receive error from pushDataByChunk, offset %d, Error: %s\n, is data transport timeout error: %t", offset, err.Error(), isDataTransportTimeoutError(err)) + } + if !isDataTransportTimeoutError(err) { + return err } } } + + return nil } -func (communication *HTTP) pushData(metaData *common.MetaData) common.SyncServiceError { +func (communication *HTTP) pushAllData(metaData *common.MetaData) common.SyncServiceError { lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) common.ObjectLocks.RLock(lockIndex) defer common.ObjectLocks.RUnlock(lockIndex) @@ -1202,9 +1756,9 @@ func (communication *HTTP) pushData(metaData *common.MetaData) common.SyncServic var dataReader io.Reader var err error if metaData.SourceDataURI != "" { - dataReader, err = dataURI.GetData(metaData.SourceDataURI) + dataReader, err = dataURI.GetData(metaData.SourceDataURI, false) } else { - dataReader, err = Store.RetrieveObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + dataReader, err = Store.RetrieveObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, false) } if err != nil { return err @@ -1213,7 +1767,7 @@ func (communication *HTTP) pushData(metaData *common.MetaData) common.SyncServic request, err := http.NewRequest("PUT", url, dataReader) if err != nil { - return &Error{"Failed to read data. Error: " + err.Error()} + return &Error{"Failed to create HTTP request to upload data. Error: " + err.Error()} } security.AddIdentityToSPIRequest(request, url) request.Close = true @@ -1222,7 +1776,16 @@ func (communication *HTTP) pushData(metaData *common.MetaData) common.SyncServic if response != nil && response.Body != nil { defer response.Body.Close() } - if err != nil { + if IsTransportError(response, err) { + if log.IsLogging(logger.ERROR) { + log.Error("In interrupted network, will try to upload data by chunk for %s %s\n", metaData.ObjectType, metaData.ObjectID) + } + msg := "Timeout in PushAllData: failed to receive data from the other side." + if response != nil { + msg = fmt.Sprintf("%s, response code for pushAllData is: %d\n", msg, response.StatusCode) + } + return &dataTransportTimeOutError{msg} + } else if err != nil { return &Error{"Failed to send HTTP request. 
Error: " + err.Error()} } if response.StatusCode != http.StatusNoContent { @@ -1234,6 +1797,161 @@ func (communication *HTTP) pushData(metaData *common.MetaData) common.SyncServic return nil } +func (communication *HTTP) pushDataByChunk(metaData *common.MetaData, offset int64) common.SyncServiceError { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In pushDataByChunk with offset %d\n", offset) + } + lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + common.ObjectLocks.RLock(lockIndex) + defer common.ObjectLocks.RUnlock(lockIndex) + + if n, err := Store.RetrieveNotificationRecord(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID); err != nil { + if log.IsLogging(logger.ERROR) { + log.Error("Error when retrieving notification record, %s", err.Error()) + } + return err + } else if n != nil && metaData.InstanceID < n.InstanceID { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In pushDataByChunk: metaData instance ID (%d) < notification instance ID (%d), ignore...", metaData.InstanceID, n.InstanceID) + } + return nil + } else if n != nil && n.Status == common.ReceiverError && metaData.InstanceID <= n.InstanceID { + if trace.IsLogging(logger.DEBUG) { + // If the object notification is already "receiverError", only newer metaData can overwrite the notification status to data + trace.Debug("In pushDataByChunk: notification status is %s, and metaData instance ID (%d) <= notification instance ID (%d), ignore...", n.Status, metaData.InstanceID, n.InstanceID) + } + return nil + } else if n != nil { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In pushDataByChunk: notification status %s", n.Status) + } + + } + + if err := updatePushDataNotification(*metaData, metaData.OriginType, metaData.OriginID, offset); err != nil { + return err + } + + // now the ESS notification status is "data" + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Checking notifications after updating notification status") + if n, err := Store.RetrieveNotificationRecord(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID); err != nil { + trace.Debug("Error when retrieving notification record, %s\n", err.Error()) + } else if n == nil { + trace.Debug("Nil notifications") + } else { + trace.Debug("Notification status is %s after updating", n.Status) + } + } + + // check if this is the last chunk to send out + total, chunkAlreadySend, err := checkNotificationRecord(*metaData, metaData.OriginType, metaData.OriginID, metaData.InstanceID, + common.Data, offset) + if err != nil { + // This notification doesn't match the existing notification record, ignore + if trace.IsLogging(logger.INFO) { + trace.Info("Ignoring data of %s %s (%s)\n", metaData.ObjectType, metaData.ObjectID, err.Error()) + } + return &notificationHandlerError{fmt.Sprintf("Error in pushDataByChunk: checkNotificationRecord failed. 
Error: %s\n", err.Error())} + } + + isLastChunk := false + if !chunkAlreadySend && total+int64(metaData.ChunkSize) >= metaData.ObjectSize { + isLastChunk = true + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In pushDataByChunk, isLastChunk: %t for %s %s %s, will close request\n", isLastChunk, metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + } + } + + url := buildObjectURL(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.InstanceID, metaData.DataID, common.Data) + + startOffset := offset + endOffset := offset + int64(metaData.ChunkSize) - 1 + if endOffset >= metaData.ObjectSize { + endOffset = metaData.ObjectSize - 1 + } + + var objectData []byte + var length int + if metaData.SourceDataURI != "" { + objectData, _, length, err = dataURI.GetDataChunk(metaData.SourceDataURI, common.Configuration.MaxDataChunkSize, + offset) + } else { + objectData, _, length, err = Store.ReadObjectData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, + common.Configuration.MaxDataChunkSize, offset) + } + + if err != nil { + return err + } + + dataReader := bytes.NewReader(objectData) + request, err := http.NewRequest("PUT", url, dataReader) + if err != nil { + return &Error{"Failed to create HTTP request to upload data. Error: " + err.Error()} + } + security.AddIdentityToSPIRequest(request, url) + + // add the chunk range to the request headers + request.Header.Add("Content-Range", fmt.Sprintf("bytes %d-%d/%d", startOffset, endOffset, metaData.ObjectSize)) + request.Header.Add("Content-Length", strconv.Itoa(length)) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In pushDataByChunk, Add headers: Content-Range header is: %s, Content-Length header is: %s\n", request.Header.Get("Content-Range"), request.Header.Get("Content-Length")) + } + if isLastChunk { + request.Close = true + } + + response, err := communication.requestWrapper.do(request) + if response != nil && response.Body != nil { + defer response.Body.Close() + } + + if IsTransportError(response, err) { + if log.IsLogging(logger.ERROR) { + log.Error("Network interrupted, will retry uploading this chunk for %s %s\n", metaData.ObjectType, metaData.ObjectID) + } + msg := "Timeout in pushDataByChunk: failed to receive data from the other side." + return &dataTransportTimeOutError{msg} + } else if err != nil { + return &Error{"Failed to send data over HTTP request. Error: " + err.Error()} + } + + if response.StatusCode != http.StatusNoContent { + if log.IsLogging(logger.ERROR) { + log.Error("Failed to send chunked data. Received code: %d %s", response.StatusCode, response.Status) + } + return &Error{"Failed to push chunked data."} + } + + // mark that the chunk was received by the other side + if _, err := handleChunkReceived(*metaData, offset, int64(length), true); err != nil { + return &notificationHandlerError{"Error in pushDataByChunk: handleChunkReceived failed. Error: " + err.Error()} + } + + total, _, err = checkNotificationRecord(*metaData, metaData.OriginType, metaData.OriginID, metaData.InstanceID, + common.Data, offset) + if err != nil { + // This notification doesn't match the existing notification record, ignore + if trace.IsLogging(logger.INFO) { + trace.Info("Ignoring data of %s %s (%s)\n", metaData.ObjectType, metaData.ObjectID, err.Error()) + } + return &notificationHandlerError{fmt.Sprintf("Error in pushDataByChunk: checkNotificationRecord failed. 
Error: %s\n", err.Error())} + } + + isLastChunk = total == metaData.ObjectSize + + if isLastChunk { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Removing notification chunks info for of %s %s\n", metaData.ObjectType, metaData.ObjectID) + } + removeNotificationChunksInfo(*metaData, metaData.OriginType, metaData.OriginID) + } + + return nil + +} + // ResendObjects requests to resend all the relevant objects func (communication *HTTP) ResendObjects() common.SyncServiceError { if common.Configuration.NodeType != common.ESS { @@ -1296,11 +2014,13 @@ func (communication *HTTP) DeleteOrganization(orgID string) common.SyncServiceEr // LockDataChunks locks one of the data chunks locks func (communication *HTTP) LockDataChunks(index uint32, metadata *common.MetaData) { // Noop on HTTP + dataChunksLocks.Lock(index) } // UnlockDataChunks unlocks one of the data chunks locks func (communication *HTTP) UnlockDataChunks(index uint32, metadata *common.MetaData) { // Noop on HTTP + dataChunksLocks.Unlock(index) } // SendFeedbackMessage sends a feedback message from the ESS to the CSS or from the CSS to the ESS diff --git a/core/communications/httpCommunications_test.go b/core/communications/httpCommunications_test.go index 8d0d20e..1ae309f 100644 --- a/core/communications/httpCommunications_test.go +++ b/core/communications/httpCommunications_test.go @@ -40,7 +40,7 @@ func TestHTTPCommUpdatedObjects(t *testing.T) { } testObjects := []httpTestObjectInfo{ - {common.MetaData{ObjectID: "1", ObjectType: "type1", DestOrgID: "myorg000", DestID: "dev1", DestType: "httpDevice"}, + {common.MetaData{ObjectID: "1", ObjectType: "type1", DestOrgID: "myorg000", DestID: "dev1", DestType: "httpDevice", ObjectSize: int64(len([]byte("plokmijnuhbygv")))}, common.ReadyToSend, []byte("plokmijnuhbygv")}, {common.MetaData{ObjectID: "2", ObjectType: "type1", DestOrgID: "myorg000", DestID: "dev1", DestType: "httpDevice", Deleted: true}, @@ -217,6 +217,7 @@ func TestHTTPCommEssSendObjects(t *testing.T) { t.Errorf("Failed to store destination. 
Error: %s", err.Error()) } + common.Configuration.CommunicationProtocol = common.HTTPProtocol for _, testObject := range testObjects { writer := newHTTPCommTestResponseWriter() theURL := testObject.metaData.DestOrgID + "/" + testObject.metaData.ObjectType + "/" + @@ -305,9 +306,9 @@ func TestEssHTTPComm(t *testing.T) { } ctx.pollPayload = []updateMessage{ - {common.Update, common.MetaData{ObjectID: "1", ObjectType: "type2", DestOrgID: "myorg000", NoData: true}}, + {common.Update, common.MetaData{ObjectID: "1", ObjectType: "type2", DestOrgID: "myorg000", NoData: true, ChunkSize: 2, ObjectSize: int64(len([]byte("wsxrfvyhnplijnygv")))}}, {common.Delete, common.MetaData{ObjectID: "2", ObjectType: "type2", DestOrgID: "myorg000", NoData: true}}, - {common.Consumed, common.MetaData{ObjectID: "3", ObjectType: "type2", DestOrgID: "myorg000", NoData: true, InstanceID: 1}}, + {common.Consumed, common.MetaData{ObjectID: "3", ObjectType: "type2", DestOrgID: "myorg000", NoData: false, InstanceID: 1, ChunkSize: 2, ObjectSize: int64(len([]byte("1234567890abcdefghijkl")))}}, {common.Deleted, common.MetaData{ObjectID: "4", ObjectType: "type2", DestOrgID: "myorg000", NoData: true, InstanceID: 1, Deleted: true}}, } statusAfterPoll := []string{common.CompletelyReceived, common.ObjDeleted, common.ConsumedByDest, common.ObjDeleted} @@ -320,11 +321,24 @@ func TestEssHTTPComm(t *testing.T) { Store.UpdateNotificationRecord(notification) ctx.subTest = "pushData" - err = httpComm.pushData(&ctx.pollPayload[2].MetaData) + common.Configuration.EnableDataChunk = false + err = httpComm.PushData(&ctx.pollPayload[2].MetaData, 0) if err != nil { t.Error(err) } + ctx.subTest = "pushDataByChunk" + common.Configuration.EnableDataChunk = true + common.Configuration.MaxDataChunkSize = 2 + offset := int64(0) + for offset < ctx.pollPayload[2].MetaData.ObjectSize { + err = httpComm.PushData(&ctx.pollPayload[2].MetaData, offset) + if err != nil { + t.Error(err) + } + offset += int64(metaData.ChunkSize) + } + metaData = ctx.pollPayload[3].MetaData Store.StoreObject(metaData, nil, common.ObjDeleted) notification = common.Notification{ObjectID: metaData.ObjectID, ObjectType: metaData.ObjectType, @@ -346,11 +360,24 @@ func TestEssHTTPComm(t *testing.T) { } ctx.subTest = "getData" + common.Configuration.EnableDataChunk = false err = httpComm.GetData(ctx.pollPayload[0].MetaData, 0) if err != nil { t.Error(err) } + ctx.subTest = "getDataByChunk" + common.Configuration.EnableDataChunk = true + common.Configuration.MaxDataChunkSize = 2 + offset = int64(0) + for offset < ctx.pollPayload[0].MetaData.ObjectSize { + err = httpComm.GetData(ctx.pollPayload[0].MetaData, offset) + if err != nil { + t.Error(err) + } + offset += int64(ctx.pollPayload[0].MetaData.ChunkSize) + } + ctx.subTest = "notification" notification = common.Notification{ObjectID: "xyzzy", ObjectType: "plover", DestOrgID: "myorg000", DestID: "dev2", DestType: "httpDevice", @@ -539,6 +566,22 @@ func (ctx *testEssCommContext) testHandleObjects(writer http.ResponseWriter, req // This is "received" notification sent from httpComm.GetData writer.WriteHeader(http.StatusNoContent) } + case "getDataByChunk": + if request.Method == http.MethodGet { + startOffset, endOffset, _ := common.GetStartAndEndRangeFromRangeHeader(request) + if startOffset == -1 && endOffset == -1 { + writer.WriteHeader(http.StatusGatewayTimeout) + } else { + slice := []byte("wsxrfvyhnplijnygv") + returnContent := slice[startOffset:(endOffset + 1)] + writer.WriteHeader(http.StatusPartialContent) + 
writer.Header().Add("Content-Length", strconv.Itoa(len(returnContent))) + writer.Write(returnContent) + } + } else { + // This is "received" notification sent from httpComm.GetData + writer.WriteHeader(http.StatusNoContent) + } case "notification": writer.WriteHeader(http.StatusNoContent) @@ -546,6 +589,14 @@ func (ctx *testEssCommContext) testHandleObjects(writer http.ResponseWriter, req case "pushData": writer.WriteHeader(http.StatusNoContent) + case "pushDataByChunk": + _, startOffset, endOffset, _ := common.GetStartAndEndRangeFromContentRangeHeader(request) + if startOffset == -1 && endOffset == -1 { + writer.WriteHeader(http.StatusGatewayTimeout) + } else { + writer.WriteHeader(http.StatusNoContent) + } + case "resend": writer.WriteHeader(http.StatusNoContent) @@ -561,6 +612,7 @@ func testHTTPCommSetup(nodeType string) string { common.Running = true time.Sleep(100 * time.Millisecond) // Wait a bit common.InitObjectLocks() + common.InitObjectDownloadSemaphore() security.SetAuthentication(&security.TestAuthenticate{}) security.Start() diff --git a/core/communications/mqttCommunication.go b/core/communications/mqttCommunication.go index fcfc908..351cda7 100644 --- a/core/communications/mqttCommunication.go +++ b/core/communications/mqttCommunication.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/eclipse/paho.mqtt.golang" + mqtt "github.com/eclipse/paho.mqtt.golang" "github.com/eclipse/paho.mqtt.golang/packets" "github.com/open-horizon/edge-sync-service/common" "github.com/open-horizon/edge-sync-service/core/leader" @@ -1134,10 +1134,18 @@ func (communication *MQTT) GetData(metaData common.MetaData, offset int64) commo messageJSON, false); err != nil { return err } + lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) + common.ObjectLocks.Lock(lockIndex) err = updateGetDataNotification(metaData, metaData.OriginType, metaData.OriginID, offset) + common.ObjectLocks.Unlock(lockIndex) return err } +// PushData uploade data to from ESS to CSS +func (communication *MQTT) PushData(metaData *common.MetaData, offset int64) common.SyncServiceError { + return nil +} + // SendData sends data from the CSS to the ESS or from the ESS to the CSS func (communication *MQTT) SendData(orgID string, destType string, destID string, message []byte, chunked bool) common.SyncServiceError { if log.IsLogging(logger.TRACE) { diff --git a/core/communications/notification.go b/core/communications/notification.go index 968f0ab..7100faf 100644 --- a/core/communications/notification.go +++ b/core/communications/notification.go @@ -237,30 +237,69 @@ func resendNotificationsForDestination(dest common.Destination, resendReceivedOb continue } common.ObjectLocks.Unlock(lockIndex) - Comm.LockDataChunks(lockIndex, metaData) - offsets := getOffsetsToResend(*n, *metaData) - for _, offset := range offsets { - if trace.IsLogging(logger.TRACE) { - trace.Trace("Resending GetData request for offset %d of %s:%s:%s\n", offset, n.DestOrgID, n.ObjectType, n.ObjectID) + + nc, err := Store.RetrieveNotificationRecord(notification.DestOrgID, notification.ObjectType, notification.ObjectID, + notification.DestType, notification.DestID) + if err == nil && nc != nil && nc.Status == notification.Status && nc.InstanceID == notification.InstanceID { + Comm.LockDataChunks(lockIndex, metaData) + offsets := getOffsetsToResend(*n, *metaData) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("len(offsets) to resend %d for %s:%s:%s\n", len(offsets), n.DestOrgID, n.ObjectType, n.ObjectID) } - if err = 
Comm.GetData(*metaData, offset); err != nil { - if common.IsNotFound(err) { - if log.IsLogging(logger.ERROR) { - log.Error("Resending GetData, get notFound error for offset %d of %s:%s:%s, deleting object Info...", offset, n.DestOrgID, n.ObjectType, n.ObjectID) + for _, offset := range offsets { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Resending GetData request for offset %d of %s:%s:%s\n", offset, n.DestOrgID, n.ObjectType, n.ObjectID) + } + if err = Comm.GetData(*metaData, offset); err != nil { + if common.IsNotFound(err) { + if log.IsLogging(logger.ERROR) { + log.Error("Resending GetData, get notFound error for offset %d of %s:%s:%s, deleting object Info...", offset, n.DestOrgID, n.ObjectType, n.ObjectID) + } + deleteObjectInfo("", "", "", n.DestType, n.DestID, metaData, true) } - deleteObjectInfo("", "", "", n.DestType, n.DestID, metaData, true) + break } - break + } + Comm.UnlockDataChunks(lockIndex, metaData) + } else { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Retrieved notification is nil or with different instanceID or status") } } - Comm.UnlockDataChunks(lockIndex, metaData) + if trace.IsLogging(logger.DEBUG) { trace.Debug("In notification.go, notification getdata status for destination, resend object %s %s to destination %s %s done", n.ObjectType, n.ObjectID, n.DestType, n.DestID) } case common.ReceivedByDestination: fallthrough - case common.CompletelyReceived: - fallthrough + case common.Updated: + if common.Configuration.NodeType == common.CSS { + if dest.DestType == "" { + common.ObjectLocks.Unlock(lockIndex) + continue + } + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In notification.go, notification %s status for destination with no persistent storage, need to resend object %s %s to destination %s %s\n", n.Status, n.ObjectType, n.ObjectID, n.DestType, n.DestID) + } + + // We get here only when an ESS without persistent storage reconnects, + // and the CSS has a notification with "updated" or "received by destination" status. + // Send update notification for this object (then notification status will be changed to: updatePending) + n.Status = common.Update + n.ResendTime = 0 + if err := Store.UpdateNotificationRecord(*n); err != nil && log.IsLogging(logger.ERROR) { + log.Error("Failed to update notification record. Error: " + err.Error()) + } + common.ObjectLocks.Unlock(lockIndex) + metaData.DestType = n.DestType + metaData.DestID = n.DestID + err = Comm.SendNotificationMessage(common.Update, dest.DestType, dest.DestID, metaData.InstanceID, metaData.DataID, metaData) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In notification.go, done with resend objects for notification with data status, metaData.DestType: %s, metaData.DestID: %s\n", metaData.DestType, metaData.DestID) + } + } else { + common.ObjectLocks.Unlock(lockIndex) + } case common.Data: if dest.DestType == "" { common.ObjectLocks.Unlock(lockIndex) @@ -269,21 +308,29 @@ func resendNotificationsForDestination(dest common.Destination, resendReceivedOb if trace.IsLogging(logger.DEBUG) { trace.Debug("In notification.go, notification data status for destination, need to resend object %s %s to destination %s %s\n", n.ObjectType, n.ObjectID, n.DestType, n.DestID) } - // We get here only when an ESS without persistent storage reconnects, - // and the CSS has a notification with "data" or "received by destination" status. - // Send update notification for this object. 
- n.Status = common.Update - n.ResendTime = 0 - if err := Store.UpdateNotificationRecord(*n); err != nil && log.IsLogging(logger.ERROR) { - log.Error("Failed to update notification record. Error: " + err.Error()) - } - common.ObjectLocks.Unlock(lockIndex) - metaData.DestType = n.DestType - metaData.DestID = n.DestID - err = Comm.SendNotificationMessage(common.Update, dest.DestType, dest.DestID, metaData.InstanceID, metaData.DataID, metaData) - if trace.IsLogging(logger.DEBUG) { - trace.Debug("In notification.go, done with resend objects for notification with data status, metaData.DestType: %s, metaData.DestID: %s\n", metaData.DestType, metaData.DestID) + + if common.Configuration.NodeType == common.ESS { + // ESS with a data status, is in progress of sending data to CSS + common.ObjectLocks.Unlock(lockIndex) + Comm.LockDataChunks(lockIndex, metaData) + offsets := getOffsetsToResend(*n, *metaData) + for _, offset := range offsets { + if trace.IsLogging(logger.TRACE) { + trace.Trace("Resending pushData request for offset %d of %s:%s:%s\n", offset, n.DestOrgID, n.ObjectType, n.ObjectID) + } + if err = Comm.PushData(metaData, offset); err != nil { + if common.IsNotFound(err) { + if log.IsLogging(logger.ERROR) { + log.Error("Resending Data, get notFound error for offset %d of %s:%s:%s, deleting object Info...", offset, n.DestOrgID, n.ObjectType, n.ObjectID) + } + deleteObjectInfo("", "", "", n.DestType, n.DestID, metaData, true) + } + break + } + } + Comm.UnlockDataChunks(lockIndex, metaData) } + case common.Error: // resend only when error notification instance ID == metadata instanceID if metaData.InstanceID == n.InstanceID { diff --git a/core/communications/notificationHandler.go b/core/communications/notificationHandler.go index 2b29005..77dc727 100644 --- a/core/communications/notificationHandler.go +++ b/core/communications/notificationHandler.go @@ -309,8 +309,11 @@ func handleUpdate(metaData common.MetaData, maxInflightChunks int) common.SyncSe notificationDataID := int64(-1) if notification, err := Store.RetrieveNotificationRecord(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, - metaData.OriginType, metaData.OriginID); err == nil && notification != nil && notification.Status != common.ReceiverError { - if notification.InstanceID >= metaData.InstanceID { + metaData.OriginType, metaData.OriginID); err == nil && notification != nil { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Notification status %s, notificaiton.InstanceID: %d, metadata.InstanceID: %d\n", notification.Status, notification.InstanceID, metaData.InstanceID) + } + if notification.InstanceID > metaData.InstanceID || (notification.Status != common.ReceiverError && notification.InstanceID == metaData.InstanceID) { // This object has been sent already, ignore if trace.IsLogging(logger.TRACE) { trace.Trace("Ignoring object update of %s %s %s %s, notification status: %s, notification.InstanceID: %d, send notification to other side\n", metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID, notification.Status, notification.InstanceID) @@ -340,22 +343,51 @@ func handleUpdate(metaData common.MetaData, maxInflightChunks int) common.SyncSe &metaData) } return &ignoredByHandler{"Ignore object update"} + } else if notification.InstanceID < metaData.InstanceID { + // new object + Store.DeleteNotificationRecords(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, + metaData.OriginType, metaData.OriginID) + removeNotificationChunksInfo(metaData, metaData.OriginType, metaData.OriginID) + 
notificationDataID = notification.DataID + } else { + // notification.Status == common.ReceiverError && notification.InstanceID == metaData.InstanceID (get the same object that previously received in error) + // Remove data or partially received data and data chunks + if trace.IsLogging(logger.DEBUG) { + trace.Debug("remove object Data for object\n") + } + if err := storage.DeleteStoredData(Store, metaData); err != nil { + common.ObjectLocks.Unlock(lockIndex) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Failed to delete stored data and tmp data for object. Error: %s", err.Error()) + } + return &notificationHandlerError{fmt.Sprintf("Error in handleUpdate: failed to delete stored data for object. Error: %s\n", err)} + } + if trace.IsLogging(logger.DEBUG) { + trace.Debug("remove notificationChunksInfo for object\n") + } + removeNotificationChunksInfo(metaData, metaData.OriginType, metaData.OriginID) } - if trace.IsLogging(logger.DEBUG) { - trace.Debug("notification.InstanceID(%d) < metaData.InstanceID(%d), delete local notification record\n", notification.InstanceID, metaData.InstanceID) - } - Store.DeleteNotificationRecords(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, - metaData.OriginType, metaData.OriginID) - removeNotificationChunksInfo(metaData, metaData.OriginType, metaData.OriginID) - notificationDataID = notification.DataID - } else if notification != nil { - if trace.IsLogging(logger.DEBUG) { - trace.Debug("Notification status %s, notificaiton.InstanceID: %d, metadata.InstanceID: %d\n", notification.Status, notification.InstanceID, metaData.InstanceID) + + } + + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Create/Update notification status to %s for %s %s %s %s %s\n", common.HandleUpdate, metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID) + } + // Set notification status to "handleUpdate" + notification := common.Notification{ObjectID: metaData.ObjectID, ObjectType: metaData.ObjectType, + DestOrgID: metaData.DestOrgID, DestID: metaData.OriginID, DestType: metaData.OriginType, + Status: common.HandleUpdate, InstanceID: metaData.InstanceID, DataID: metaData.DataID} + + // Store the notification records in storage as part of the object + if err := Store.UpdateNotificationRecord(notification); err != nil { + common.ObjectLocks.Unlock(lockIndex) + if log.IsLogging(logger.ERROR) { + log.Error("In handleUpdate, failed to update notification record status to %s\n", common.HandleUpdate) + } + return err } // for receive resend error notification from CSS, the ESS notification status is still "receiverError" - if trace.IsLogging(logger.TRACE) { trace.Trace("Finish process notification, then set status to partiallyReceived of %s %s\n", metaData.ObjectType, metaData.ObjectID) } @@ -388,7 +420,13 @@ func handleUpdate(metaData common.MetaData, maxInflightChunks int) common.SyncSe _, existingObjStatus, _ := Store.RetrieveObjectAndStatus(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) if existingObjStatus == common.ReadyToSend || existingObjStatus == common.NotReadyToSend { common.ObjectLocks.Unlock(lockIndex) - return &notificationHandlerError{fmt.Sprintf("Error in handleUpdate: cannot update object from the receiver side.\n")} + return &notificationHandlerError{"Error in handleUpdate: cannot update object from the receiver side."} + } + + // If the object has data that needs verification, set DataVerified to false + metaData.DataVerified = true + if status == common.PartiallyReceived && common.NeedDataVerification(metaData) { + 
metaData.DataVerified = false } // Store the object. Now change the receiver status to "PartiallyReceived" or "CompletelyReceived" @@ -457,13 +495,16 @@ func handleUpdate(metaData common.MetaData, maxInflightChunks int) common.SyncSe Comm.LockDataChunks(lockIndex, &metaData) defer Comm.UnlockDataChunks(lockIndex, &metaData) - if metaData.ChunkSize <= 0 || metaData.ObjectSize <= 0 { + if metaData.ChunkSize <= 0 || metaData.ObjectSize <= 0 || !common.Configuration.EnableDataChunk { if err := Comm.GetData(metaData, 0); err != nil { return err } } else { var offset int64 for i := 0; i < maxInflightChunks && offset < metaData.ObjectSize; i++ { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("(i=%d) GetData from offset: %d, for %s/%s/%s, object size: %d", i, offset, metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.ObjectSize) + } if err := Comm.GetData(metaData, offset); err != nil { return err } @@ -536,7 +577,20 @@ func handleObjectConsumed(orgID string, objectType string, objectID string, dest return &notificationHandlerError{fmt.Sprintf("Error in handleObjectConsumed: failed to retrieve object. Error: %s\n", err)} } if notification == nil || metaData == nil || notification.InstanceID != instanceID || - (notification.Status != common.Data && notification.Status != common.Updated && notification.Status != common.ReceivedByDestination) { + (notification.Status != common.Data && notification.Status != common.Updated && notification.Status != common.Update && + notification.Status != common.UpdatePending && notification.Status != common.ReceivedByDestination) { + if trace.IsLogging(logger.TRACE) { + if notification == nil { + trace.Debug("notification is nil") + } else if metaData == nil { + trace.Debug("metaData is nil") + } else if notification.InstanceID != instanceID { + trace.Debug("Notification.InstanceID(%d) != instanceID(%d)\n", notification.InstanceID, instanceID) + } else { + trace.Debug("notification status (%s) is not data, update, updated, updatePending, receivedByDestination\n", notification.Status) + } + } + // Something went wrong: we can't retrieve the notification or the object, or the received notification doesn't // match the existing notification record if trace.IsLogging(logger.TRACE) { @@ -715,21 +769,23 @@ func handleObjectReceived(orgID string, objectType string, objectID string, dest return &ignoredByHandler{"Ignore object received"} } - // Mark that the object was delivered to this destination - if trace.IsLogging(logger.DEBUG) { - trace.Debug("Update object status to delivery for %s %s %s %s %s\n", orgID, objectType, objectID, destType, destID) - } - _, err = Store.UpdateObjectDeliveryStatus(common.Delivered, "", orgID, objectType, objectID, destType, destID) - if err != nil && log.IsLogging(logger.ERROR) { - log.Error("Error in handleObjectReceived: failed to mark object (%s %s %s) as delivered to the destination(%s %s). Error: %s. 
Sending destination update request to destRequestQueue", orgID, objectType, objectID, destType, destID, err) - // put this request in queue - destinationUpdateRequestInQueue := common.DestinationRequestInQueue{ - Action: common.Update, - Status: common.Delivered, - Object: *metaData, - Destination: common.Destination{DestType: destType, DestID: destID}, + if common.Configuration.NodeType == common.CSS { + // Mark that the object was delivered to this destination + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Update object status to delivery for %s %s %s %s %s\n", orgID, objectType, objectID, destType, destID) + } + _, err = Store.UpdateObjectDeliveryStatus(common.Delivered, "", orgID, objectType, objectID, destType, destID) + if err != nil && log.IsLogging(logger.ERROR) { + log.Error("Error in handleObjectReceived: failed to mark object (%s %s %s) as delivered to the destination(%s %s). Error: %s. Sending destination update request to destRequestQueue", orgID, objectType, objectID, destType, destID, err) + // put this request in queue + destinationUpdateRequestInQueue := common.DestinationRequestInQueue{ + Action: common.Update, + Status: common.Delivered, + Object: *metaData, + Destination: common.Destination{DestType: destType, DestID: destID}, + } + DestReqQueue.SendDestReqToQueue(destinationUpdateRequestInQueue) } - DestReqQueue.SendDestReqToQueue(destinationUpdateRequestInQueue) } // Mark the corresponding update notification as "received by destination" @@ -1114,7 +1170,7 @@ func handleData(dataMessage []byte) (*common.MetaData, common.SyncServiceError) return nil, &notificationHandlerError{"Error in handleData: failed to find meta data.\n"} } - total, err := checkNotificationRecord(*metaData, metaData.OriginType, metaData.OriginID, instanceID, + total, _, err := checkNotificationRecord(*metaData, metaData.OriginType, metaData.OriginID, instanceID, common.Getdata, offset) if err != nil { // This notification doesn't match the existing notification record, ignore @@ -1135,14 +1191,14 @@ func handleData(dataMessage []byte) (*common.MetaData, common.SyncServiceError) if dataLength != 0 { if metaData.DestinationDataURI != "" { - if err := dataURI.AppendData(metaData.DestinationDataURI, dataReader, dataLength, offset, metaData.ObjectSize, - isFirstChunk, isLastChunk); err != nil { + if _, err := dataURI.AppendData(metaData.DestinationDataURI, dataReader, dataLength, offset, metaData.ObjectSize, + isFirstChunk, isLastChunk, false); err != nil { common.ObjectLocks.Unlock(lockIndex) return metaData, err } } else { - if err := Store.AppendObjectData(orgID, objectType, objectID, dataReader, dataLength, offset, metaData.ObjectSize, - isFirstChunk, isLastChunk); err != nil { + if _, err := Store.AppendObjectData(orgID, objectType, objectID, dataReader, dataLength, offset, metaData.ObjectSize, + isFirstChunk, isLastChunk, false); err != nil { if storage.IsDiscarded(err) { common.ObjectLocks.Unlock(lockIndex) return metaData, nil @@ -1153,7 +1209,7 @@ func handleData(dataMessage []byte) (*common.MetaData, common.SyncServiceError) } } - maxRequestedOffset, err := handleChunkReceived(*metaData, offset, int64(dataLength)) + maxRequestedOffset, err := handleChunkReceived(*metaData, offset, int64(dataLength), false) if err != nil { common.ObjectLocks.Unlock(lockIndex) return metaData, &notificationHandlerError{"Error in handleData: handleChunkReceived failed. 
Error: " + err.Error()} @@ -1571,64 +1627,71 @@ func parseDataMessage(message []byte) (orgID string, objectType string, objectID // checkNotificationRecord checks notification's instanceID, status and offset. // It returns the expected size of the data and no error if everything is OK, and 0 and an error if not. func checkNotificationRecord(metaData common.MetaData, destType string, destID string, instanceID int64, - status string, offset int64) (int64, common.SyncServiceError) { + status string, offset int64) (int64, bool, common.SyncServiceError) { notification, err := Store.RetrieveNotificationRecord(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, destType, destID) if err != nil { - return 0, err + return 0, false, err } if notification == nil { - return 0, &notificationHandlerError{"No notification"} + return 0, false, &notificationHandlerError{"No notification"} } if notification.InstanceID != instanceID { - return 0, &notificationHandlerError{fmt.Sprintf("InstanceID mismatch: expected=%d, received=%d", notification.InstanceID, instanceID)} + return 0, false, &notificationHandlerError{fmt.Sprintf("InstanceID mismatch: expected=%d, received=%d", notification.InstanceID, instanceID)} } if notification.Status != status { - return 0, &notificationHandlerError{fmt.Sprintf("Status mismatch: expected=%s, received=%s", notification.Status, status)} + return 0, false, &notificationHandlerError{fmt.Sprintf("Status mismatch: expected=%s, received=%s", notification.Status, status)} } id := common.CreateNotificationID(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, destType, destID) notificationLock.RLock() chunksInfo, ok := notificationChunks[id] notificationLock.RUnlock() if !ok { - return 0, &notificationHandlerError{"No notification chunk info"} + return 0, false, &notificationHandlerError{"No notification chunk info"} } if _, ok := chunksInfo.chunkResendTimes[offset]; !ok { - return 0, &notificationHandlerError{fmt.Sprintf("Offset mismatch: %d not found in set of inflight requests", offset)} + return 0, false, &notificationHandlerError{fmt.Sprintf("Offset mismatch: %d not found in set of inflight requests", offset)} } if len(chunksInfo.chunksReceived) == 0 { - return 0, &notificationHandlerError{"Invalid chunks info"} + return 0, false, &notificationHandlerError{"Invalid chunks info"} } - return chunksInfo.receivedDataSize, nil + + checkAlreadyReceived := checkChunkReceived(chunksInfo, offset) + + return chunksInfo.receivedDataSize, checkAlreadyReceived, nil } -func updateGetDataNotification(metaData common.MetaData, destType string, destID string, offset int64) common.SyncServiceError { - return updateNotificationChunkInfo(true, metaData, destType, destID, offset) +func updatePushDataNotification(metaData common.MetaData, destType string, destID string, offset int64) common.SyncServiceError { - return updateNotificationChunkInfo(true, metaData, destType, destID, offset, common.Data) } -func updateNotificationChunkInfo(createNotification bool, metaData common.MetaData, destType string, destID string, offset int64) common.SyncServiceError { - lockIndex := common.HashStrings(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) - common.ObjectLocks.Lock(lockIndex) - defer common.ObjectLocks.Unlock(lockIndex) +func updateGetDataNotification(metaData common.MetaData, destType string, destID string, offset int64) common.SyncServiceError { + return updateNotificationChunkInfo(true, metaData, destType, destID, offset, common.Getdata) +} +// The caller must hold the object lock before calling this function 
and release it afterwards +func updateNotificationChunkInfo(createNotification bool, metaData common.MetaData, destType string, destID string, offset int64, status string) common.SyncServiceError { id := common.CreateNotificationID(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, destType, destID) notificationLock.RLock() chunksInfo, ok := notificationChunks[id] notificationLock.RUnlock() - if !ok { - if createNotification { - err := Store.UpdateNotificationRecord( - common.Notification{ObjectID: metaData.ObjectID, ObjectType: metaData.ObjectType, - DestOrgID: metaData.DestOrgID, DestID: destID, DestType: destType, - Status: common.Getdata, InstanceID: metaData.InstanceID, DataID: metaData.DataID}) - if err != nil { - return &notificationHandlerError{fmt.Sprintf("Failed to update notification record. Error: %s\n", err)} - } + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In updateNotificationChunkInfo, update notification status of %s to %s", id, status) + } + if createNotification { + err := Store.UpdateNotificationRecord( + common.Notification{ObjectID: metaData.ObjectID, ObjectType: metaData.ObjectType, + DestOrgID: metaData.DestOrgID, DestID: destID, DestType: destType, + Status: status, InstanceID: metaData.InstanceID, DataID: metaData.DataID}) + if err != nil { + return &notificationHandlerError{fmt.Sprintf("Failed to update notification record. Error: %s\n", err)} } + } + if !ok { chunksInfo = notificationChunksInfo{chunkSize: metaData.ChunkSize, chunkResendTimes: make(map[int64]int64)} if chunksInfo.chunkSize > 0 { numberOfBytes := int(((metaData.ObjectSize/int64(chunksInfo.chunkSize) + 1) / 8) + 1) @@ -1647,6 +1710,12 @@ func updateNotificationChunkInfo(createNotification bool, metaData common.MetaDa notificationLock.Lock() notificationChunks[id] = chunksInfo notificationLock.Unlock() + + if trace.IsLogging(logger.DEBUG) { + chunksInfo = notificationChunks[id] + trace.Debug("Get chunkResendTimes[%d]: %d\n", offset, chunksInfo.chunkResendTimes[offset]) + trace.Debug("chunksInfo.receivedDataSize is %d\n", chunksInfo.receivedDataSize) + } return nil } @@ -1661,7 +1730,7 @@ func deleteNotificationChunksInfo(orgID string, objectType string, objectID stri notificationLock.Unlock() } -func handleChunkReceived(metaData common.MetaData, offset int64, size int64) (int64, common.SyncServiceError) { +func handleChunkReceived(metaData common.MetaData, offset int64, size int64, isOtherSide bool) (int64, common.SyncServiceError) { id := common.CreateNotificationID(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, metaData.OriginType, metaData.OriginID) notificationLock.RLock() chunksInfo, ok := notificationChunks[id] @@ -1673,7 +1742,11 @@ func handleChunkReceived(metaData common.MetaData, offset int64, size int64) (in if _, ok := chunksInfo.chunkResendTimes[offset]; !ok { return 0, &notificationHandlerError{"Chunk's resend time not found"} } - delete(chunksInfo.chunkResendTimes, offset) + + // when this call only tracks a chunk received by the other side, keep the resend entry + if !isOtherSide { + delete(chunksInfo.chunkResendTimes, offset) + } // The chunksInfo.chunksReceived byte array holds a bit per chunk (identified by its offset), so each byte holds the bits of 8 chunks. 
// To access the bit of a given chunk: @@ -1686,6 +1759,7 @@ func handleChunkReceived(metaData common.MetaData, offset int64, size int64) (in bitIndex := chunkIndex & 7 bitMask := byte(1 << bitIndex) if chunksInfo.chunksReceived[byteIndex]&bitMask == 0 { + // received new chunk chunksInfo.receivedDataSize += size chunksInfo.chunksReceived[byteIndex] |= bitMask } else { @@ -1713,12 +1787,17 @@ func handleDataReceived(metaData common.MetaData) { func getOffsetsToResend(notification common.Notification, metaData common.MetaData) []int64 { offsets := make([]int64, 0) - id := common.GetNotificationID(notification) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("In getOffsetsToResend, checking chunksInfo for %s\n", id) + } notificationLock.RLock() chunksInfo, ok := notificationChunks[id] notificationLock.RUnlock() if !ok { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("No chunksInfo found for %s, will get Offsets for resend From Scratch\n", id) + } return getOffsetsForResendFromScratch(notification, metaData) } @@ -1735,14 +1814,29 @@ func getOffsetsToResend(notification common.Notification, metaData common.MetaDa // been received or that chunks have been received out of order. // In such cases we want to scan the map and see if a chunk has to be re-requested. currentTime := time.Now().Unix() + if trace.IsLogging(logger.DEBUG) { + trace.Debug("chunksInfo.resendTime: %d, currentTime: %d\n", chunksInfo.resendTime, currentTime) + trace.Debug("len(chunksInfo.chunkResendTimes)=%d\n", len(chunksInfo.chunkResendTimes)) + trace.Debug("chunksInfo.maxRequestedOffset=%d, chunksInfo.maxReceivedOffset=%d, chunksInfo.chunkSize=%d", chunksInfo.maxRequestedOffset, chunksInfo.maxReceivedOffset, chunksInfo.chunkSize) + } if chunksInfo.resendTime <= currentTime || (chunksInfo.chunkSize > 0 && int(chunksInfo.maxRequestedOffset-chunksInfo.maxReceivedOffset)/chunksInfo.chunkSize < len(chunksInfo.chunkResendTimes)) { for offset, resendTime := range chunksInfo.chunkResendTimes { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("chunksInfo.chunkResendTimes, offset: %d, resendTime: %d\n", offset, resendTime) + } if resendTime <= currentTime { offsets = append(offsets, offset) + if trace.IsLogging(logger.DEBUG) { + trace.Debug("resendTime <= currentTime, adding offset %d to resend offsets list for %s\n", offset, id) + } } } + } else { + if trace.IsLogging(logger.DEBUG) { + trace.Debug("Skip adding offsets") + } } return offsets } @@ -1765,7 +1859,7 @@ func getOffsetsForResendFromScratch(notification common.Notification, metaData c maxInflightChunks = common.Configuration.MaxInflightChunks } - if err := updateNotificationChunkInfo(false, metaData, notification.DestType, notification.DestID, 0); err != nil { + if err := updateNotificationChunkInfo(false, metaData, notification.DestType, notification.DestID, 0, ""); err != nil { if log.IsLogging(logger.ERROR) { log.Error("Failed to resend getdata notification. Error: %s\n", err) } @@ -1803,3 +1897,21 @@ func deleteObjectInfo(orgID string, objectType string, objectID string, destType } Store.DeleteNotificationRecords(orgID, objectType, objectID, destType, destID) } + +// check if chunk for the given offset is received +func checkChunkReceived(chunksInfo notificationChunksInfo, offset int64) bool { + + // The chunksInfo.chunksReceived byte array holds a bit per chunk (identified by its offset), so each byte holds the bits of 8 chunks. 
+ // To access the bit of a given chunk: + // offset/chunkSize is the chunkIndex + // chunkIndex/8 is the byteIndex + // chunkIndex&7 is the bitIndex + // (1 << bitIndex) is the bitMask which has 1 at bitIndex + + chunkIndex := uint(offset / int64(chunksInfo.chunkSize)) + byteIndex := chunkIndex >> 3 + bitIndex := chunkIndex & 7 + bitMask := byte(1 << bitIndex) + + return (chunksInfo.chunksReceived[byteIndex]&bitMask != 0) +} diff --git a/core/communications/notificationHandler_test.go b/core/communications/notificationHandler_test.go index d380af6..10c1c9e 100644 --- a/core/communications/notificationHandler_test.go +++ b/core/communications/notificationHandler_test.go @@ -12,6 +12,7 @@ import ( func TestNotificationHandler(t *testing.T) { common.InitObjectLocks() + common.InitObjectDownloadSemaphore() if common.Registered { t.Errorf("Registered flag is true") @@ -42,6 +43,8 @@ func TestNotificationHandler(t *testing.T) { t.Errorf("Failed to start communication. Error: %s", err.Error()) } + common.Configuration.CommunicationProtocol = common.MQTTProtocol + common.Configuration.NodeType = common.CSS DestReqQueue = NewDestinationRequestQueue(40) @@ -235,7 +238,7 @@ func TestNotificationHandler(t *testing.T) { t.Errorf("Wrong status: %s instead of completely received (objectID = %s)", storedStatus, row.metaData.ObjectID) } // Check data - storedDataReader, err := Store.RetrieveObjectData(row.metaData.DestOrgID, row.metaData.ObjectType, row.metaData.ObjectID) + storedDataReader, err := Store.RetrieveObjectData(row.metaData.DestOrgID, row.metaData.ObjectType, row.metaData.ObjectID, false) if err != nil { t.Errorf("Failed to fetch object's data (objectID = %s). Error: %s", row.metaData.ObjectID, err.Error()) } else { @@ -304,7 +307,7 @@ func TestNotificationHandler(t *testing.T) { } // There should be no data - dataReader, _ := Store.RetrieveObjectData(row.metaData.DestOrgID, row.metaData.ObjectType, row.metaData.ObjectID) + dataReader, _ := Store.RetrieveObjectData(row.metaData.DestOrgID, row.metaData.ObjectType, row.metaData.ObjectID, false) if dataReader != nil { t.Errorf("Deleted object has data (objectID = %s)", row.metaData.ObjectID) } diff --git a/core/communications/testCommunication.go b/core/communications/testCommunication.go index 87d4c92..e674fed 100644 --- a/core/communications/testCommunication.go +++ b/core/communications/testCommunication.go @@ -66,6 +66,12 @@ func (communication *TestComm) GetData(metaData common.MetaData, offset int64) c return err } +// PushData uploads data from the ESS to the CSS +func (communication *TestComm) PushData(metaData *common.MetaData, offset int64) common.SyncServiceError { + err := updatePushDataNotification(*metaData, metaData.OriginType, metaData.OriginID, offset) + return err +} + // SendData sends data from the CSS to the ESS or from the ESS to the CSS func (communication *TestComm) SendData(orgID string, destType string, destID string, message []byte, chunked bool) common.SyncServiceError { return nil diff --git a/core/dataURI/dataURI.go b/core/dataURI/dataURI.go index 5b50575..dba3510 100644 --- a/core/dataURI/dataURI.go +++ b/core/dataURI/dataURI.go @@ -22,40 +22,56 @@ func (e *Error) Error() string { } // AppendData appends a chunk of data to the file stored at the given URI -func AppendData(uri string, dataReader io.Reader, dataLength uint32, offset int64, total int64, isFirstChunk bool, isLastChunk bool) common.SyncServiceError { +func AppendData(uri string, dataReader io.Reader, dataLength uint32, offset int64, total int64, isFirstChunk
bool, isLastChunk bool, isTempData bool) (bool, common.SyncServiceError) { if trace.IsLogging(logger.TRACE) { trace.Trace("Storing data chunk at %s", uri) } dataURI, err := url.Parse(uri) if err != nil || !strings.EqualFold(dataURI.Scheme, "file") { - return &Error{"Invalid data URI"} + return isLastChunk, &Error{"Invalid data URI"} } filePath := dataURI.Path + ".tmp" + + if trace.IsLogging(logger.TRACE) { + trace.Trace("Open file %s", filePath) + } file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0600) if err != nil { - return common.CreateError(err, fmt.Sprintf("Failed to open file %s to append data. Error: ", dataURI.Path)) + return isLastChunk, common.CreateError(err, fmt.Sprintf("Failed to open file %s to append data. Error: ", dataURI.Path)) } defer file.Close() if _, err = file.Seek(offset, io.SeekStart); err != nil { - return &common.IOError{Message: fmt.Sprintf("Failed to seek to the offset %d of a file. Error: %s", offset, err.Error())} + return isLastChunk, &common.IOError{Message: fmt.Sprintf("Failed to seek to the offset %d of a file. Error: %s", offset, err.Error())} } written, err := io.Copy(file, dataReader) if err != nil && err != io.EOF { - return &common.IOError{Message: "Failed to write to file. Error: " + err.Error()} + return isLastChunk, &common.IOError{Message: "Failed to write to file. Error: " + err.Error()} } if written != int64(dataLength) { - return &common.IOError{Message: "Failed to write all the data to file."} + return isLastChunk, &common.IOError{Message: "Failed to write all the data to file."} } - if isLastChunk { + fileInfo, err := os.Stat(filePath) + if err != nil { + return isLastChunk, &common.IOError{Message: "Failed to check file size. Error: " + err.Error()} + } + + if trace.IsLogging(logger.TRACE) { + trace.Trace("File size after append %d is %d", offset, fileInfo.Size()) + } + + if isLastChunk && !isTempData { + if trace.IsLogging(logger.TRACE) { + trace.Trace("Rename file from %s to %s", filePath, dataURI.Path) + } if err := os.Rename(filePath, dataURI.Path); err != nil { - return &common.IOError{Message: "Failed to rename data file. Error: " + err.Error()} + return isLastChunk, &common.IOError{Message: "Failed to rename data file. Error: " + err.Error()} } } - return nil + return isLastChunk, nil } // StoreData writes the data to the file stored at the given URI @@ -144,22 +160,27 @@ func StoreDataFromTempData(uri string) common.SyncServiceError { // GetData retrieves the data stored at the given URI. // After reading, the reader has to be closed. -func GetData(uri string) (io.Reader, common.SyncServiceError) { +func GetData(uri string, isTempData bool) (io.Reader, common.SyncServiceError) { dataURI, err := url.Parse(uri) if err != nil || !strings.EqualFold(dataURI.Scheme, "file") { return nil, &Error{"Invalid data URI"} } + filePath := dataURI.Path + if isTempData { + filePath = dataURI.Path + ".tmp" + } + if trace.IsLogging(logger.TRACE) { - trace.Trace("Retrieving data from %s", uri) + trace.Trace("Retrieving data from %s", filePath) } - file, err := os.Open(dataURI.Path) + file, err := os.Open(filePath) if err != nil { if os.IsNotExist(err) { return nil, &common.NotFound{} } - return nil, common.CreateError(err, fmt.Sprintf("Failed to open file %s to read data. Error: ", dataURI.Path)) + return nil, common.CreateError(err, fmt.Sprintf("Failed to open file %s to read data. 
Error: ", filePath)) } return file, nil } @@ -210,12 +231,23 @@ func GetDataChunk(uri string, size int, offset int64) ([]byte, bool, int, common } // DeleteStoredData deletes the data file stored at the given URI -func DeleteStoredData(uri string) common.SyncServiceError { +func DeleteStoredData(uri string, isTempData bool) common.SyncServiceError { + if trace.IsLogging(logger.TRACE) { + trace.Trace("Deleting stored data at %s, isTempData: %t", uri, isTempData) + } dataURI, err := url.Parse(uri) if err != nil || !strings.EqualFold(dataURI.Scheme, "file") { return &Error{"Invalid data URI"} } - if err = os.Remove(dataURI.Path); err != nil && !os.IsNotExist(err) { + filePath := dataURI.Path + if isTempData { + filePath = dataURI.Path + ".tmp" + } + + if trace.IsLogging(logger.TRACE) { + trace.Trace("Deleting %s", filePath) + } + if err = os.Remove(filePath); err != nil && !os.IsNotExist(err) { return &common.IOError{Message: "Failed to delete data. Error: " + err.Error()} } return nil diff --git a/core/dataURI/dataURI_test.go b/core/dataURI/dataURI_test.go index 2e07e23..ff96a23 100644 --- a/core/dataURI/dataURI_test.go +++ b/core/dataURI/dataURI_test.go @@ -22,10 +22,10 @@ func TestDataURI(t *testing.T) { } for _, row := range tests { - if err := AppendData(row.uri, bytes.NewReader(row.data), row.dataLength, row.offset, 0, true, true); err != nil { + if _, err := AppendData(row.uri, bytes.NewReader(row.data), row.dataLength, row.offset, 0, true, true, false); err != nil { t.Errorf("Failed to store in data uri. Error: %s", err.Error()) } else { - if dataReader, err := GetData(row.uri); err != nil { + if dataReader, err := GetData(row.uri, false); err != nil { t.Errorf("Failed to read from data uri. Error: %s", err.Error()) } else { storedData := make([]byte, 100) @@ -56,7 +56,7 @@ func TestDataURI(t *testing.T) { if written, err := StoreData(row.uri, bytes.NewReader(row.data), row.dataLength); err != nil { t.Errorf("Failed to store in data uri. Error: %s", err.Error()) } else { - if dataReader, err := GetData(row.uri); err != nil { + if dataReader, err := GetData(row.uri, false); err != nil { t.Errorf("Failed to read from data uri. Error: %s", err.Error()) } else { if written != int64(row.dataLength) { @@ -84,10 +84,10 @@ func TestDataURI(t *testing.T) { } } } - if err = DeleteStoredData(row.uri); err != nil { + if err = DeleteStoredData(row.uri, false); err != nil { t.Errorf("Failed to delete stored data. Error: %s", err.Error()) } else { - if dataReader, err := GetData(row.uri); err == nil && dataReader != nil { + if dataReader, err := GetData(row.uri, false); err == nil && dataReader != nil { t.Errorf("Read from deleted data uri") } } @@ -116,7 +116,7 @@ func TestDataURI(t *testing.T) { isLastChunk = true } for i, chunk := range row.chunks { - if err := AppendData(row.uri, bytes.NewReader(chunk), row.lengths[i], row.offsets[i], int64(len(row.wholeData)), isFirstChunk, isLastChunk); err != nil { + if _, err := AppendData(row.uri, bytes.NewReader(chunk), row.lengths[i], row.offsets[i], int64(len(row.wholeData)), isFirstChunk, isLastChunk, false); err != nil { t.Errorf("Failed to store in data uri. Error: %s", err.Error()) } isFirstChunk = false @@ -124,7 +124,7 @@ func TestDataURI(t *testing.T) { isLastChunk = true } } - if dataReader, err := GetData(row.uri); err != nil { + if dataReader, err := GetData(row.uri, false); err != nil { t.Errorf("Failed to read from data uri. 
Error: %s", err.Error()) } else { storedData := make([]byte, 100) @@ -174,7 +174,7 @@ func TestDataURI(t *testing.T) { } } - if err = DeleteStoredData(row.uri); err != nil { + if err = DeleteStoredData(row.uri, false); err != nil { t.Errorf("Failed to delete %s. Error: %s", row.uri, err) } } diff --git a/core/dataVerifier/dataVerifier.go b/core/dataVerifier/dataVerifier.go index 4a53dc3..5fba8c0 100644 --- a/core/dataVerifier/dataVerifier.go +++ b/core/dataVerifier/dataVerifier.go @@ -5,7 +5,6 @@ import ( "crypto/rsa" "crypto/x509" "encoding/base64" - "fmt" "hash" "io" @@ -64,15 +63,15 @@ func (dataVerifier *DataVerifier) VerifyDataSignature(data io.Reader, orgID stri } else { dr := io.TeeReader(data, dataVerifier.dataHash) if trace.IsLogging(logger.DEBUG) { - trace.Debug("DataVerifier - In VerifyDataSignature, verifying and storing temp data for object %s %s\n", objectType, objectID) + trace.Debug("DataVerifier - In VerifyDataSignature, verifying and storing data for object %s %s\n", objectType, objectID) } if destinationDataURI != "" { - if _, err := dataURI.StoreTempData(destinationDataURI, dr, 0); err != nil { + if _, err := dataURI.StoreData(destinationDataURI, dr, 0); err != nil { return false, err } } else { - if exists, err := Store.StoreObjectTempData(orgID, objectType, objectID, dr); err != nil || !exists { + if exists, err := Store.StoreObjectData(orgID, objectType, objectID, dr); err != nil || !exists { return false, err } } @@ -81,65 +80,27 @@ func (dataVerifier *DataVerifier) VerifyDataSignature(data io.Reader, orgID stri } } -// StoreVerifiedData will store the data from temp data that generated during data verification. And remove temp data -func (dataVerifier *DataVerifier) StoreVerifiedData(orgID string, objectType string, objectID string, destinationDataURI string) common.SyncServiceError { - if dataVerifier.writeThrough { - return nil - } - - if destinationDataURI != "" { - if trace.IsLogging(logger.DEBUG) { - trace.Debug("DataVerifier - In StoreVerifiedData, store data from tmp data for object %s %s at URI %s\n", objectType, objectID, destinationDataURI) - } - // rename the {file}.tmp to {file} - if err := dataURI.StoreDataFromTempData(destinationDataURI); err != nil { - return err - } +// GetTempData is to get temp data for data verification +func (dataVerifier *DataVerifier) GetTempData(metaData common.MetaData) (io.Reader, common.SyncServiceError) { + var dr io.Reader + var err common.SyncServiceError + if metaData.DestinationDataURI != "" { + dr, err = dataURI.GetData(metaData.DestinationDataURI, true) } else { - // 1. Retrieve temp data, 2. Store object data, 3. 
Remove temp data - if trace.IsLogging(logger.DEBUG) { - trace.Debug("DataVerifier - In StoreVerifiedData, retrieve temp data for object %s %s\n", objectType, objectID) - } - - dataReader, err := Store.RetrieveTempObjectData(orgID, objectType, objectID) - if err != nil { - return &common.InvalidRequest{Message: "Failed to read temp data fro, Error: " + err.Error()} - } else if dataReader == nil { - return &common.InvalidRequest{Message: "Read empty temp data, Error: " + err.Error()} - } - - if trace.IsLogging(logger.DEBUG) { - trace.Debug("DataVerifier - In StoreVerifiedData, storing data for object %s %s\n", objectType, objectID) - } - - if exists, err := Store.StoreObjectData(orgID, objectType, objectID, dataReader); err != nil { - Store.CloseDataReader(dataReader) - return err - } else if !exists { - Store.CloseDataReader(dataReader) - message := fmt.Sprintf("Object metadata is not found for object %s %s %s, Error: %s\n", orgID, objectType, objectID, err.Error()) - return &common.InternalError{Message: message} - } - Store.CloseDataReader(dataReader) - - if trace.IsLogging(logger.DEBUG) { - trace.Debug("DataVerifier - In StoreVerifiedData, remove temp data for object %s %s\n", objectType, objectID) - } - - if err := Store.RemoveObjectTempData(orgID, objectType, objectID); err != nil { - return err - } + dr, err = Store.RetrieveObjectTempData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID) } - return nil + if err != nil { + return nil, err + } + return dr, nil } // CleanUp function is to clean up the temp file created during data verification func (dataVerifier *DataVerifier) RemoveTempData(orgID string, objectType string, objectID string, destinationDataURI string) common.SyncServiceError { if destinationDataURI != "" { - tmpFilePath := destinationDataURI + ".tmp" - if err := dataURI.DeleteStoredData(tmpFilePath); err != nil { + if err := dataURI.DeleteStoredData(destinationDataURI, true); err != nil { return err } } else if err := Store.RemoveObjectTempData(orgID, objectType, objectID); err != nil { @@ -148,6 +109,10 @@ func (dataVerifier *DataVerifier) RemoveTempData(orgID string, objectType string return nil } +func (dataVerifier *DataVerifier) RemoveUnverifiedData(metaData common.MetaData) common.SyncServiceError { + return storage.DeleteStoredData(Store, metaData) +} + func (dataVerifier *DataVerifier) verifyHelper(publicKeyBytes []byte, signatureBytes []byte) (bool, common.SyncServiceError) { dataHashSum := dataVerifier.dataHash.Sum(nil) if pubKey, err := x509.ParsePKIXPublicKey(publicKeyBytes); err != nil { diff --git a/core/dataVerifier/dataVerifier_test.go b/core/dataVerifier/dataVerifier_test.go index ffc6736..7f7962d 100644 --- a/core/dataVerifier/dataVerifier_test.go +++ b/core/dataVerifier/dataVerifier_test.go @@ -90,22 +90,6 @@ func testVerifyDataSignature(hashAlgo string, t *testing.T) { } dataVerifier := NewDataVerifier(hashAlgo, publicKey, signature) - if verified, err := dataVerifier.VerifyDataSignature(bytes.NewReader(wrongDataToSign), orgID, objectType, objectID, ""); err == nil || verified { - t.Errorf("Error verifying data, wrong data should not pass verification. 
verified: %t, error: %s\n", verified, err.Error()) - } - - // Need another dataVerifier object because re-use old object will make the hash calculated on top of the hash from old object - dataVerifier = NewDataVerifier(hashAlgo, publicKey, signature) - if verified, err := dataVerifier.VerifyDataSignature(bytes.NewReader(dataToSign), orgID, objectType, objectID, ""); err != nil || !verified { - t.Errorf("Error verifying data, data should pass verification. verified: %t, error: %s\n", verified, err.Error()) - } - - var reader io.Reader - if reader, err = Store.RetrieveTempObjectData(orgID, objectType, objectID); err != nil { - Store.CloseDataReader(reader) - t.Errorf("Error get temp object data for %s %s %s, error: %s\n", orgID, objectType, objectID, err.Error()) - } - Store.CloseDataReader(reader) // Store object metadata objMetaData := common.MetaData{ @@ -115,6 +99,7 @@ func testVerifyDataSignature(hashAlgo string, t *testing.T) { HashAlgorithm: hashAlgo, PublicKey: publicKey, Signature: signature, + DataVerified: false, } // Store object metadata @@ -122,21 +107,30 @@ func testVerifyDataSignature(hashAlgo string, t *testing.T) { t.Errorf("Failed to store object metadata, error: %s", err.Error()) } - // Store verified data - if err = dataVerifier.StoreVerifiedData(orgID, objectType, objectID, ""); err != nil { - t.Errorf("Error storeing verified data for %s %s %s, error: %s\n", orgID, objectType, objectID, err.Error()) + if verified, err := dataVerifier.VerifyDataSignature(bytes.NewReader(wrongDataToSign), orgID, objectType, objectID, ""); err == nil || verified { + errMessage := "" + if err != nil { + errMessage = err.Error() + } + t.Errorf("Error verifying data, wrong data should not pass verification. verified: %t, error: %s\n", verified, errMessage) } - if reader, err = Store.RetrieveTempObjectData(orgID, objectType, objectID); err != nil { - t.Errorf("Error retrieve verified data for %s %s %s, error: %s\n", orgID, objectType, objectID, err.Error()) - } else if reader != nil { - Store.CloseDataReader(reader) - t.Errorf("Temp object data for %s %s %s should be deleted\n", orgID, objectType, objectID) + // Need another dataVerifier object because re-use old object will make the hash calculated on top of the hash from old object + dataVerifier = NewDataVerifier(hashAlgo, publicKey, signature) + if verified, err := dataVerifier.VerifyDataSignature(bytes.NewReader(dataToSign), orgID, objectType, objectID, ""); err != nil || !verified { + t.Errorf("Error verifying data, data should pass verification. 
verified: %t, error: %s\n", verified, err.Error()) } - if reader, err = Store.RetrieveObjectData(orgID, objectType, objectID); err != nil { + if err = Store.UpdateObjectDataVerifiedStatus(orgID, objectType, objectID, true); err != nil { + t.Errorf("Failed to update DataVerified to true, error: %s\n", err.Error()) + } + + var reader io.Reader + if reader, err = Store.RetrieveObjectData(orgID, objectType, objectID, false); err != nil { + t.Errorf("Error retrieve verified data for %s %s %s, error: %s\n", orgID, objectType, objectID, err.Error()) + } else if reader == nil { Store.CloseDataReader(reader) - t.Errorf("Error get object data for %s %s %s, error: %s\n", orgID, objectType, objectID, err.Error()) + t.Errorf("Object data for %s %s %s should be stored after verification\n", orgID, objectType, objectID) } Store.CloseDataReader(reader) @@ -148,6 +142,17 @@ func TestVerifyDataSignatureWithDestintionDataURI(t *testing.T) { destinationURIDirFileVerified = "file:///" + destinationURIDir + "/" + "test_verified.txt" destinationURIDirFileWrong = "file:///" + destinationURIDir + "/" + "test_wrong.txt" + if status := setupDB(common.Mongo); status != "" { + t.Errorf("Failed to setup %s storage, error: %s", common.Mongo, status) + } + defer Store.Stop() + testVerifyDataSignatureWithDestintionDataURI(common.Sha1, t) + testVerifyDataSignatureWithDestintionDataURI(common.Sha256, t) + + if status := setupDB(common.Bolt); status != "" { + t.Errorf("Failed to setup %s storage, error: %s", common.Bolt, status) + } + defer Store.Stop() testVerifyDataSignatureWithDestintionDataURI(common.Sha1, t) testVerifyDataSignatureWithDestintionDataURI(common.Sha256, t) @@ -160,37 +165,40 @@ func testVerifyDataSignatureWithDestintionDataURI(hashAlgo string, t *testing.T) t.Errorf("Failed to set up publicKey and signature with SHA1 for data. Error: %s\n", err.Error()) } + objectID1 := "testDVObjID1" + objectID2 := "testDVObjID2" + + metaData1, err := setupObjectForVerify(objectID1, publicKey, signature, hashAlgo, destinationURIDirFileVerified) + if err != nil { + t.Errorf("Failed to set up object(objectID=%s) for testing. Error: %s\n", objectID1, err.Error()) + } + + metaData2, err := setupObjectForVerify(objectID2, publicKey, signature, hashAlgo, destinationURIDirFileWrong) + if err != nil { + t.Errorf("Failed to set up object(objectID=%s) for testing. Error: %s\n", objectID2, err.Error()) + } + // Verify Signature dataVerifier := NewDataVerifier(hashAlgo, publicKey, signature) - if verified, err := dataVerifier.VerifyDataSignature(bytes.NewReader(dataToSign), orgID, objectType, objectID, destinationURIDirFileVerified); err != nil || !verified { + if verified, err := dataVerifier.VerifyDataSignature(bytes.NewReader(dataToSign), metaData1.DestOrgID, metaData1.ObjectType, metaData1.ObjectID, metaData1.DestinationDataURI); err != nil || !verified { t.Errorf("Error verifying data, data should pass verification. verified: %t, error: %s\n", verified, err.Error()) } - if verified, err := dataVerifier.VerifyDataSignature(bytes.NewReader(wrongDataToSign), orgID, objectType, objectID, destinationURIDirFileWrong); err == nil || verified { + if verified, err := dataVerifier.VerifyDataSignature(bytes.NewReader(wrongDataToSign), metaData2.DestOrgID, metaData2.ObjectType, metaData2.ObjectID, metaData2.DestinationDataURI); err == nil || verified { t.Errorf("Error verifying data, wrong data should not pass verification. 
verified: %t, error: %s\n", verified, err.Error()) } // check .tmp file is created - if _, err := os.Stat(destinationURIDir + "/test_verified.txt.tmp"); err != nil { - t.Errorf("Error checking files at destinationURI %s, error: %s\n", destinationURIDirFileVerified, err.Error()) + if _, err := os.Stat(destinationURIDir + "/test_verified.txt"); err != nil { + t.Errorf("Error checking files at destinationURI %s, error: %s\n", metaData1.DestinationDataURI, err.Error()) } - if _, err := os.Stat(destinationURIDir + "/test_wrong.txt.tmp"); err != nil { - t.Errorf("Error checking files at destinationURI %s.tmp, error: %s\n", destinationURIDirFileWrong, err.Error()) + if _, err := os.Stat(destinationURIDir + "/test_wrong.txt"); err != nil { + t.Errorf("Error checking files at destinationURI %s, error: %s\n", metaData2.DestinationDataURI, err.Error()) } - // check file is created from .tmp file - if err := dataVerifier.StoreVerifiedData(orgID, objectType, objectID, destinationURIDirFileVerified); err != nil { - t.Errorf("Error storing verified data %s %s %s at destinationURI %s, error: %s\n", orgID, objectType, objectID, destinationURIDirFileVerified, err.Error()) - } - if _, err := os.Stat(destinationURIDir + "/test_verified.txt.tmp"); !os.IsNotExist(err) { - t.Errorf("The .tmp file at destinationURI %s should be removed, error: %s\n", destinationURIDir, err.Error()) - } - if _, err := os.Stat(destinationURIDir + "/test_verified.txt"); err != nil { - t.Errorf("Error checking files at destinationURI %s, error: %s\n", destinationURIDirFileVerified, err.Error()) - } - if err = dataVerifier.RemoveTempData(orgID, objectType, objectID, destinationURIDirFileWrong); err != nil { - t.Errorf("Error remove tmp data for %s %s %s at %s, error: %s\n", orgID, objectType, objectID, destinationURIDirFileWrong, err.Error()) + if err = dataVerifier.RemoveUnverifiedData(*metaData2); err != nil { + t.Errorf("Error remove tmp data for %s %s %s at %s, error: %s\n", metaData2.DestOrgID, metaData2.ObjectType, metaData2.ObjectID, metaData2.DestinationDataURI, err.Error()) } } @@ -203,6 +211,26 @@ func setupTestVars() { objectID = "testDVObjID" } +func setupObjectForVerify(objectID string, publicKey string, signature string, hashAlgo string, destinationURI string) (*common.MetaData, common.SyncServiceError) { + objMetaDataToStore := common.MetaData{ + ObjectID: objectID, + ObjectType: objectType, + DestOrgID: orgID, + HashAlgorithm: hashAlgo, + PublicKey: publicKey, + Signature: signature, + DataVerified: false, + DestinationDataURI: destinationURI, + } + + // Store object metadata + if _, err := Store.StoreObject(objMetaDataToStore, []byte{}, ""); err != nil { + return nil, err + } + return &objMetaDataToStore, nil + +} + func setupDB(dbType string) string { if dbType == common.Mongo { common.Configuration.MongoDbName = "d_test_db" diff --git a/core/storage/boltStorage.go b/core/storage/boltStorage.go index b38d4ac..8c878b3 100644 --- a/core/storage/boltStorage.go +++ b/core/storage/boltStorage.go @@ -297,7 +297,7 @@ func (store *BoltStorage) StoreObject(metaData common.MetaData, data []byte, sta return nil, err } } else if !metaData.MetaOnly { - if err := dataURI.DeleteStoredData(createDataPathFromMeta(store.localDataPath, metaData)); err != nil { + if err := dataURI.DeleteStoredData(createDataPathFromMeta(store.localDataPath, metaData), false); err != nil { return nil, err } } @@ -370,8 +370,8 @@ func (store *BoltStorage) StoreObjectData(orgID string, objectType string, objec } func (store *BoltStorage) 
StoreObjectTempData(orgID string, objectType string, objectID string, dataReader io.Reader) (bool, common.SyncServiceError) { - tmpDataPath := createDataPathForTempData(store.localDataPath, orgID, objectType, objectID) - _, err := dataURI.StoreData(tmpDataPath, dataReader, 0) + dataPath := createDataPath(store.localDataPath, orgID, objectType, objectID) + _, err := dataURI.StoreTempData(dataPath, dataReader, 0) if err != nil { return false, err } @@ -380,8 +380,8 @@ func (store *BoltStorage) StoreObjectTempData(orgID string, objectType string, o } func (store *BoltStorage) RemoveObjectTempData(orgID string, objectType string, objectID string) common.SyncServiceError { - tmpDataPath := createDataPathForTempData(store.localDataPath, orgID, objectType, objectID) - if err := dataURI.DeleteStoredData(tmpDataPath); err != nil { + dataPath := createDataPath(store.localDataPath, orgID, objectType, objectID) + if err := dataURI.DeleteStoredData(dataPath, true); err != nil { if common.IsNotFound(err) { return nil } @@ -390,10 +390,10 @@ func (store *BoltStorage) RemoveObjectTempData(orgID string, objectType string, return nil } -func (store *BoltStorage) RetrieveTempObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { +func (store *BoltStorage) RetrieveObjectTempData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { var dataReader io.Reader - tmpDataPath := createDataPathForTempData(store.localDataPath, orgID, objectType, objectID) - dataReader, err := dataURI.GetData(tmpDataPath) + dataPath := createDataPath(store.localDataPath, orgID, objectType, objectID) + dataReader, err := dataURI.GetData(dataPath, true) if err != nil { if common.IsNotFound(err) { return nil, nil @@ -420,12 +420,12 @@ func (store *BoltStorage) RetrieveObject(orgID string, objectType string, object } // RetrieveObjectData returns the object data with the specified parameters -func (store *BoltStorage) RetrieveObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { +func (store *BoltStorage) RetrieveObjectData(orgID string, objectType string, objectID string, isTempData bool) (io.Reader, common.SyncServiceError) { var dataReader io.Reader function := func(object boltObject) common.SyncServiceError { var err error if object.DataPath != "" { - dataReader, err = dataURI.GetData(object.DataPath) + dataReader, err = dataURI.GetData(object.DataPath, isTempData) return err } return nil @@ -792,7 +792,7 @@ func (store *BoltStorage) GetObjectsToActivate() ([]common.MetaData, common.Sync // AppendObjectData appends a chunk of data to the object's data func (store *BoltStorage) AppendObjectData(orgID string, objectType string, objectID string, dataReader io.Reader, dataLength uint32, - offset int64, total int64, isFirstChunk bool, isLastChunk bool) common.SyncServiceError { + offset int64, total int64, isFirstChunk bool, isLastChunk bool, isTempData bool) (bool, common.SyncServiceError) { dataPath := "" function := func(object boltObject) (boltObject, common.SyncServiceError) { @@ -807,9 +807,36 @@ func (store *BoltStorage) AppendObjectData(orgID string, objectType string, obje return object, nil } if err := store.updateObjectHelper(orgID, objectType, objectID, function); err != nil { - return err + return isLastChunk, err } - return dataURI.AppendData(dataPath, dataReader, dataLength, offset, total, isFirstChunk, isLastChunk) + return dataURI.AppendData(dataPath, dataReader, dataLength, offset, total, 
isFirstChunk, isLastChunk, isTempData) +} + +// Handles the last data chunk +func (store *BoltStorage) HandleObjectInfoForLastDataChunk(orgID string, objectType string, objectID string, isTempData bool, dataSize int64) (bool, common.SyncServiceError) { + //dataPath := createDataPath(store.localDataPath, orgID, objectType, objectID) + function := func(object boltObject) (boltObject, common.SyncServiceError) { + if object.Status == common.NotReadyToSend { + object.Status = common.ReadyToSend + } + if object.Status == common.NotReadyToSend || object.Status == common.ReadyToSend { + newID := store.getInstanceID() + object.Meta.InstanceID = newID + object.Meta.DataID = newID + } + + //object.DataPath = dataPath + object.Meta.ObjectSize = dataSize + + return object, nil + } + if err := store.updateObjectHelper(orgID, objectType, objectID, function); err != nil { + if err == notFound { + return false, nil + } + return false, err + } + return true, nil } // UpdateObjectStatus updates an object's status @@ -824,6 +851,15 @@ func (store *BoltStorage) UpdateObjectStatus(orgID string, objectType string, ob return store.updateObjectHelper(orgID, objectType, objectID, function) } +// UpdateObjectDataVerifiedStatus updates object's dataVerified field +func (store *BoltStorage) UpdateObjectDataVerifiedStatus(orgID string, objectType string, objectID string, verified bool) common.SyncServiceError { + function := func(object boltObject) (boltObject, common.SyncServiceError) { + object.Meta.DataVerified = verified + return object, nil + } + return store.updateObjectHelper(orgID, objectType, objectID, function) +} + // UpdateObjectSourceDataURI pdates object's source data URI func (store *BoltStorage) UpdateObjectSourceDataURI(orgID string, objectType string, objectID string, sourceDataURI string) common.SyncServiceError { function := func(object boltObject) (boltObject, common.SyncServiceError) { @@ -942,7 +978,10 @@ func (store *BoltStorage) ActivateObject(orgID string, objectType string, object // DeleteStoredObject deletes the object func (store *BoltStorage) DeleteStoredObject(orgID string, objectType string, objectID string) common.SyncServiceError { - if err := store.DeleteStoredData(orgID, objectType, objectID); err != nil { + if err := store.DeleteStoredData(orgID, objectType, objectID, false); err != nil { + return nil + } + if err := store.DeleteStoredData(orgID, objectType, objectID, true); err != nil { return nil } id := createObjectCollectionID(orgID, objectType, objectID) @@ -954,18 +993,25 @@ func (store *BoltStorage) DeleteStoredObject(orgID string, objectType string, ob } // DeleteStoredData deletes the object's data -func (store *BoltStorage) DeleteStoredData(orgID string, objectType string, objectID string) common.SyncServiceError { +func (store *BoltStorage) DeleteStoredData(orgID string, objectType string, objectID string, isTempData bool) common.SyncServiceError { function := func(object boltObject) (boltObject, common.SyncServiceError) { if object.DataPath == "" { return object, nil } - if err := dataURI.DeleteStoredData(object.DataPath); err != nil { + if err := dataURI.DeleteStoredData(object.DataPath, isTempData); err != nil { return object, err } - object.DataPath = "" + if !isTempData { + object.DataPath = "" + } return object, nil } - return store.updateObjectHelper(orgID, objectType, objectID, function) + + err := store.updateObjectHelper(orgID, objectType, objectID, function) + if err != nil && err != notFound { + return err + } + return nil } // CleanObjects removes the 
objects received from the other side. diff --git a/core/storage/boltStorageHelpers.go b/core/storage/boltStorageHelpers.go index eb9687e..31c5345 100644 --- a/core/storage/boltStorageHelpers.go +++ b/core/storage/boltStorageHelpers.go @@ -96,7 +96,10 @@ func (store *BoltStorage) deleteObjectsHelper(match func(boltObject) bool) commo } if match(object) { if object.DataPath != "" { - if err := dataURI.DeleteStoredData(object.DataPath); err != nil { + if err := dataURI.DeleteStoredData(object.DataPath, false); err != nil { + return err + } + if err := dataURI.DeleteStoredData(object.DataPath, true); err != nil { return err } object.DataPath = "" @@ -123,7 +126,10 @@ func (store *BoltStorage) deleteObjectsAndNotificationsHelper(match func(boltObj } if match(object) { if object.DataPath != "" { - if err := dataURI.DeleteStoredData(object.DataPath); err != nil { + if err := dataURI.DeleteStoredData(object.DataPath, false); err != nil { + return err + } + if err := dataURI.DeleteStoredData(object.DataPath, true); err != nil { return err } object.DataPath = "" diff --git a/core/storage/cache.go b/core/storage/cache.go index cba5dc6..c318294 100644 --- a/core/storage/cache.go +++ b/core/storage/cache.go @@ -80,14 +80,18 @@ func (store *Cache) RemoveObjectTempData(orgID string, objectType string, object return store.Store.RemoveObjectTempData(orgID, objectType, objectID) } -func (store *Cache) RetrieveTempObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { - return store.Store.RetrieveTempObjectData(orgID, objectType, objectID) +func (store *Cache) RetrieveObjectTempData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { + return store.Store.RetrieveObjectTempData(orgID, objectType, objectID) } // AppendObjectData appends a chunk of data to the object's data func (store *Cache) AppendObjectData(orgID string, objectType string, objectID string, dataReader io.Reader, dataLength uint32, - offset int64, total int64, isFirstChunk bool, isLastChunk bool) common.SyncServiceError { - return store.Store.AppendObjectData(orgID, objectType, objectID, dataReader, dataLength, offset, total, isFirstChunk, isLastChunk) + offset int64, total int64, isFirstChunk bool, isLastChunk bool, isTempData bool) (bool, common.SyncServiceError) { + return store.Store.AppendObjectData(orgID, objectType, objectID, dataReader, dataLength, offset, total, isFirstChunk, isLastChunk, isTempData) +} + +func (store *Cache) HandleObjectInfoForLastDataChunk(orgID string, objectType string, objectID string, isTempData bool, dataSize int64) (bool, common.SyncServiceError) { + return store.Store.HandleObjectInfoForLastDataChunk(orgID, objectType, objectID, isTempData, dataSize) } // UpdateObjectStatus updates an object's status @@ -95,6 +99,11 @@ func (store *Cache) UpdateObjectStatus(orgID string, objectType string, objectID return store.Store.UpdateObjectStatus(orgID, objectType, objectID, status) } +// UpdateObjectDataVerifiedStatus updates object's dataVerified field +func (store *Cache) UpdateObjectDataVerifiedStatus(orgID string, objectType string, objectID string, verified bool) common.SyncServiceError { + return store.Store.UpdateObjectDataVerifiedStatus(orgID, objectType, objectID, verified) +} + // UpdateObjectSourceDataURI pdates object's source data URI func (store *Cache) UpdateObjectSourceDataURI(orgID string, objectType string, objectID string, sourceDataURI string) common.SyncServiceError { return 
store.Store.UpdateObjectSourceDataURI(orgID, objectType, objectID, sourceDataURI) @@ -181,8 +190,8 @@ func (store *Cache) RetrieveObjectAndStatus(orgID string, objectType string, obj } // RetrieveObjectData returns the object data with the specified parameters -func (store *Cache) RetrieveObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { - return store.Store.RetrieveObjectData(orgID, objectType, objectID) +func (store *Cache) RetrieveObjectData(orgID string, objectType string, objectID string, isTempData bool) (io.Reader, common.SyncServiceError) { + return store.Store.RetrieveObjectData(orgID, objectType, objectID, isTempData) } // ReadObjectData returns the object data with the specified parameters @@ -221,8 +230,8 @@ func (store *Cache) DeleteStoredObject(orgID string, objectType string, objectID } // DeleteStoredData deletes the object's data -func (store *Cache) DeleteStoredData(orgID string, objectType string, objectID string) common.SyncServiceError { - return store.Store.DeleteStoredData(orgID, objectType, objectID) +func (store *Cache) DeleteStoredData(orgID string, objectType string, objectID string, isTempData bool) common.SyncServiceError { + return store.Store.DeleteStoredData(orgID, objectType, objectID, isTempData) } // CleanObjects removes the objects received from the other side. diff --git a/core/storage/inMemoryStorage.go b/core/storage/inMemoryStorage.go index 3db6f10..ac0647b 100644 --- a/core/storage/inMemoryStorage.go +++ b/core/storage/inMemoryStorage.go @@ -196,7 +196,7 @@ func (store *InMemoryStorage) RemoveObjectTempData(orgID string, objectType stri return notFound } -func (store *InMemoryStorage) RetrieveTempObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { +func (store *InMemoryStorage) RetrieveObjectTempData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { store.lock() defer store.unLock() @@ -213,7 +213,7 @@ func (store *InMemoryStorage) RetrieveTempObjectData(orgID string, objectType st // AppendObjectData appends a chunk of data to the object's data func (store *InMemoryStorage) AppendObjectData(orgID string, objectType string, objectID string, dataReader io.Reader, dataLength uint32, - offset int64, total int64, isFirstChunk bool, isLastChunk bool) common.SyncServiceError { + offset int64, total int64, isFirstChunk bool, isLastChunk bool, isTempData bool) (bool, common.SyncServiceError) { store.lock() defer store.unLock() @@ -224,7 +224,7 @@ func (store *InMemoryStorage) AppendObjectData(orgID string, objectType string, if dataLength == 0 { dt, err := ioutil.ReadAll(dataReader) if err != nil { - return &Error{"Failed to read object data. Error: " + err.Error()} + return isLastChunk, &Error{"Failed to read object data. 
Error: " + err.Error()} } data = dt dataLength = uint32(len(data)) @@ -233,26 +233,73 @@ func (store *InMemoryStorage) AppendObjectData(orgID string, objectType string, total = offset + int64(dataLength) } if isFirstChunk { - object.data = make([]byte, total) + if isTempData { + object.tmpData = make([]byte, total) + } else { + object.data = make([]byte, total) + } + } else { - object.data = ensureArrayCapacity(object.data, total) + if isTempData { + object.tmpData = ensureArrayCapacity(object.tmpData, total) + } else { + object.data = ensureArrayCapacity(object.data, total) + } + } if data != nil { - copy(object.data[offset:], data) + if isTempData { + copy(object.tmpData[offset:], data) + } else { + copy(object.data[offset:], data) + } + } else { - count, err := dataReader.Read(object.data[offset:]) - if err != nil { - return &Error{"Failed to read object data. Error: " + err.Error()} + var count int + var err error + if isTempData { + count, err = dataReader.Read(object.tmpData[offset:]) + } else { + count, err = dataReader.Read(object.data[offset:]) + } + + if err != nil && err != io.EOF { + return isLastChunk, &Error{"Failed to read object data. Error: " + err.Error()} } if count != int(dataLength) { - return &Error{fmt.Sprintf("Read %d bytes for the object data, instead of %d", count, dataLength)} + return isLastChunk, &Error{fmt.Sprintf("Read %d bytes for the object data, instead of %d", count, dataLength)} } } store.objects[id] = object - return nil + return isLastChunk, nil } - return notFound + return isLastChunk, notFound +} + +func (store *InMemoryStorage) HandleObjectInfoForLastDataChunk(orgID string, objectType string, objectID string, isTempData bool, dataSize int64) (bool, common.SyncServiceError) { + if isTempData { + return false, nil + } + store.lock() + defer store.unLock() + + id := createObjectCollectionID(orgID, objectType, objectID) + if object, ok := store.objects[id]; ok { + if object.status == common.NotReadyToSend { + object.status = common.ReadyToSend + } + if object.status == common.NotReadyToSend || object.status == common.ReadyToSend { + newID := store.getInstanceID() + object.meta.InstanceID = newID + object.meta.DataID = newID + } + object.meta.ObjectSize = dataSize + store.objects[id] = object + return true, nil + } + + return false, nil } // UpdateObjectStatus updates an object's status @@ -273,6 +320,21 @@ func (store *InMemoryStorage) UpdateObjectStatus(orgID string, objectType string return &NotFound{"Object not found"} } +// UpdateObjectDataVerifiedStatus updates object's dataVerified field +func (store *InMemoryStorage) UpdateObjectDataVerifiedStatus(orgID string, objectType string, objectID string, verified bool) common.SyncServiceError { + store.lock() + defer store.unLock() + + id := createObjectCollectionID(orgID, objectType, objectID) + if object, ok := store.objects[id]; ok { + object.meta.DataVerified = verified + store.objects[id] = object + return nil + } + + return &NotFound{"Object not found"} +} + // UpdateObjectSourceDataURI updates object's source data URI func (store *InMemoryStorage) UpdateObjectSourceDataURI(orgID string, objectType string, objectID string, sourceDataURI string) common.SyncServiceError { store.lock() @@ -474,14 +536,20 @@ func (store *InMemoryStorage) RetrieveObjectAndStatus(orgID string, objectType s } // RetrieveObjectData returns the object data with the specified parameters -func (store *InMemoryStorage) RetrieveObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { 
+func (store *InMemoryStorage) RetrieveObjectData(orgID string, objectType string, objectID string, isTempData bool) (io.Reader, common.SyncServiceError) { store.lock() defer store.unLock() id := createObjectCollectionID(orgID, objectType, objectID) if object, ok := store.objects[id]; ok { - if object.data != nil && len(object.data) > 0 { - return bytes.NewReader(object.data), nil + if isTempData { + if object.tmpData != nil && len(object.tmpData) > 0 { + return bytes.NewReader(object.tmpData), nil + } + } else { + if object.data != nil && len(object.data) > 0 { + return bytes.NewReader(object.data), nil + } } return nil, nil } @@ -586,18 +654,22 @@ func (store *InMemoryStorage) DeleteStoredObject(orgID string, objectType string } // DeleteStoredData deletes the object's data -func (store *InMemoryStorage) DeleteStoredData(orgID string, objectType string, objectID string) common.SyncServiceError { +func (store *InMemoryStorage) DeleteStoredData(orgID string, objectType string, objectID string, isTempData bool) common.SyncServiceError { store.lock() defer store.unLock() id := createObjectCollectionID(orgID, objectType, objectID) if object, ok := store.objects[id]; ok { - object.data = nil + if isTempData { + object.tmpData = nil + } else { + object.data = nil + } store.objects[id] = object return nil } - return notFound + return nil } // CleanObjects removes the objects received from the other side. @@ -1044,7 +1116,7 @@ func (store *InMemoryStorage) readPersistedTimebase(path string) int64 { return 0 } - data, err := dataURI.GetData("file://" + path) + data, err := dataURI.GetData("file://"+path, false) if err != nil || data == nil { return 0 } diff --git a/core/storage/mongoStorage.go b/core/storage/mongoStorage.go index a7f12df..2079070 100644 --- a/core/storage/mongoStorage.go +++ b/core/storage/mongoStorage.go @@ -103,6 +103,15 @@ type aclObject struct { LastUpdate bson.MongoTimestamp `bson:"last-update"` } +type dataInfoObject struct { + ID string `bson:"_id"` + ChunkSize int32 `bson:"chunkSize"` + UploadDate bson.MongoTimestamp `bson:"uploadDate"` + Length int32 `bson:"length"` + MD5 string `bson:"md5"` + Filename string `bson:"filename"` +} + const maxUpdateTries = 5 var sleepInMS int @@ -980,8 +989,14 @@ func (store *MongoStorage) RetrieveObjectAndStatus(orgID string, objectType stri } // RetrieveObjectData returns the object data with the specified parameters -func (store *MongoStorage) RetrieveObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { - id := createObjectCollectionID(orgID, objectType, objectID) +func (store *MongoStorage) RetrieveObjectData(orgID string, objectType string, objectID string, isTempData bool) (io.Reader, common.SyncServiceError) { + var id string + if isTempData { + id = createTempObjectCollectionID(orgID, objectType, objectID) + } else { + id = createObjectCollectionID(orgID, objectType, objectID) + } + fileHandle, err := store.openFile(id) if err != nil { switch err { @@ -1116,7 +1131,7 @@ func (store *MongoStorage) RemoveObjectTempData(orgID string, objectType string, } -func (store *MongoStorage) RetrieveTempObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { +func (store *MongoStorage) RetrieveObjectTempData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError) { id := createTempObjectCollectionID(orgID, objectType, objectID) fileHandle, err := store.openFile(id) if err != nil { @@ -1133,20 +1148,26 @@ func (store 
*MongoStorage) RetrieveTempObjectData(orgID string, objectType strin // AppendObjectData appends a chunk of data to the object's data func (store *MongoStorage) AppendObjectData(orgID string, objectType string, objectID string, dataReader io.Reader, - dataLength uint32, offset int64, total int64, isFirstChunk bool, isLastChunk bool) common.SyncServiceError { - id := createObjectCollectionID(orgID, objectType, objectID) + dataLength uint32, offset int64, total int64, isFirstChunk bool, isLastChunk bool, isTempData bool) (bool, common.SyncServiceError) { + var id string + if isTempData { + id = createTempObjectCollectionID(orgID, objectType, objectID) + } else { + id = createObjectCollectionID(orgID, objectType, objectID) + } + var fileHandle *fileHandle if isFirstChunk { store.removeFile(id) fh, err := store.createFile(id) if err != nil { - return err + return isLastChunk, err } fileHandle = fh } else { fh := store.getFileHandle(id) if fh == nil { - return &Error{fmt.Sprintf("Failed to append the data at offset %d, the file %s doesn't exist.", offset, id)} + return isLastChunk, &Error{fmt.Sprintf("Failed to append the data at offset %d, the file %s doesn't exist.", offset, id)} } fileHandle = fh } @@ -1161,23 +1182,23 @@ func (store *MongoStorage) AppendObjectData(orgID string, objectType string, obj data, err = ioutil.ReadAll(dataReader) n = len(data) } - if err != nil { - return &Error{fmt.Sprintf("Failed to read the data from the dataReader. Error: %s.", err)} + if err != nil && err != io.EOF { + return isLastChunk, &Error{fmt.Sprintf("Failed to read the data from the dataReader. Error: %s.", err)} } if uint32(n) != dataLength && dataLength > 0 { - return &Error{fmt.Sprintf("Failed to read all the data from the dataReader. Read %d instead of %d.", n, dataLength)} + return isLastChunk, &Error{fmt.Sprintf("Failed to read all the data from the dataReader. Read %d instead of %d.", n, dataLength)} } if offset == fileHandle.offset { for { if trace.IsLogging(logger.TRACE) { - trace.Trace(" Put data (%d) in file at offset %d\n", len(data), fileHandle.offset) + trace.Trace(" Put data (data size: %d) in file at offset %d\n", len(data), fileHandle.offset) } n, err = fileHandle.file.Write(data) if err != nil { - return &Error{fmt.Sprintf("Failed to write the data to the file. Error: %s.", err)} + return isLastChunk, &Error{fmt.Sprintf("Failed to write the data to the file. Error: %s.", err)} } if n != len(data) { - return &Error{fmt.Sprintf("Failed to write all the data to the file. Wrote %d instead of %d.", n, len(data))} + return isLastChunk, &Error{fmt.Sprintf("Failed to write all the data to the file. 
Wrote %d instead of %d.", n, len(data))} } fileHandle.offset += int64(n) if fileHandle.chunks == nil { @@ -1200,24 +1221,79 @@ func (store *MongoStorage) AppendObjectData(orgID string, objectType string, obj if trace.IsLogging(logger.INFO) { trace.Info(" Discard data chunk at offset %d since there are too many (%d) out-of-order chunks\n", offset, len(fileHandle.chunks)) } - return &Discarded{fmt.Sprintf(" Discard data chunk at offset %d since there are too many out-of-order chunks\n", offset)} + return isLastChunk, &Discarded{fmt.Sprintf(" Discard data chunk at offset %d since there are too many out-of-order chunks\n", offset)} } fileHandle.chunks[offset] = data if trace.IsLogging(logger.TRACE) { trace.Trace(" Put data (%d) in map at offset %d (# in map %d)\n", len(data), offset, len(fileHandle.chunks)) } } - if isLastChunk { + + fileSize := fileHandle.file.Size() + if trace.IsLogging(logger.TRACE) { + trace.Trace(" FileSize is: %d\n", fileSize) + } + + updatedLastChunk := isLastChunk + if fileSize == total { + + updatedLastChunk = true + if trace.IsLogging(logger.TRACE) { + trace.Trace(" FileSize is same as total, set updatedLastChunk to %t\n", updatedLastChunk) + } + } + + if updatedLastChunk { store.deleteFileHandle(id) err := fileHandle.file.Close() if err != nil { - return &Error{fmt.Sprintf("Failed to close the file. Error: %s.", err)} + return updatedLastChunk, &Error{fmt.Sprintf("Failed to close the file. Error: %s.", err)} } } else { store.putFileHandle(id, fileHandle) } - return nil + return updatedLastChunk, nil +} + +// Handles the last data chunk +func (store *MongoStorage) HandleObjectInfoForLastDataChunk(orgID string, objectType string, objectID string, isTempData bool, dataSize int64) (bool, common.SyncServiceError) { + if isTempData { + return false, nil + } + + id := createObjectCollectionID(orgID, objectType, objectID) + + result := object{} + if err := store.fetchOne(objects, bson.M{"_id": id}, bson.M{"status": bson.ElementString}, &result); err != nil { + switch err { + case mgo.ErrNotFound: + return false, nil + default: + return false, &Error{fmt.Sprintf("Failed to store the data. Error: %s.", err)} + } + } + + if result.Status == common.NotReadyToSend { + store.UpdateObjectStatus(orgID, objectType, objectID, common.ReadyToSend) + } + if result.Status == common.NotReadyToSend || result.Status == common.ReadyToSend { + newID := store.getInstanceID() + if err := store.update(objects, bson.M{"_id": id}, + bson.M{ + "$set": bson.M{"metadata.data-id": newID, "metadata.instance-id": newID}, + "$currentDate": bson.M{"last-update": bson.M{"$type": "timestamp"}}, + }); err != nil { + return false, &Error{fmt.Sprintf("Failed to set instance id. Error: %s.", err)} + } + } + + // Update object size + if err := store.update(objects, bson.M{"_id": id}, bson.M{"$set": bson.M{"metadata.object-size": dataSize}}); err != nil { + return false, &Error{fmt.Sprintf("Failed to update object's size. 
Error: %s.", err)} + } + + return true, nil } // UpdateObjectStatus updates object's status @@ -1233,6 +1309,19 @@ func (store *MongoStorage) UpdateObjectStatus(orgID string, objectType string, o return nil } +// UpdateObjectDataVerifiedStatus updates object's dataVerified field +func (store *MongoStorage) UpdateObjectDataVerifiedStatus(orgID string, objectType string, objectID string, verified bool) common.SyncServiceError { + id := createObjectCollectionID(orgID, objectType, objectID) + if err := store.update(objects, bson.M{"_id": id}, + bson.M{ + "$set": bson.M{"metadata.data-verified": verified}, + "$currentDate": bson.M{"last-update": bson.M{"$type": "timestamp"}}, + }); err != nil { + return &Error{fmt.Sprintf("Failed to update object's data-verified status. Error: %s.", err)} + } + return nil +} + // UpdateObjectSourceDataURI updates object's source data URI func (store *MongoStorage) UpdateObjectSourceDataURI(orgID string, objectType string, objectID string, sourceDataURI string) common.SyncServiceError { return nil @@ -1282,8 +1371,14 @@ func (store *MongoStorage) DeleteStoredObject(orgID string, objectType string, o } // DeleteStoredData deletes the object's data -func (store *MongoStorage) DeleteStoredData(orgID string, objectType string, objectID string) common.SyncServiceError { - id := createObjectCollectionID(orgID, objectType, objectID) +func (store *MongoStorage) DeleteStoredData(orgID string, objectType string, objectID string, isTempData bool) common.SyncServiceError { + var id string + if isTempData { + id = createTempObjectCollectionID(orgID, objectType, objectID) + } else { + id = createObjectCollectionID(orgID, objectType, objectID) + } + if trace.IsLogging(logger.TRACE) { trace.Trace("Deleting object's data %s\n", id) } @@ -1749,10 +1844,10 @@ func (store *MongoStorage) RetrieveNotifications(orgID string, destType string, if retrieveReceived { query = bson.M{"$or": []bson.M{ bson.M{"notification.status": common.Update}, + bson.M{"notification.status": common.Updated}, bson.M{"notification.status": common.Received}, bson.M{"notification.status": common.Consumed}, bson.M{"notification.status": common.Getdata}, - bson.M{"notification.status": common.Data}, bson.M{"notification.status": common.ReceivedByDestination}, bson.M{"notification.status": common.Delete}, bson.M{"notification.status": common.Deleted}}, @@ -1791,6 +1886,7 @@ func (store *MongoStorage) RetrievePendingNotifications(orgID string, destType s if destType == "" && destID == "" { query = bson.M{"$or": []bson.M{ bson.M{"notification.status": common.UpdatePending}, + bson.M{"notification.status": common.ReceivedPending}, bson.M{"notification.status": common.ConsumedPending}, bson.M{"notification.status": common.DeletePending}, bson.M{"notification.status": common.DeletedPending}}, @@ -1798,6 +1894,7 @@ func (store *MongoStorage) RetrievePendingNotifications(orgID string, destType s } else { query = bson.M{"$or": []bson.M{ bson.M{"notification.status": common.UpdatePending}, + bson.M{"notification.status": common.ReceivedPending}, bson.M{"notification.status": common.ConsumedPending}, bson.M{"notification.status": common.DeletePending}, bson.M{"notification.status": common.DeletedPending}}, diff --git a/core/storage/storage.go b/core/storage/storage.go index 58ee4c8..c35def4 100644 --- a/core/storage/storage.go +++ b/core/storage/storage.go @@ -19,6 +19,7 @@ const ( webhooks = "syncWebhooks" organizations = "syncOrganizations" acls = "syncACLs" + dataInfos = "fs.files" ) // Storage is the interface for 
diff --git a/core/storage/storage.go b/core/storage/storage.go
index 58ee4c8..c35def4 100644
--- a/core/storage/storage.go
+++ b/core/storage/storage.go
@@ -19,6 +19,7 @@ const (
 	webhooks      = "syncWebhooks"
 	organizations = "syncOrganizations"
 	acls          = "syncACLs"
+	dataInfos     = "fs.files"
 )
 
 // Storage is the interface for stores
@@ -48,14 +49,20 @@ type Storage interface {
 
 	RemoveObjectTempData(orgID string, objectType string, objectID string) common.SyncServiceError
 
-	RetrieveTempObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError)
+	RetrieveObjectTempData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError)
 
 	// Append a chunk of data to the object's data
-	AppendObjectData(orgID string, objectType string, objectID string, dataReader io.Reader, dataLength uint32, offset int64, total int64, isFirstChunk bool, isLastChunk bool) common.SyncServiceError
+	AppendObjectData(orgID string, objectType string, objectID string, dataReader io.Reader, dataLength uint32, offset int64, total int64, isFirstChunk bool, isLastChunk bool, isTempData bool) (bool, common.SyncServiceError)
+
+	// Handles the last data chunk
+	HandleObjectInfoForLastDataChunk(orgID string, objectType string, objectID string, isTempData bool, dataSize int64) (bool, common.SyncServiceError)
 
 	// Update object's status
 	UpdateObjectStatus(orgID string, objectType string, objectID string, status string) common.SyncServiceError
 
+	// UpdateObjectDataVerifiedStatus updates object's dataVerified field
+	UpdateObjectDataVerifiedStatus(orgID string, objectType string, objectID string, verified bool) common.SyncServiceError
+
 	// Update object's source data URI
 	UpdateObjectSourceDataURI(orgID string, objectType string, objectID string, sourceDataURI string) common.SyncServiceError
 
@@ -107,7 +114,7 @@ type Storage interface {
 	RetrieveObjectAndStatus(orgID string, objectType string, objectID string) (*common.MetaData, string, common.SyncServiceError)
 
 	// Return the object data with the specified parameters
-	RetrieveObjectData(orgID string, objectType string, objectID string) (io.Reader, common.SyncServiceError)
+	RetrieveObjectData(orgID string, objectType string, objectID string, isTempData bool) (io.Reader, common.SyncServiceError)
 
 	// Return the object data with the specified parameters
 	ReadObjectData(orgID string, objectType string, objectID string, size int, offset int64) ([]byte, bool, int, common.SyncServiceError)
@@ -131,7 +138,7 @@ type Storage interface {
 	DeleteStoredObject(orgID string, objectType string, objectID string) common.SyncServiceError
 
 	// Delete the object's data
-	DeleteStoredData(orgID string, objectType string, objectID string) common.SyncServiceError
+	DeleteStoredData(orgID string, objectType string, objectID string, isTempData bool) common.SyncServiceError
 
 	// CleanObjects removes the objects received from the other side.
 	// For persistant storage only partially recieved objects are removed.
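As a hedged illustration of the new isTempData switch on RetrieveObjectData (none of this is code from the patch; verifyTempData and the verify callback are assumptions): a verifier reads the temporary copy written during upload, and only if it passes would the object be marked data-verified.

package storage

import (
	"io"

	"github.com/open-horizon/edge-sync-service/common" // assumed import path
)

// verifyTempData reads the temporary copy of the object's data and runs the
// supplied verification function (e.g. a hash/signature check) over it.
func verifyTempData(store Storage, meta common.MetaData, verify func(io.Reader) bool) (bool, common.SyncServiceError) {
	reader, err := store.RetrieveObjectData(meta.DestOrgID, meta.ObjectType, meta.ObjectID, true)
	if err != nil {
		return false, err
	}
	if reader == nil {
		// Nothing to verify yet.
		return false, nil
	}
	return verify(reader), nil
}

A compile-time guard such as var _ Storage = (*MongoStorage)(nil) in this package is also a cheap way to catch implementations that miss the added isTempData parameters.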
@@ -422,7 +429,7 @@ func createDestinationCollectionID(orgID string, destType string, destID string)
 func resendNotification(notification common.Notification, retrieveReceived bool) bool {
 	s := notification.Status
 	return (s == common.Update || s == common.Consumed || s == common.Getdata || s == common.Delete || s == common.Deleted || s == common.Received || s == common.Error ||
-		(retrieveReceived && (s == common.Data || s == common.ReceivedByDestination)))
+		(retrieveReceived && (s == common.Data || s == common.Updated || s == common.ReceivedByDestination)))
 }
 
 func ensureArrayCapacity(data []byte, newCapacity int64) []byte {
@@ -446,20 +453,6 @@ func createDataPath(prefix string, orgID string, objectType string, objectID str
 	return strBuilder.String()
 }
 
-func createDataPathForTempData(prefix string, orgID string, objectType string, objectID string) string {
-	var strBuilder strings.Builder
-	strBuilder.Grow(len(prefix) + len(orgID) + len(objectType) + len(objectID) + len("tmp") + 4)
-	strBuilder.WriteString(prefix)
-	strBuilder.WriteString(orgID)
-	strBuilder.WriteByte('-')
-	strBuilder.WriteString(objectType)
-	strBuilder.WriteByte('-')
-	strBuilder.WriteString(objectID)
-	strBuilder.WriteByte('-')
-	strBuilder.WriteString("tmp")
-	return strBuilder.String()
-}
-
 func createDataPathFromMeta(prefix string, metaData common.MetaData) string {
 	return createDataPath(prefix, metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID)
 }
@@ -648,7 +641,11 @@ func DeleteStoredObject(store Storage, metaData common.MetaData) common.SyncServ
 	}
 
 	if common.Configuration.NodeType == common.ESS && metaData.DestinationDataURI != "" {
-		if err := dataURI.DeleteStoredData(metaData.DestinationDataURI); err != nil {
+		if err := dataURI.DeleteStoredData(metaData.DestinationDataURI, false); err != nil {
+			return err
+		}
+
+		if err := dataURI.DeleteStoredData(metaData.DestinationDataURI, true); err != nil {
 			return err
 		}
 	}
@@ -659,11 +656,21 @@ func DeleteStoredObject(store Storage, metaData common.MetaData) common.SyncServ
 
 // DeleteStoredData calls the storage to delete the object's data
 func DeleteStoredData(store Storage, metaData common.MetaData) common.SyncServiceError {
 	if common.Configuration.NodeType == common.ESS && metaData.DestinationDataURI != "" {
-		if err := dataURI.DeleteStoredData(metaData.DestinationDataURI); err != nil {
+		if err := dataURI.DeleteStoredData(metaData.DestinationDataURI, true); err != nil {
+			return err
+		}
+		if err := dataURI.DeleteStoredData(metaData.DestinationDataURI, false); err != nil {
 			return err
 		}
 		return nil
 	}
 
-	return store.DeleteStoredData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID)
+	if err := store.DeleteStoredData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, true); err != nil {
+		return err
+	}
+
+	if err := store.DeleteStoredData(metaData.DestOrgID, metaData.ObjectType, metaData.ObjectID, false); err != nil {
+		return err
+	}
+	return nil
 }
diff --git a/core/storage/storage_test.go b/core/storage/storage_test.go
index 170dad7..16061bf 100644
--- a/core/storage/storage_test.go
+++ b/core/storage/storage_test.go
@@ -847,7 +847,7 @@ func testStorageObjectData(storageType string, t *testing.T) {
 
 		// Check stored data
 		dataReader, err := store.RetrieveObjectData(test.metaData.DestOrgID,
-			test.metaData.ObjectType, test.metaData.ObjectID)
+			test.metaData.ObjectType, test.metaData.ObjectID, false)
 		if err != nil {
 			t.Errorf("Failed to retrieve object's data' (objectID = %s). Error: %s\n", test.metaData.ObjectID, err.Error())
 		} else if dataReader == nil {
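For completeness, a hedged sketch of a chunked-append test helper (assumed, not part of this patch); it exercises the new (bool, error) return of AppendObjectData by feeding data in fixed-size chunks and reporting the flag returned for the final chunk.

package storage

import (
	"bytes"
	"testing"

	"github.com/open-horizon/edge-sync-service/common" // assumed import path
)

// appendInChunks feeds data to the store in chunkSize pieces (chunkSize is
// assumed to be > 0) and returns the last-chunk flag reported by the final
// AppendObjectData call.
func appendInChunks(t *testing.T, store Storage, meta common.MetaData, data []byte, chunkSize int) bool {
	lastReported := false
	for offset := 0; offset < len(data); offset += chunkSize {
		end := offset + chunkSize
		if end > len(data) {
			end = len(data)
		}
		chunk := data[offset:end]
		isFirst := offset == 0
		isLast := end == len(data)
		reported, err := store.AppendObjectData(meta.DestOrgID, meta.ObjectType, meta.ObjectID,
			bytes.NewReader(chunk), uint32(len(chunk)), int64(offset), int64(len(data)), isFirst, isLast, false)
		if err != nil {
			t.Fatalf("AppendObjectData failed (objectID = %s). Error: %s\n", meta.ObjectID, err.Error())
		}
		lastReported = reported
	}
	return lastReported
}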
@@ -968,11 +968,11 @@ func testStorageObjectData(storageType string, t *testing.T) {
 
 		// Append data
 		if test.data != nil {
-			if err := store.AppendObjectData(test.metaData.DestOrgID, test.metaData.ObjectType, test.metaData.ObjectID,
-				bytes.NewReader(test.data), uint32(len(test.data)), 0, test.metaData.ObjectSize, true, false); err != nil {
+			if _, err := store.AppendObjectData(test.metaData.DestOrgID, test.metaData.ObjectType, test.metaData.ObjectID,
+				bytes.NewReader(test.data), uint32(len(test.data)), 0, test.metaData.ObjectSize, true, false, false); err != nil {
 				t.Errorf("AppendObjectData failed (objectID = %s). Error: %s\n", test.metaData.ObjectID, err.Error())
-			} else if err := store.AppendObjectData(test.metaData.DestOrgID, test.metaData.ObjectType, test.metaData.ObjectID,
-				bytes.NewReader(test.newData), uint32(len(test.newData)), int64(len(test.data)), test.metaData.ObjectSize, false, true); err != nil {
+			} else if _, err := store.AppendObjectData(test.metaData.DestOrgID, test.metaData.ObjectType, test.metaData.ObjectID,
+				bytes.NewReader(test.newData), uint32(len(test.newData)), int64(len(test.data)), test.metaData.ObjectSize, false, true, false); err != nil {
 				t.Errorf("AppendObjectData failed (objectID = %s). Error: %s\n", test.metaData.ObjectID, err.Error())
 			} else {
 				expectedData := append(test.data, test.newData...)