diff --git a/api/uexecutor/v1/tx.pulsar.go b/api/uexecutor/v1/tx.pulsar.go index 0c0ce2f7..0bd51284 100644 --- a/api/uexecutor/v1/tx.pulsar.go +++ b/api/uexecutor/v1/tx.pulsar.go @@ -5626,6 +5626,925 @@ func (x *fastReflection_MsgVoteInboundResponse) ProtoMethods() *protoiface.Metho } } +var ( + md_MsgVoteOutbound protoreflect.MessageDescriptor + fd_MsgVoteOutbound_signer protoreflect.FieldDescriptor + fd_MsgVoteOutbound_tx_id protoreflect.FieldDescriptor + fd_MsgVoteOutbound_observed_tx protoreflect.FieldDescriptor +) + +func init() { + file_uexecutor_v1_tx_proto_init() + md_MsgVoteOutbound = File_uexecutor_v1_tx_proto.Messages().ByName("MsgVoteOutbound") + fd_MsgVoteOutbound_signer = md_MsgVoteOutbound.Fields().ByName("signer") + fd_MsgVoteOutbound_tx_id = md_MsgVoteOutbound.Fields().ByName("tx_id") + fd_MsgVoteOutbound_observed_tx = md_MsgVoteOutbound.Fields().ByName("observed_tx") +} + +var _ protoreflect.Message = (*fastReflection_MsgVoteOutbound)(nil) + +type fastReflection_MsgVoteOutbound MsgVoteOutbound + +func (x *MsgVoteOutbound) ProtoReflect() protoreflect.Message { + return (*fastReflection_MsgVoteOutbound)(x) +} + +func (x *MsgVoteOutbound) slowProtoReflect() protoreflect.Message { + mi := &file_uexecutor_v1_tx_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_MsgVoteOutbound_messageType fastReflection_MsgVoteOutbound_messageType +var _ protoreflect.MessageType = fastReflection_MsgVoteOutbound_messageType{} + +type fastReflection_MsgVoteOutbound_messageType struct{} + +func (x fastReflection_MsgVoteOutbound_messageType) Zero() protoreflect.Message { + return (*fastReflection_MsgVoteOutbound)(nil) +} +func (x fastReflection_MsgVoteOutbound_messageType) New() protoreflect.Message { + return new(fastReflection_MsgVoteOutbound) +} +func (x 
fastReflection_MsgVoteOutbound_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_MsgVoteOutbound +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_MsgVoteOutbound) Descriptor() protoreflect.MessageDescriptor { + return md_MsgVoteOutbound +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_MsgVoteOutbound) Type() protoreflect.MessageType { + return _fastReflection_MsgVoteOutbound_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_MsgVoteOutbound) New() protoreflect.Message { + return new(fastReflection_MsgVoteOutbound) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_MsgVoteOutbound) Interface() protoreflect.ProtoMessage { + return (*MsgVoteOutbound)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_MsgVoteOutbound) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.Signer != "" { + value := protoreflect.ValueOfString(x.Signer) + if !f(fd_MsgVoteOutbound_signer, value) { + return + } + } + if x.TxId != "" { + value := protoreflect.ValueOfString(x.TxId) + if !f(fd_MsgVoteOutbound_tx_id, value) { + return + } + } + if x.ObservedTx != nil { + value := protoreflect.ValueOfMessage(x.ObservedTx.ProtoReflect()) + if !f(fd_MsgVoteOutbound_observed_tx, value) { + return + } + } +} + +// Has reports whether a field is populated. 
+// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_MsgVoteOutbound) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "uexecutor.v1.MsgVoteOutbound.signer": + return x.Signer != "" + case "uexecutor.v1.MsgVoteOutbound.tx_id": + return x.TxId != "" + case "uexecutor.v1.MsgVoteOutbound.observed_tx": + return x.ObservedTx != nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutbound")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutbound does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgVoteOutbound) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "uexecutor.v1.MsgVoteOutbound.signer": + x.Signer = "" + case "uexecutor.v1.MsgVoteOutbound.tx_id": + x.TxId = "" + case "uexecutor.v1.MsgVoteOutbound.observed_tx": + x.ObservedTx = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutbound")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutbound does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. 
+// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_MsgVoteOutbound) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "uexecutor.v1.MsgVoteOutbound.signer": + value := x.Signer + return protoreflect.ValueOfString(value) + case "uexecutor.v1.MsgVoteOutbound.tx_id": + value := x.TxId + return protoreflect.ValueOfString(value) + case "uexecutor.v1.MsgVoteOutbound.observed_tx": + value := x.ObservedTx + return protoreflect.ValueOfMessage(value.ProtoReflect()) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutbound")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutbound does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_MsgVoteOutbound) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "uexecutor.v1.MsgVoteOutbound.signer": + x.Signer = value.Interface().(string) + case "uexecutor.v1.MsgVoteOutbound.tx_id": + x.TxId = value.Interface().(string) + case "uexecutor.v1.MsgVoteOutbound.observed_tx": + x.ObservedTx = value.Message().Interface().(*OutboundObservation) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutbound")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutbound does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_MsgVoteOutbound) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "uexecutor.v1.MsgVoteOutbound.observed_tx": + if x.ObservedTx == nil { + x.ObservedTx = new(OutboundObservation) + } + return protoreflect.ValueOfMessage(x.ObservedTx.ProtoReflect()) + case "uexecutor.v1.MsgVoteOutbound.signer": + panic(fmt.Errorf("field signer of message uexecutor.v1.MsgVoteOutbound is not mutable")) + case "uexecutor.v1.MsgVoteOutbound.tx_id": + panic(fmt.Errorf("field tx_id of message uexecutor.v1.MsgVoteOutbound is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutbound")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutbound does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_MsgVoteOutbound) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "uexecutor.v1.MsgVoteOutbound.signer": + return protoreflect.ValueOfString("") + case "uexecutor.v1.MsgVoteOutbound.tx_id": + return protoreflect.ValueOfString("") + case "uexecutor.v1.MsgVoteOutbound.observed_tx": + m := new(OutboundObservation) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutbound")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutbound does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. 
+func (x *fastReflection_MsgVoteOutbound) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in uexecutor.v1.MsgVoteOutbound", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_MsgVoteOutbound) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgVoteOutbound) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_MsgVoteOutbound) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_MsgVoteOutbound) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*MsgVoteOutbound) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + l = len(x.Signer) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.TxId) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.ObservedTx != nil { + l = options.Size(x.ObservedTx) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*MsgVoteOutbound) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.ObservedTx != nil { + encoded, err := options.Marshal(x.ObservedTx) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x1a + } + if len(x.TxId) > 0 { + i -= len(x.TxId) + copy(dAtA[i:], x.TxId) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.TxId))) + i-- + dAtA[i] = 0x12 + } + if len(x.Signer) > 0 { + i -= len(x.Signer) + copy(dAtA[i:], x.Signer) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Signer))) + i-- + dAtA[i] = 0xa + } + if input.Buf 
!= nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*MsgVoteOutbound) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgVoteOutbound: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgVoteOutbound: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return 
protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field TxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.TxId = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ObservedTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.ObservedTx == nil { + x.ObservedTx = &OutboundObservation{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.ObservedTx); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if 
!options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var ( + md_MsgVoteOutboundResponse protoreflect.MessageDescriptor +) + +func init() { + file_uexecutor_v1_tx_proto_init() + md_MsgVoteOutboundResponse = File_uexecutor_v1_tx_proto.Messages().ByName("MsgVoteOutboundResponse") +} + +var _ protoreflect.Message = (*fastReflection_MsgVoteOutboundResponse)(nil) + +type fastReflection_MsgVoteOutboundResponse MsgVoteOutboundResponse + +func (x *MsgVoteOutboundResponse) ProtoReflect() protoreflect.Message { + return (*fastReflection_MsgVoteOutboundResponse)(x) +} + +func (x *MsgVoteOutboundResponse) slowProtoReflect() protoreflect.Message { + mi := &file_uexecutor_v1_tx_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_MsgVoteOutboundResponse_messageType fastReflection_MsgVoteOutboundResponse_messageType +var _ protoreflect.MessageType = fastReflection_MsgVoteOutboundResponse_messageType{} + +type fastReflection_MsgVoteOutboundResponse_messageType struct{} + +func (x fastReflection_MsgVoteOutboundResponse_messageType) Zero() protoreflect.Message { + return (*fastReflection_MsgVoteOutboundResponse)(nil) +} +func (x fastReflection_MsgVoteOutboundResponse_messageType) New() protoreflect.Message { 
+ return new(fastReflection_MsgVoteOutboundResponse) +} +func (x fastReflection_MsgVoteOutboundResponse_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_MsgVoteOutboundResponse +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_MsgVoteOutboundResponse) Descriptor() protoreflect.MessageDescriptor { + return md_MsgVoteOutboundResponse +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_MsgVoteOutboundResponse) Type() protoreflect.MessageType { + return _fastReflection_MsgVoteOutboundResponse_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_MsgVoteOutboundResponse) New() protoreflect.Message { + return new(fastReflection_MsgVoteOutboundResponse) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_MsgVoteOutboundResponse) Interface() protoreflect.ProtoMessage { + return (*MsgVoteOutboundResponse)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_MsgVoteOutboundResponse) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. 
Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_MsgVoteOutboundResponse) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutboundResponse")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutboundResponse does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgVoteOutboundResponse) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutboundResponse")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutboundResponse does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_MsgVoteOutboundResponse) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutboundResponse")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutboundResponse does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgVoteOutboundResponse) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutboundResponse")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutboundResponse does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_MsgVoteOutboundResponse) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutboundResponse")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutboundResponse does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_MsgVoteOutboundResponse) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.MsgVoteOutboundResponse")) + } + panic(fmt.Errorf("message uexecutor.v1.MsgVoteOutboundResponse does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_MsgVoteOutboundResponse) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in uexecutor.v1.MsgVoteOutboundResponse", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_MsgVoteOutboundResponse) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. 
+// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgVoteOutboundResponse) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_MsgVoteOutboundResponse) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_MsgVoteOutboundResponse) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*MsgVoteOutboundResponse) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*MsgVoteOutboundResponse) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*MsgVoteOutboundResponse) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgVoteOutboundResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgVoteOutboundResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + var ( md_MsgVoteGasPrice protoreflect.MessageDescriptor fd_MsgVoteGasPrice_signer protoreflect.FieldDescriptor @@ -5652,7 +6571,7 @@ func (x *MsgVoteGasPrice) ProtoReflect() protoreflect.Message { } func (x *MsgVoteGasPrice) slowProtoReflect() protoreflect.Message { - mi := &file_uexecutor_v1_tx_proto_msgTypes[12] + mi := &file_uexecutor_v1_tx_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6224,7 +7143,7 @@ func (x *MsgVoteGasPriceResponse) ProtoReflect() protoreflect.Message { } func (x *MsgVoteGasPriceResponse) slowProtoReflect() protoreflect.Message { - mi := &file_uexecutor_v1_tx_proto_msgTypes[13] + mi := &file_uexecutor_v1_tx_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7084,6 +8003,86 @@ func (*MsgVoteInboundResponse) Descriptor() ([]byte, []int) { return file_uexecutor_v1_tx_proto_rawDescGZIP(), []int{11} } +// MsgVoteOutbound allows a universal validator to vote on an outbound tx observation. 
+type MsgVoteOutbound struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // signer is the Cosmos address initiating the tx (used for tx signing) + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + TxId string `protobuf:"bytes,2,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // Tx Id (abi.encode(utxId,outboundId)) + ObservedTx *OutboundObservation `protobuf:"bytes,3,opt,name=observed_tx,json=observedTx,proto3" json:"observed_tx,omitempty"` // observed tx on destination chain +} + +func (x *MsgVoteOutbound) Reset() { + *x = MsgVoteOutbound{} + if protoimpl.UnsafeEnabled { + mi := &file_uexecutor_v1_tx_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MsgVoteOutbound) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MsgVoteOutbound) ProtoMessage() {} + +// Deprecated: Use MsgVoteOutbound.ProtoReflect.Descriptor instead. +func (*MsgVoteOutbound) Descriptor() ([]byte, []int) { + return file_uexecutor_v1_tx_proto_rawDescGZIP(), []int{12} +} + +func (x *MsgVoteOutbound) GetSigner() string { + if x != nil { + return x.Signer + } + return "" +} + +func (x *MsgVoteOutbound) GetTxId() string { + if x != nil { + return x.TxId + } + return "" +} + +func (x *MsgVoteOutbound) GetObservedTx() *OutboundObservation { + if x != nil { + return x.ObservedTx + } + return nil +} + +// MsgVoteInboundResponse defines the response for MsgExecutePayload. 
+type MsgVoteOutboundResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MsgVoteOutboundResponse) Reset() { + *x = MsgVoteOutboundResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_uexecutor_v1_tx_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MsgVoteOutboundResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MsgVoteOutboundResponse) ProtoMessage() {} + +// Deprecated: Use MsgVoteOutboundResponse.ProtoReflect.Descriptor instead. +func (*MsgVoteOutboundResponse) Descriptor() ([]byte, []int) { + return file_uexecutor_v1_tx_proto_rawDescGZIP(), []int{13} +} + // MsgVoteGasPrice is broadcasted by Universal Validators to submit their observed gas prices type MsgVoteGasPrice struct { state protoimpl.MessageState @@ -7099,7 +8098,7 @@ type MsgVoteGasPrice struct { func (x *MsgVoteGasPrice) Reset() { *x = MsgVoteGasPrice{} if protoimpl.UnsafeEnabled { - mi := &file_uexecutor_v1_tx_proto_msgTypes[12] + mi := &file_uexecutor_v1_tx_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7113,7 +8112,7 @@ func (*MsgVoteGasPrice) ProtoMessage() {} // Deprecated: Use MsgVoteGasPrice.ProtoReflect.Descriptor instead. 
func (*MsgVoteGasPrice) Descriptor() ([]byte, []int) { - return file_uexecutor_v1_tx_proto_rawDescGZIP(), []int{12} + return file_uexecutor_v1_tx_proto_rawDescGZIP(), []int{14} } func (x *MsgVoteGasPrice) GetSigner() string { @@ -7154,7 +8153,7 @@ type MsgVoteGasPriceResponse struct { func (x *MsgVoteGasPriceResponse) Reset() { *x = MsgVoteGasPriceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_uexecutor_v1_tx_proto_msgTypes[13] + mi := &file_uexecutor_v1_tx_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7168,7 +8167,7 @@ func (*MsgVoteGasPriceResponse) ProtoMessage() {} // Deprecated: Use MsgVoteGasPriceResponse.ProtoReflect.Descriptor instead. func (*MsgVoteGasPriceResponse) Descriptor() ([]byte, []int) { - return file_uexecutor_v1_tx_proto_rawDescGZIP(), []int{13} + return file_uexecutor_v1_tx_proto_rawDescGZIP(), []int{15} } var File_uexecutor_v1_tx_proto protoreflect.FileDescriptor @@ -7280,70 +8279,90 @@ var file_uexecutor_v1_tx_proto_rawDesc = []byte{ 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x8a, 0xe7, 0xb0, 0x2a, 0x11, 0x75, 0x65, 0x2f, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x18, 0x0a, 0x16, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd3, 0x01, 0x0a, 0x0f, 0x4d, 0x73, 0x67, 0x56, 0x6f, - 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x69, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x0f, 0x4d, 0x73, 0x67, 0x56, 0x6f, + 0x74, 0x65, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xd2, 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x2a, 0x0a, 
0x11, - 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x69, 0x63, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x70, 0x72, 0x69, 0x63, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x3a, 0x29, 0x82, 0xe7, 0xb0, 0x2a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x8a, 0xe7, - 0xb0, 0x2a, 0x19, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x4d, 0x73, 0x67, - 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x22, 0x19, 0x0a, 0x17, - 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xc8, 0x04, 0x0a, 0x03, 0x4d, 0x73, 0x67, 0x12, - 0x54, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, - 0x1d, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, - 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x25, - 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, - 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x09, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x55, - 0x45, 0x41, 0x12, 0x1a, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x55, 0x45, 0x41, 0x1a, 0x22, - 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, - 0x67, 0x44, 0x65, 0x70, 0x6c, 
0x6f, 0x79, 0x55, 0x45, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x4d, 0x69, 0x6e, 0x74, 0x50, 0x43, 0x12, 0x17, 0x2e, 0x75, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x4d, - 0x69, 0x6e, 0x74, 0x50, 0x43, 0x1a, 0x1f, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x4d, 0x69, 0x6e, 0x74, 0x50, 0x43, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x27, 0x2e, 0x75, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0a, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x55, 0x45, 0x41, - 0x12, 0x1b, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x4d, 0x73, 0x67, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x55, 0x45, 0x41, 0x1a, 0x23, 0x2e, + 0x72, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x13, 0x0a, 0x05, + 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x78, 0x49, + 0x64, 0x12, 0x42, 0x0a, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x78, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x54, 0x78, 0x3a, 0x29, 0x82, 0xe7, 0xb0, 0x2a, 
0x06, 0x73, 0x69, 0x67, 0x6e, + 0x65, 0x72, 0x8a, 0xe7, 0xb0, 0x2a, 0x19, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, + 0x2f, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, + 0x22, 0x19, 0x0a, 0x17, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x4f, 0x75, 0x74, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd3, 0x01, 0x0a, 0x0f, + 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x12, + 0x30, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x18, 0xd2, 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, + 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x70, 0x72, + 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x3a, 0x29, 0x82, 0xe7, 0xb0, 0x2a, 0x06, 0x73, 0x69, 0x67, + 0x6e, 0x65, 0x72, 0x8a, 0xe7, 0xb0, 0x2a, 0x19, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, + 0x72, 0x2f, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, + 0x65, 0x22, 0x19, 0x0a, 0x17, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, + 0x72, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x9e, 0x05, 0x0a, + 0x03, 0x4d, 0x73, 0x67, 0x12, 0x54, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, + 0x72, 
0x61, 0x6d, 0x73, 0x12, 0x1d, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x1a, 0x25, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x09, 0x44, 0x65, + 0x70, 0x6c, 0x6f, 0x79, 0x55, 0x45, 0x41, 0x12, 0x1a, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, + 0x55, 0x45, 0x41, 0x1a, 0x22, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x55, 0x45, 0x41, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x4d, 0x69, 0x6e, 0x74, 0x50, + 0x43, 0x12, 0x17, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x4d, 0x73, 0x67, 0x4d, 0x69, 0x6e, 0x74, 0x50, 0x43, 0x1a, 0x1f, 0x2e, 0x75, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x4d, 0x69, 0x6e, + 0x74, 0x50, 0x43, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0e, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x55, 0x45, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0b, 0x56, 0x6f, 0x74, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, - 0x64, 0x12, 0x1c, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x1a, - 0x24, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 
0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, - 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0c, 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, - 0x50, 0x72, 0x69, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, - 0x72, 0x69, 0x63, 0x65, 0x1a, 0x25, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x05, 0x80, 0xe7, 0xb0, - 0x2a, 0x01, 0x42, 0xaf, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x07, 0x54, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, - 0x75, 0x73, 0x68, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2f, 0x70, 0x75, 0x73, 0x68, 0x2d, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x75, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x6f, 0x72, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x55, 0x58, 0x58, 0xaa, 0x02, 0x0c, 0x55, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0c, 0x55, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x18, 0x55, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x6f, 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, - 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x27, + 0x2e, 0x75, 
0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, + 0x67, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0a, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x55, 0x45, 0x41, 0x12, 0x1b, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x55, + 0x45, 0x41, 0x1a, 0x23, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x55, 0x45, 0x41, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0b, 0x56, 0x6f, 0x74, 0x65, 0x49, + 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x1c, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x49, 0x6e, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x1a, 0x24, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0c, 0x56, 0x6f, + 0x74, 0x65, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x1d, 0x2e, 0x75, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, + 0x65, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x1a, 0x25, 0x2e, 0x75, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, + 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x54, 0x0a, 0x0c, 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, + 0x12, 0x1d, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x4d, 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x47, 
0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x1a, + 0x25, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, + 0x73, 0x67, 0x56, 0x6f, 0x74, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x05, 0x80, 0xe7, 0xb0, 0x2a, 0x01, 0x42, 0xaf, 0x01, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, + 0x76, 0x31, 0x42, 0x07, 0x54, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x73, 0x68, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x2f, 0x70, 0x75, 0x73, 0x68, 0x2d, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2d, 0x6e, + 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, + 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x76, 0x31, + 0xa2, 0x02, 0x03, 0x55, 0x58, 0x58, 0xaa, 0x02, 0x0c, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0c, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, + 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x18, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, + 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x0d, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -7358,7 +8377,7 @@ func file_uexecutor_v1_tx_proto_rawDescGZIP() []byte { return file_uexecutor_v1_tx_proto_rawDescData } -var file_uexecutor_v1_tx_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_uexecutor_v1_tx_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_uexecutor_v1_tx_proto_goTypes = []interface{}{ (*MsgUpdateParams)(nil), // 0: uexecutor.v1.MsgUpdateParams (*MsgUpdateParamsResponse)(nil), // 1: uexecutor.v1.MsgUpdateParamsResponse @@ -7372,42 +8391,48 @@ var 
file_uexecutor_v1_tx_proto_goTypes = []interface{}{ (*MsgMigrateUEAResponse)(nil), // 9: uexecutor.v1.MsgMigrateUEAResponse (*MsgVoteInbound)(nil), // 10: uexecutor.v1.MsgVoteInbound (*MsgVoteInboundResponse)(nil), // 11: uexecutor.v1.MsgVoteInboundResponse - (*MsgVoteGasPrice)(nil), // 12: uexecutor.v1.MsgVoteGasPrice - (*MsgVoteGasPriceResponse)(nil), // 13: uexecutor.v1.MsgVoteGasPriceResponse - (*Params)(nil), // 14: uexecutor.v1.Params - (*UniversalAccountId)(nil), // 15: uexecutor.v1.UniversalAccountId - (*UniversalPayload)(nil), // 16: uexecutor.v1.UniversalPayload - (*MigrationPayload)(nil), // 17: uexecutor.v1.MigrationPayload - (*Inbound)(nil), // 18: uexecutor.v1.Inbound + (*MsgVoteOutbound)(nil), // 12: uexecutor.v1.MsgVoteOutbound + (*MsgVoteOutboundResponse)(nil), // 13: uexecutor.v1.MsgVoteOutboundResponse + (*MsgVoteGasPrice)(nil), // 14: uexecutor.v1.MsgVoteGasPrice + (*MsgVoteGasPriceResponse)(nil), // 15: uexecutor.v1.MsgVoteGasPriceResponse + (*Params)(nil), // 16: uexecutor.v1.Params + (*UniversalAccountId)(nil), // 17: uexecutor.v1.UniversalAccountId + (*UniversalPayload)(nil), // 18: uexecutor.v1.UniversalPayload + (*MigrationPayload)(nil), // 19: uexecutor.v1.MigrationPayload + (*Inbound)(nil), // 20: uexecutor.v1.Inbound + (*OutboundObservation)(nil), // 21: uexecutor.v1.OutboundObservation } var file_uexecutor_v1_tx_proto_depIdxs = []int32{ - 14, // 0: uexecutor.v1.MsgUpdateParams.params:type_name -> uexecutor.v1.Params - 15, // 1: uexecutor.v1.MsgDeployUEA.universal_account_id:type_name -> uexecutor.v1.UniversalAccountId - 15, // 2: uexecutor.v1.MsgMintPC.universal_account_id:type_name -> uexecutor.v1.UniversalAccountId - 15, // 3: uexecutor.v1.MsgExecutePayload.universal_account_id:type_name -> uexecutor.v1.UniversalAccountId - 16, // 4: uexecutor.v1.MsgExecutePayload.universal_payload:type_name -> uexecutor.v1.UniversalPayload - 15, // 5: uexecutor.v1.MsgMigrateUEA.universal_account_id:type_name -> uexecutor.v1.UniversalAccountId - 17, 
// 6: uexecutor.v1.MsgMigrateUEA.migration_payload:type_name -> uexecutor.v1.MigrationPayload - 18, // 7: uexecutor.v1.MsgVoteInbound.inbound:type_name -> uexecutor.v1.Inbound - 0, // 8: uexecutor.v1.Msg.UpdateParams:input_type -> uexecutor.v1.MsgUpdateParams - 2, // 9: uexecutor.v1.Msg.DeployUEA:input_type -> uexecutor.v1.MsgDeployUEA - 4, // 10: uexecutor.v1.Msg.MintPC:input_type -> uexecutor.v1.MsgMintPC - 6, // 11: uexecutor.v1.Msg.ExecutePayload:input_type -> uexecutor.v1.MsgExecutePayload - 8, // 12: uexecutor.v1.Msg.MigrateUEA:input_type -> uexecutor.v1.MsgMigrateUEA - 10, // 13: uexecutor.v1.Msg.VoteInbound:input_type -> uexecutor.v1.MsgVoteInbound - 12, // 14: uexecutor.v1.Msg.VoteGasPrice:input_type -> uexecutor.v1.MsgVoteGasPrice - 1, // 15: uexecutor.v1.Msg.UpdateParams:output_type -> uexecutor.v1.MsgUpdateParamsResponse - 3, // 16: uexecutor.v1.Msg.DeployUEA:output_type -> uexecutor.v1.MsgDeployUEAResponse - 5, // 17: uexecutor.v1.Msg.MintPC:output_type -> uexecutor.v1.MsgMintPCResponse - 7, // 18: uexecutor.v1.Msg.ExecutePayload:output_type -> uexecutor.v1.MsgExecutePayloadResponse - 9, // 19: uexecutor.v1.Msg.MigrateUEA:output_type -> uexecutor.v1.MsgMigrateUEAResponse - 11, // 20: uexecutor.v1.Msg.VoteInbound:output_type -> uexecutor.v1.MsgVoteInboundResponse - 13, // 21: uexecutor.v1.Msg.VoteGasPrice:output_type -> uexecutor.v1.MsgVoteGasPriceResponse - 15, // [15:22] is the sub-list for method output_type - 8, // [8:15] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name + 16, // 0: uexecutor.v1.MsgUpdateParams.params:type_name -> uexecutor.v1.Params + 17, // 1: uexecutor.v1.MsgDeployUEA.universal_account_id:type_name -> uexecutor.v1.UniversalAccountId + 17, // 2: uexecutor.v1.MsgMintPC.universal_account_id:type_name -> uexecutor.v1.UniversalAccountId + 17, // 3: 
uexecutor.v1.MsgExecutePayload.universal_account_id:type_name -> uexecutor.v1.UniversalAccountId + 18, // 4: uexecutor.v1.MsgExecutePayload.universal_payload:type_name -> uexecutor.v1.UniversalPayload + 17, // 5: uexecutor.v1.MsgMigrateUEA.universal_account_id:type_name -> uexecutor.v1.UniversalAccountId + 19, // 6: uexecutor.v1.MsgMigrateUEA.migration_payload:type_name -> uexecutor.v1.MigrationPayload + 20, // 7: uexecutor.v1.MsgVoteInbound.inbound:type_name -> uexecutor.v1.Inbound + 21, // 8: uexecutor.v1.MsgVoteOutbound.observed_tx:type_name -> uexecutor.v1.OutboundObservation + 0, // 9: uexecutor.v1.Msg.UpdateParams:input_type -> uexecutor.v1.MsgUpdateParams + 2, // 10: uexecutor.v1.Msg.DeployUEA:input_type -> uexecutor.v1.MsgDeployUEA + 4, // 11: uexecutor.v1.Msg.MintPC:input_type -> uexecutor.v1.MsgMintPC + 6, // 12: uexecutor.v1.Msg.ExecutePayload:input_type -> uexecutor.v1.MsgExecutePayload + 8, // 13: uexecutor.v1.Msg.MigrateUEA:input_type -> uexecutor.v1.MsgMigrateUEA + 10, // 14: uexecutor.v1.Msg.VoteInbound:input_type -> uexecutor.v1.MsgVoteInbound + 12, // 15: uexecutor.v1.Msg.VoteOutbound:input_type -> uexecutor.v1.MsgVoteOutbound + 14, // 16: uexecutor.v1.Msg.VoteGasPrice:input_type -> uexecutor.v1.MsgVoteGasPrice + 1, // 17: uexecutor.v1.Msg.UpdateParams:output_type -> uexecutor.v1.MsgUpdateParamsResponse + 3, // 18: uexecutor.v1.Msg.DeployUEA:output_type -> uexecutor.v1.MsgDeployUEAResponse + 5, // 19: uexecutor.v1.Msg.MintPC:output_type -> uexecutor.v1.MsgMintPCResponse + 7, // 20: uexecutor.v1.Msg.ExecutePayload:output_type -> uexecutor.v1.MsgExecutePayloadResponse + 9, // 21: uexecutor.v1.Msg.MigrateUEA:output_type -> uexecutor.v1.MsgMigrateUEAResponse + 11, // 22: uexecutor.v1.Msg.VoteInbound:output_type -> uexecutor.v1.MsgVoteInboundResponse + 13, // 23: uexecutor.v1.Msg.VoteOutbound:output_type -> uexecutor.v1.MsgVoteOutboundResponse + 15, // 24: uexecutor.v1.Msg.VoteGasPrice:output_type -> uexecutor.v1.MsgVoteGasPriceResponse + 17, // 
[17:25] is the sub-list for method output_type + 9, // [9:17] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name } func init() { file_uexecutor_v1_tx_proto_init() } @@ -7562,7 +8587,7 @@ func file_uexecutor_v1_tx_proto_init() { } } file_uexecutor_v1_tx_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MsgVoteGasPrice); i { + switch v := v.(*MsgVoteOutbound); i { case 0: return &v.state case 1: @@ -7574,6 +8599,30 @@ func file_uexecutor_v1_tx_proto_init() { } } file_uexecutor_v1_tx_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MsgVoteOutboundResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_uexecutor_v1_tx_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MsgVoteGasPrice); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_uexecutor_v1_tx_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MsgVoteGasPriceResponse); i { case 0: return &v.state @@ -7592,7 +8641,7 @@ func file_uexecutor_v1_tx_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_uexecutor_v1_tx_proto_rawDesc, NumEnums: 0, - NumMessages: 14, + NumMessages: 16, NumExtensions: 0, NumServices: 1, }, diff --git a/api/uexecutor/v1/tx_grpc.pb.go b/api/uexecutor/v1/tx_grpc.pb.go index 9a79b5a2..76112997 100644 --- a/api/uexecutor/v1/tx_grpc.pb.go +++ b/api/uexecutor/v1/tx_grpc.pb.go @@ -25,6 +25,7 @@ const ( Msg_ExecutePayload_FullMethodName = "/uexecutor.v1.Msg/ExecutePayload" Msg_MigrateUEA_FullMethodName = "/uexecutor.v1.Msg/MigrateUEA" Msg_VoteInbound_FullMethodName = "/uexecutor.v1.Msg/VoteInbound" + 
Msg_VoteOutbound_FullMethodName = "/uexecutor.v1.Msg/VoteOutbound" Msg_VoteGasPrice_FullMethodName = "/uexecutor.v1.Msg/VoteGasPrice" ) @@ -46,6 +47,8 @@ type MsgClient interface { MigrateUEA(ctx context.Context, in *MsgMigrateUEA, opts ...grpc.CallOption) (*MsgMigrateUEAResponse, error) // VoteInbound defines a message for voting on synthetic assets bridging from external chain to PC VoteInbound(ctx context.Context, in *MsgVoteInbound, opts ...grpc.CallOption) (*MsgVoteInboundResponse, error) + // VoteOutbound defines a message for voting on a observed outbound tx on external chain + VoteOutbound(ctx context.Context, in *MsgVoteOutbound, opts ...grpc.CallOption) (*MsgVoteOutboundResponse, error) // VoteGasPrice defines a message for universal validators to vote on the gas price VoteGasPrice(ctx context.Context, in *MsgVoteGasPrice, opts ...grpc.CallOption) (*MsgVoteGasPriceResponse, error) } @@ -112,6 +115,15 @@ func (c *msgClient) VoteInbound(ctx context.Context, in *MsgVoteInbound, opts .. return out, nil } +func (c *msgClient) VoteOutbound(ctx context.Context, in *MsgVoteOutbound, opts ...grpc.CallOption) (*MsgVoteOutboundResponse, error) { + out := new(MsgVoteOutboundResponse) + err := c.cc.Invoke(ctx, Msg_VoteOutbound_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *msgClient) VoteGasPrice(ctx context.Context, in *MsgVoteGasPrice, opts ...grpc.CallOption) (*MsgVoteGasPriceResponse, error) { out := new(MsgVoteGasPriceResponse) err := c.cc.Invoke(ctx, Msg_VoteGasPrice_FullMethodName, in, out, opts...) 
@@ -139,6 +151,8 @@ type MsgServer interface { MigrateUEA(context.Context, *MsgMigrateUEA) (*MsgMigrateUEAResponse, error) // VoteInbound defines a message for voting on synthetic assets bridging from external chain to PC VoteInbound(context.Context, *MsgVoteInbound) (*MsgVoteInboundResponse, error) + // VoteOutbound defines a message for voting on a observed outbound tx on external chain + VoteOutbound(context.Context, *MsgVoteOutbound) (*MsgVoteOutboundResponse, error) // VoteGasPrice defines a message for universal validators to vote on the gas price VoteGasPrice(context.Context, *MsgVoteGasPrice) (*MsgVoteGasPriceResponse, error) mustEmbedUnimplementedMsgServer() @@ -166,6 +180,9 @@ func (UnimplementedMsgServer) MigrateUEA(context.Context, *MsgMigrateUEA) (*MsgM func (UnimplementedMsgServer) VoteInbound(context.Context, *MsgVoteInbound) (*MsgVoteInboundResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VoteInbound not implemented") } +func (UnimplementedMsgServer) VoteOutbound(context.Context, *MsgVoteOutbound) (*MsgVoteOutboundResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VoteOutbound not implemented") +} func (UnimplementedMsgServer) VoteGasPrice(context.Context, *MsgVoteGasPrice) (*MsgVoteGasPriceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VoteGasPrice not implemented") } @@ -290,6 +307,24 @@ func _Msg_VoteInbound_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } +func _Msg_VoteOutbound_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgVoteOutbound) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).VoteOutbound(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Msg_VoteOutbound_FullMethodName, + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(MsgServer).VoteOutbound(ctx, req.(*MsgVoteOutbound)) + } + return interceptor(ctx, in, info, handler) +} + func _Msg_VoteGasPrice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgVoteGasPrice) if err := dec(in); err != nil { @@ -339,6 +374,10 @@ var Msg_ServiceDesc = grpc.ServiceDesc{ MethodName: "VoteInbound", Handler: _Msg_VoteInbound_Handler, }, + { + MethodName: "VoteOutbound", + Handler: _Msg_VoteOutbound_Handler, + }, { MethodName: "VoteGasPrice", Handler: _Msg_VoteGasPrice_Handler, diff --git a/api/uexecutor/v1/types.pulsar.go b/api/uexecutor/v1/types.pulsar.go index aa738944..1d00bcc3 100644 --- a/api/uexecutor/v1/types.pulsar.go +++ b/api/uexecutor/v1/types.pulsar.go @@ -2437,25 +2437,25 @@ func (x *fastReflection_UniversalAccountId) ProtoMethods() *protoiface.Methods { } var ( - md_InboundStatus protoreflect.MessageDescriptor - fd_InboundStatus_status protoreflect.FieldDescriptor + md_RevertInstructions protoreflect.MessageDescriptor + fd_RevertInstructions_fund_recipient protoreflect.FieldDescriptor ) func init() { file_uexecutor_v1_types_proto_init() - md_InboundStatus = File_uexecutor_v1_types_proto.Messages().ByName("InboundStatus") - fd_InboundStatus_status = md_InboundStatus.Fields().ByName("status") + md_RevertInstructions = File_uexecutor_v1_types_proto.Messages().ByName("RevertInstructions") + fd_RevertInstructions_fund_recipient = md_RevertInstructions.Fields().ByName("fund_recipient") } -var _ protoreflect.Message = (*fastReflection_InboundStatus)(nil) +var _ protoreflect.Message = (*fastReflection_RevertInstructions)(nil) -type fastReflection_InboundStatus InboundStatus +type fastReflection_RevertInstructions RevertInstructions -func (x *InboundStatus) ProtoReflect() protoreflect.Message { - return (*fastReflection_InboundStatus)(x) +func (x *RevertInstructions) ProtoReflect() 
protoreflect.Message { + return (*fastReflection_RevertInstructions)(x) } -func (x *InboundStatus) slowProtoReflect() protoreflect.Message { +func (x *RevertInstructions) slowProtoReflect() protoreflect.Message { mi := &file_uexecutor_v1_types_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2467,43 +2467,43 @@ func (x *InboundStatus) slowProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -var _fastReflection_InboundStatus_messageType fastReflection_InboundStatus_messageType -var _ protoreflect.MessageType = fastReflection_InboundStatus_messageType{} +var _fastReflection_RevertInstructions_messageType fastReflection_RevertInstructions_messageType +var _ protoreflect.MessageType = fastReflection_RevertInstructions_messageType{} -type fastReflection_InboundStatus_messageType struct{} +type fastReflection_RevertInstructions_messageType struct{} -func (x fastReflection_InboundStatus_messageType) Zero() protoreflect.Message { - return (*fastReflection_InboundStatus)(nil) +func (x fastReflection_RevertInstructions_messageType) Zero() protoreflect.Message { + return (*fastReflection_RevertInstructions)(nil) } -func (x fastReflection_InboundStatus_messageType) New() protoreflect.Message { - return new(fastReflection_InboundStatus) +func (x fastReflection_RevertInstructions_messageType) New() protoreflect.Message { + return new(fastReflection_RevertInstructions) } -func (x fastReflection_InboundStatus_messageType) Descriptor() protoreflect.MessageDescriptor { - return md_InboundStatus +func (x fastReflection_RevertInstructions_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_RevertInstructions } // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. 
-func (x *fastReflection_InboundStatus) Descriptor() protoreflect.MessageDescriptor { - return md_InboundStatus +func (x *fastReflection_RevertInstructions) Descriptor() protoreflect.MessageDescriptor { + return md_RevertInstructions } // Type returns the message type, which encapsulates both Go and protobuf // type information. If the Go type information is not needed, // it is recommended that the message descriptor be used instead. -func (x *fastReflection_InboundStatus) Type() protoreflect.MessageType { - return _fastReflection_InboundStatus_messageType +func (x *fastReflection_RevertInstructions) Type() protoreflect.MessageType { + return _fastReflection_RevertInstructions_messageType } // New returns a newly allocated and mutable empty message. -func (x *fastReflection_InboundStatus) New() protoreflect.Message { - return new(fastReflection_InboundStatus) +func (x *fastReflection_RevertInstructions) New() protoreflect.Message { + return new(fastReflection_RevertInstructions) } // Interface unwraps the message reflection interface and // returns the underlying ProtoMessage interface. -func (x *fastReflection_InboundStatus) Interface() protoreflect.ProtoMessage { - return (*InboundStatus)(x) +func (x *fastReflection_RevertInstructions) Interface() protoreflect.ProtoMessage { + return (*RevertInstructions)(x) } // Range iterates over every populated field in an undefined order, @@ -2511,10 +2511,10 @@ func (x *fastReflection_InboundStatus) Interface() protoreflect.ProtoMessage { // Range returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current field descriptor. 
-func (x *fastReflection_InboundStatus) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - if x.Status != 0 { - value := protoreflect.ValueOfEnum((protoreflect.EnumNumber)(x.Status)) - if !f(fd_InboundStatus_status, value) { +func (x *fastReflection_RevertInstructions) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.FundRecipient != "" { + value := protoreflect.ValueOfString(x.FundRecipient) + if !f(fd_RevertInstructions_fund_recipient, value) { return } } @@ -2531,15 +2531,15 @@ func (x *fastReflection_InboundStatus) Range(f func(protoreflect.FieldDescriptor // In other cases (aside from the nullable cases above), // a proto3 scalar field is populated if it contains a non-zero value, and // a repeated field is populated if it is non-empty. -func (x *fastReflection_InboundStatus) Has(fd protoreflect.FieldDescriptor) bool { +func (x *fastReflection_RevertInstructions) Has(fd protoreflect.FieldDescriptor) bool { switch fd.FullName() { - case "uexecutor.v1.InboundStatus.status": - return x.Status != 0 + case "uexecutor.v1.RevertInstructions.fund_recipient": + return x.FundRecipient != "" default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.InboundStatus")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.RevertInstructions")) } - panic(fmt.Errorf("message uexecutor.v1.InboundStatus does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.RevertInstructions does not contain field %s", fd.FullName())) } } @@ -2549,15 +2549,15 @@ func (x *fastReflection_InboundStatus) Has(fd protoreflect.FieldDescriptor) bool // associated with the given field number. // // Clear is a mutating operation and unsafe for concurrent use. 
-func (x *fastReflection_InboundStatus) Clear(fd protoreflect.FieldDescriptor) { +func (x *fastReflection_RevertInstructions) Clear(fd protoreflect.FieldDescriptor) { switch fd.FullName() { - case "uexecutor.v1.InboundStatus.status": - x.Status = 0 + case "uexecutor.v1.RevertInstructions.fund_recipient": + x.FundRecipient = "" default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.InboundStatus")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.RevertInstructions")) } - panic(fmt.Errorf("message uexecutor.v1.InboundStatus does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.RevertInstructions does not contain field %s", fd.FullName())) } } @@ -2567,16 +2567,16 @@ func (x *fastReflection_InboundStatus) Clear(fd protoreflect.FieldDescriptor) { // the default value of a bytes scalar is guaranteed to be a copy. // For unpopulated composite types, it returns an empty, read-only view // of the value; to obtain a mutable reference, use Mutable. 
-func (x *fastReflection_InboundStatus) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { +func (x *fastReflection_RevertInstructions) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { switch descriptor.FullName() { - case "uexecutor.v1.InboundStatus.status": - value := x.Status - return protoreflect.ValueOfEnum((protoreflect.EnumNumber)(value)) + case "uexecutor.v1.RevertInstructions.fund_recipient": + value := x.FundRecipient + return protoreflect.ValueOfString(value) default: if descriptor.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.InboundStatus")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.RevertInstructions")) } - panic(fmt.Errorf("message uexecutor.v1.InboundStatus does not contain field %s", descriptor.FullName())) + panic(fmt.Errorf("message uexecutor.v1.RevertInstructions does not contain field %s", descriptor.FullName())) } } @@ -2590,15 +2590,15 @@ func (x *fastReflection_InboundStatus) Get(descriptor protoreflect.FieldDescript // empty, read-only value, then it panics. // // Set is a mutating operation and unsafe for concurrent use. 
-func (x *fastReflection_InboundStatus) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { +func (x *fastReflection_RevertInstructions) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { switch fd.FullName() { - case "uexecutor.v1.InboundStatus.status": - x.Status = (Status)(value.Enum()) + case "uexecutor.v1.RevertInstructions.fund_recipient": + x.FundRecipient = value.Interface().(string) default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.InboundStatus")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.RevertInstructions")) } - panic(fmt.Errorf("message uexecutor.v1.InboundStatus does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.RevertInstructions does not contain field %s", fd.FullName())) } } @@ -2612,40 +2612,40 @@ func (x *fastReflection_InboundStatus) Set(fd protoreflect.FieldDescriptor, valu // It panics if the field does not contain a composite type. // // Mutable is a mutating operation and unsafe for concurrent use. 
-func (x *fastReflection_InboundStatus) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { +func (x *fastReflection_RevertInstructions) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { switch fd.FullName() { - case "uexecutor.v1.InboundStatus.status": - panic(fmt.Errorf("field status of message uexecutor.v1.InboundStatus is not mutable")) + case "uexecutor.v1.RevertInstructions.fund_recipient": + panic(fmt.Errorf("field fund_recipient of message uexecutor.v1.RevertInstructions is not mutable")) default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.InboundStatus")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.RevertInstructions")) } - panic(fmt.Errorf("message uexecutor.v1.InboundStatus does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.RevertInstructions does not contain field %s", fd.FullName())) } } // NewField returns a new value that is assignable to the field // for the given descriptor. For scalars, this returns the default value. // For lists, maps, and messages, this returns a new, empty, mutable value. 
-func (x *fastReflection_InboundStatus) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { +func (x *fastReflection_RevertInstructions) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { switch fd.FullName() { - case "uexecutor.v1.InboundStatus.status": - return protoreflect.ValueOfEnum(0) + case "uexecutor.v1.RevertInstructions.fund_recipient": + return protoreflect.ValueOfString("") default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.InboundStatus")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.RevertInstructions")) } - panic(fmt.Errorf("message uexecutor.v1.InboundStatus does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.RevertInstructions does not contain field %s", fd.FullName())) } } // WhichOneof reports which field within the oneof is populated, // returning nil if none are populated. // It panics if the oneof descriptor does not belong to this message. -func (x *fastReflection_InboundStatus) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { +func (x *fastReflection_RevertInstructions) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { switch d.FullName() { default: - panic(fmt.Errorf("%s is not a oneof field in uexecutor.v1.InboundStatus", d.FullName())) + panic(fmt.Errorf("%s is not a oneof field in uexecutor.v1.RevertInstructions", d.FullName())) } panic("unreachable") } @@ -2653,7 +2653,7 @@ func (x *fastReflection_InboundStatus) WhichOneof(d protoreflect.OneofDescriptor // GetUnknown retrieves the entire list of unknown fields. // The caller may only mutate the contents of the RawFields // if the mutated bytes are stored back into the message with SetUnknown. 
-func (x *fastReflection_InboundStatus) GetUnknown() protoreflect.RawFields { +func (x *fastReflection_RevertInstructions) GetUnknown() protoreflect.RawFields { return x.unknownFields } @@ -2664,7 +2664,7 @@ func (x *fastReflection_InboundStatus) GetUnknown() protoreflect.RawFields { // An empty RawFields may be passed to clear the fields. // // SetUnknown is a mutating operation and unsafe for concurrent use. -func (x *fastReflection_InboundStatus) SetUnknown(fields protoreflect.RawFields) { +func (x *fastReflection_RevertInstructions) SetUnknown(fields protoreflect.RawFields) { x.unknownFields = fields } @@ -2676,7 +2676,7 @@ func (x *fastReflection_InboundStatus) SetUnknown(fields protoreflect.RawFields) // message type, but the details are implementation dependent. // Validity is not part of the protobuf data model, and may not // be preserved in marshaling or other operations. -func (x *fastReflection_InboundStatus) IsValid() bool { +func (x *fastReflection_RevertInstructions) IsValid() bool { return x != nil } @@ -2686,9 +2686,9 @@ func (x *fastReflection_InboundStatus) IsValid() bool { // The returned methods type is identical to // "google.golang.org/protobuf/runtime/protoiface".Methods. // Consult the protoiface package documentation for details. 
-func (x *fastReflection_InboundStatus) ProtoMethods() *protoiface.Methods { +func (x *fastReflection_RevertInstructions) ProtoMethods() *protoiface.Methods { size := func(input protoiface.SizeInput) protoiface.SizeOutput { - x := input.Message.Interface().(*InboundStatus) + x := input.Message.Interface().(*RevertInstructions) if x == nil { return protoiface.SizeOutput{ NoUnkeyedLiterals: input.NoUnkeyedLiterals, @@ -2700,8 +2700,9 @@ func (x *fastReflection_InboundStatus) ProtoMethods() *protoiface.Methods { var n int var l int _ = l - if x.Status != 0 { - n += 1 + runtime.Sov(uint64(x.Status)) + l = len(x.FundRecipient) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) } if x.unknownFields != nil { n += len(x.unknownFields) @@ -2713,7 +2714,7 @@ func (x *fastReflection_InboundStatus) ProtoMethods() *protoiface.Methods { } marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { - x := input.Message.Interface().(*InboundStatus) + x := input.Message.Interface().(*RevertInstructions) if x == nil { return protoiface.MarshalOutput{ NoUnkeyedLiterals: input.NoUnkeyedLiterals, @@ -2732,10 +2733,12 @@ func (x *fastReflection_InboundStatus) ProtoMethods() *protoiface.Methods { i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } - if x.Status != 0 { - i = runtime.EncodeVarint(dAtA, i, uint64(x.Status)) + if len(x.FundRecipient) > 0 { + i -= len(x.FundRecipient) + copy(dAtA[i:], x.FundRecipient) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.FundRecipient))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } if input.Buf != nil { input.Buf = append(input.Buf, dAtA...) 
@@ -2748,7 +2751,7 @@ func (x *fastReflection_InboundStatus) ProtoMethods() *protoiface.Methods { }, nil } unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { - x := input.Message.Interface().(*InboundStatus) + x := input.Message.Interface().(*RevertInstructions) if x == nil { return protoiface.UnmarshalOutput{ NoUnkeyedLiterals: input.NoUnkeyedLiterals, @@ -2780,17 +2783,17 @@ func (x *fastReflection_InboundStatus) ProtoMethods() *protoiface.Methods { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: InboundStatus: wiretype end group for non-group") + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: RevertInstructions: wiretype end group for non-group") } if fieldNum <= 0 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: InboundStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: RevertInstructions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field FundRecipient", wireType) } - x.Status = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow @@ -2800,11 +2803,24 @@ func (x 
*fastReflection_InboundStatus) ProtoMethods() *protoiface.Methods { } b := dAtA[iNdEx] iNdEx++ - x.Status |= Status(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.FundRecipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := runtime.Skip(dAtA[iNdEx:]) @@ -2841,17 +2857,18 @@ func (x *fastReflection_InboundStatus) ProtoMethods() *protoiface.Methods { } var ( - md_Inbound protoreflect.MessageDescriptor - fd_Inbound_source_chain protoreflect.FieldDescriptor - fd_Inbound_tx_hash protoreflect.FieldDescriptor - fd_Inbound_sender protoreflect.FieldDescriptor - fd_Inbound_recipient protoreflect.FieldDescriptor - fd_Inbound_amount protoreflect.FieldDescriptor - fd_Inbound_asset_addr protoreflect.FieldDescriptor - fd_Inbound_log_index protoreflect.FieldDescriptor - fd_Inbound_tx_type protoreflect.FieldDescriptor - fd_Inbound_universal_payload protoreflect.FieldDescriptor - fd_Inbound_verification_data protoreflect.FieldDescriptor + md_Inbound protoreflect.MessageDescriptor + fd_Inbound_source_chain protoreflect.FieldDescriptor + fd_Inbound_tx_hash protoreflect.FieldDescriptor + fd_Inbound_sender protoreflect.FieldDescriptor + fd_Inbound_recipient protoreflect.FieldDescriptor + fd_Inbound_amount protoreflect.FieldDescriptor + fd_Inbound_asset_addr protoreflect.FieldDescriptor + fd_Inbound_log_index protoreflect.FieldDescriptor + fd_Inbound_tx_type protoreflect.FieldDescriptor + 
fd_Inbound_universal_payload protoreflect.FieldDescriptor + fd_Inbound_verification_data protoreflect.FieldDescriptor + fd_Inbound_revert_instructions protoreflect.FieldDescriptor ) func init() { @@ -2867,6 +2884,7 @@ func init() { fd_Inbound_tx_type = md_Inbound.Fields().ByName("tx_type") fd_Inbound_universal_payload = md_Inbound.Fields().ByName("universal_payload") fd_Inbound_verification_data = md_Inbound.Fields().ByName("verification_data") + fd_Inbound_revert_instructions = md_Inbound.Fields().ByName("revert_instructions") } var _ protoreflect.Message = (*fastReflection_Inbound)(nil) @@ -2994,6 +3012,12 @@ func (x *fastReflection_Inbound) Range(f func(protoreflect.FieldDescriptor, prot return } } + if x.RevertInstructions != nil { + value := protoreflect.ValueOfMessage(x.RevertInstructions.ProtoReflect()) + if !f(fd_Inbound_revert_instructions, value) { + return + } + } } // Has reports whether a field is populated. @@ -3029,6 +3053,8 @@ func (x *fastReflection_Inbound) Has(fd protoreflect.FieldDescriptor) bool { return x.UniversalPayload != nil case "uexecutor.v1.Inbound.verification_data": return x.VerificationData != "" + case "uexecutor.v1.Inbound.revert_instructions": + return x.RevertInstructions != nil default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.Inbound")) @@ -3065,6 +3091,8 @@ func (x *fastReflection_Inbound) Clear(fd protoreflect.FieldDescriptor) { x.UniversalPayload = nil case "uexecutor.v1.Inbound.verification_data": x.VerificationData = "" + case "uexecutor.v1.Inbound.revert_instructions": + x.RevertInstructions = nil default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.Inbound")) @@ -3111,6 +3139,9 @@ func (x *fastReflection_Inbound) Get(descriptor protoreflect.FieldDescriptor) pr case "uexecutor.v1.Inbound.verification_data": value := x.VerificationData return protoreflect.ValueOfString(value) + case 
"uexecutor.v1.Inbound.revert_instructions": + value := x.RevertInstructions + return protoreflect.ValueOfMessage(value.ProtoReflect()) default: if descriptor.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.Inbound")) @@ -3146,11 +3177,13 @@ func (x *fastReflection_Inbound) Set(fd protoreflect.FieldDescriptor, value prot case "uexecutor.v1.Inbound.log_index": x.LogIndex = value.Interface().(string) case "uexecutor.v1.Inbound.tx_type": - x.TxType = (InboundTxType)(value.Enum()) + x.TxType = (TxType)(value.Enum()) case "uexecutor.v1.Inbound.universal_payload": x.UniversalPayload = value.Message().Interface().(*UniversalPayload) case "uexecutor.v1.Inbound.verification_data": x.VerificationData = value.Interface().(string) + case "uexecutor.v1.Inbound.revert_instructions": + x.RevertInstructions = value.Message().Interface().(*RevertInstructions) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.Inbound")) @@ -3176,6 +3209,11 @@ func (x *fastReflection_Inbound) Mutable(fd protoreflect.FieldDescriptor) protor x.UniversalPayload = new(UniversalPayload) } return protoreflect.ValueOfMessage(x.UniversalPayload.ProtoReflect()) + case "uexecutor.v1.Inbound.revert_instructions": + if x.RevertInstructions == nil { + x.RevertInstructions = new(RevertInstructions) + } + return protoreflect.ValueOfMessage(x.RevertInstructions.ProtoReflect()) case "uexecutor.v1.Inbound.source_chain": panic(fmt.Errorf("field source_chain of message uexecutor.v1.Inbound is not mutable")) case "uexecutor.v1.Inbound.tx_hash": @@ -3228,6 +3266,9 @@ func (x *fastReflection_Inbound) NewField(fd protoreflect.FieldDescriptor) proto return protoreflect.ValueOfMessage(m.ProtoReflect()) case "uexecutor.v1.Inbound.verification_data": return protoreflect.ValueOfString("") + case "uexecutor.v1.Inbound.revert_instructions": + m := new(RevertInstructions) + return 
protoreflect.ValueOfMessage(m.ProtoReflect()) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.Inbound")) @@ -3336,6 +3377,10 @@ func (x *fastReflection_Inbound) ProtoMethods() *protoiface.Methods { if l > 0 { n += 1 + l + runtime.Sov(uint64(l)) } + if x.RevertInstructions != nil { + l = options.Size(x.RevertInstructions) + n += 1 + l + runtime.Sov(uint64(l)) + } if x.unknownFields != nil { n += len(x.unknownFields) } @@ -3365,6 +3410,20 @@ func (x *fastReflection_Inbound) ProtoMethods() *protoiface.Methods { i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } + if x.RevertInstructions != nil { + encoded, err := options.Marshal(x.RevertInstructions) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x5a + } if len(x.VerificationData) > 0 { i -= len(x.VerificationData) copy(dAtA[i:], x.VerificationData) @@ -3727,7 +3786,7 @@ func (x *fastReflection_Inbound) ProtoMethods() *protoiface.Methods { } b := dAtA[iNdEx] iNdEx++ - x.TxType |= InboundTxType(b&0x7F) << shift + x.TxType |= TxType(b&0x7F) << shift if b < 0x80 { break } @@ -3800,6 +3859,42 @@ func (x *fastReflection_Inbound) ProtoMethods() *protoiface.Methods { } x.VerificationData = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field RevertInstructions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, 
Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.RevertInstructions == nil { + x.RevertInstructions = &RevertInstructions{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.RevertInstructions); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := runtime.Skip(dAtA[iNdEx:]) @@ -4544,33 +4639,31 @@ func (x *fastReflection_PCTx) ProtoMethods() *protoiface.Methods { } var ( - md_OutboundTx protoreflect.MessageDescriptor - fd_OutboundTx_destination_chain protoreflect.FieldDescriptor - fd_OutboundTx_tx_hash protoreflect.FieldDescriptor - fd_OutboundTx_recipient protoreflect.FieldDescriptor - fd_OutboundTx_amount protoreflect.FieldDescriptor - fd_OutboundTx_asset_addr protoreflect.FieldDescriptor + md_OutboundObservation protoreflect.MessageDescriptor + fd_OutboundObservation_success protoreflect.FieldDescriptor + fd_OutboundObservation_block_height protoreflect.FieldDescriptor + fd_OutboundObservation_tx_hash protoreflect.FieldDescriptor + fd_OutboundObservation_error_msg protoreflect.FieldDescriptor ) func init() { file_uexecutor_v1_types_proto_init() - md_OutboundTx = File_uexecutor_v1_types_proto.Messages().ByName("OutboundTx") - fd_OutboundTx_destination_chain = md_OutboundTx.Fields().ByName("destination_chain") - fd_OutboundTx_tx_hash = 
md_OutboundTx.Fields().ByName("tx_hash") - fd_OutboundTx_recipient = md_OutboundTx.Fields().ByName("recipient") - fd_OutboundTx_amount = md_OutboundTx.Fields().ByName("amount") - fd_OutboundTx_asset_addr = md_OutboundTx.Fields().ByName("asset_addr") + md_OutboundObservation = File_uexecutor_v1_types_proto.Messages().ByName("OutboundObservation") + fd_OutboundObservation_success = md_OutboundObservation.Fields().ByName("success") + fd_OutboundObservation_block_height = md_OutboundObservation.Fields().ByName("block_height") + fd_OutboundObservation_tx_hash = md_OutboundObservation.Fields().ByName("tx_hash") + fd_OutboundObservation_error_msg = md_OutboundObservation.Fields().ByName("error_msg") } -var _ protoreflect.Message = (*fastReflection_OutboundTx)(nil) +var _ protoreflect.Message = (*fastReflection_OutboundObservation)(nil) -type fastReflection_OutboundTx OutboundTx +type fastReflection_OutboundObservation OutboundObservation -func (x *OutboundTx) ProtoReflect() protoreflect.Message { - return (*fastReflection_OutboundTx)(x) +func (x *OutboundObservation) ProtoReflect() protoreflect.Message { + return (*fastReflection_OutboundObservation)(x) } -func (x *OutboundTx) slowProtoReflect() protoreflect.Message { +func (x *OutboundObservation) slowProtoReflect() protoreflect.Message { mi := &file_uexecutor_v1_types_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4582,43 +4675,43 @@ func (x *OutboundTx) slowProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -var _fastReflection_OutboundTx_messageType fastReflection_OutboundTx_messageType -var _ protoreflect.MessageType = fastReflection_OutboundTx_messageType{} +var _fastReflection_OutboundObservation_messageType fastReflection_OutboundObservation_messageType +var _ protoreflect.MessageType = fastReflection_OutboundObservation_messageType{} -type fastReflection_OutboundTx_messageType struct{} +type 
fastReflection_OutboundObservation_messageType struct{} -func (x fastReflection_OutboundTx_messageType) Zero() protoreflect.Message { - return (*fastReflection_OutboundTx)(nil) +func (x fastReflection_OutboundObservation_messageType) Zero() protoreflect.Message { + return (*fastReflection_OutboundObservation)(nil) } -func (x fastReflection_OutboundTx_messageType) New() protoreflect.Message { - return new(fastReflection_OutboundTx) +func (x fastReflection_OutboundObservation_messageType) New() protoreflect.Message { + return new(fastReflection_OutboundObservation) } -func (x fastReflection_OutboundTx_messageType) Descriptor() protoreflect.MessageDescriptor { - return md_OutboundTx +func (x fastReflection_OutboundObservation_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_OutboundObservation } // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. -func (x *fastReflection_OutboundTx) Descriptor() protoreflect.MessageDescriptor { - return md_OutboundTx +func (x *fastReflection_OutboundObservation) Descriptor() protoreflect.MessageDescriptor { + return md_OutboundObservation } // Type returns the message type, which encapsulates both Go and protobuf // type information. If the Go type information is not needed, // it is recommended that the message descriptor be used instead. -func (x *fastReflection_OutboundTx) Type() protoreflect.MessageType { - return _fastReflection_OutboundTx_messageType +func (x *fastReflection_OutboundObservation) Type() protoreflect.MessageType { + return _fastReflection_OutboundObservation_messageType } // New returns a newly allocated and mutable empty message. 
-func (x *fastReflection_OutboundTx) New() protoreflect.Message { - return new(fastReflection_OutboundTx) +func (x *fastReflection_OutboundObservation) New() protoreflect.Message { + return new(fastReflection_OutboundObservation) } // Interface unwraps the message reflection interface and // returns the underlying ProtoMessage interface. -func (x *fastReflection_OutboundTx) Interface() protoreflect.ProtoMessage { - return (*OutboundTx)(x) +func (x *fastReflection_OutboundObservation) Interface() protoreflect.ProtoMessage { + return (*OutboundObservation)(x) } // Range iterates over every populated field in an undefined order, @@ -4626,34 +4719,28 @@ func (x *fastReflection_OutboundTx) Interface() protoreflect.ProtoMessage { // Range returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current field descriptor. -func (x *fastReflection_OutboundTx) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - if x.DestinationChain != "" { - value := protoreflect.ValueOfString(x.DestinationChain) - if !f(fd_OutboundTx_destination_chain, value) { - return - } - } - if x.TxHash != "" { - value := protoreflect.ValueOfString(x.TxHash) - if !f(fd_OutboundTx_tx_hash, value) { +func (x *fastReflection_OutboundObservation) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.Success != false { + value := protoreflect.ValueOfBool(x.Success) + if !f(fd_OutboundObservation_success, value) { return } } - if x.Recipient != "" { - value := protoreflect.ValueOfString(x.Recipient) - if !f(fd_OutboundTx_recipient, value) { + if x.BlockHeight != uint64(0) { + value := protoreflect.ValueOfUint64(x.BlockHeight) + if !f(fd_OutboundObservation_block_height, value) { return } } - if x.Amount != "" { - value := protoreflect.ValueOfString(x.Amount) - if !f(fd_OutboundTx_amount, value) { + if x.TxHash != "" { + value := protoreflect.ValueOfString(x.TxHash) + if !f(fd_OutboundObservation_tx_hash, 
value) { return } } - if x.AssetAddr != "" { - value := protoreflect.ValueOfString(x.AssetAddr) - if !f(fd_OutboundTx_asset_addr, value) { + if x.ErrorMsg != "" { + value := protoreflect.ValueOfString(x.ErrorMsg) + if !f(fd_OutboundObservation_error_msg, value) { return } } @@ -4670,23 +4757,21 @@ func (x *fastReflection_OutboundTx) Range(f func(protoreflect.FieldDescriptor, p // In other cases (aside from the nullable cases above), // a proto3 scalar field is populated if it contains a non-zero value, and // a repeated field is populated if it is non-empty. -func (x *fastReflection_OutboundTx) Has(fd protoreflect.FieldDescriptor) bool { +func (x *fastReflection_OutboundObservation) Has(fd protoreflect.FieldDescriptor) bool { switch fd.FullName() { - case "uexecutor.v1.OutboundTx.destination_chain": - return x.DestinationChain != "" - case "uexecutor.v1.OutboundTx.tx_hash": + case "uexecutor.v1.OutboundObservation.success": + return x.Success != false + case "uexecutor.v1.OutboundObservation.block_height": + return x.BlockHeight != uint64(0) + case "uexecutor.v1.OutboundObservation.tx_hash": return x.TxHash != "" - case "uexecutor.v1.OutboundTx.recipient": - return x.Recipient != "" - case "uexecutor.v1.OutboundTx.amount": - return x.Amount != "" - case "uexecutor.v1.OutboundTx.asset_addr": - return x.AssetAddr != "" + case "uexecutor.v1.OutboundObservation.error_msg": + return x.ErrorMsg != "" default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundObservation")) } - panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.OutboundObservation does not contain field %s", fd.FullName())) } } @@ -4696,23 +4781,21 @@ func (x *fastReflection_OutboundTx) Has(fd protoreflect.FieldDescriptor) bool { // associated with the given 
field number. // // Clear is a mutating operation and unsafe for concurrent use. -func (x *fastReflection_OutboundTx) Clear(fd protoreflect.FieldDescriptor) { +func (x *fastReflection_OutboundObservation) Clear(fd protoreflect.FieldDescriptor) { switch fd.FullName() { - case "uexecutor.v1.OutboundTx.destination_chain": - x.DestinationChain = "" - case "uexecutor.v1.OutboundTx.tx_hash": + case "uexecutor.v1.OutboundObservation.success": + x.Success = false + case "uexecutor.v1.OutboundObservation.block_height": + x.BlockHeight = uint64(0) + case "uexecutor.v1.OutboundObservation.tx_hash": x.TxHash = "" - case "uexecutor.v1.OutboundTx.recipient": - x.Recipient = "" - case "uexecutor.v1.OutboundTx.amount": - x.Amount = "" - case "uexecutor.v1.OutboundTx.asset_addr": - x.AssetAddr = "" + case "uexecutor.v1.OutboundObservation.error_msg": + x.ErrorMsg = "" default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundObservation")) } - panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.OutboundObservation does not contain field %s", fd.FullName())) } } @@ -4722,28 +4805,25 @@ func (x *fastReflection_OutboundTx) Clear(fd protoreflect.FieldDescriptor) { // the default value of a bytes scalar is guaranteed to be a copy. // For unpopulated composite types, it returns an empty, read-only view // of the value; to obtain a mutable reference, use Mutable. 
-func (x *fastReflection_OutboundTx) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { +func (x *fastReflection_OutboundObservation) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { switch descriptor.FullName() { - case "uexecutor.v1.OutboundTx.destination_chain": - value := x.DestinationChain - return protoreflect.ValueOfString(value) - case "uexecutor.v1.OutboundTx.tx_hash": + case "uexecutor.v1.OutboundObservation.success": + value := x.Success + return protoreflect.ValueOfBool(value) + case "uexecutor.v1.OutboundObservation.block_height": + value := x.BlockHeight + return protoreflect.ValueOfUint64(value) + case "uexecutor.v1.OutboundObservation.tx_hash": value := x.TxHash return protoreflect.ValueOfString(value) - case "uexecutor.v1.OutboundTx.recipient": - value := x.Recipient - return protoreflect.ValueOfString(value) - case "uexecutor.v1.OutboundTx.amount": - value := x.Amount - return protoreflect.ValueOfString(value) - case "uexecutor.v1.OutboundTx.asset_addr": - value := x.AssetAddr + case "uexecutor.v1.OutboundObservation.error_msg": + value := x.ErrorMsg return protoreflect.ValueOfString(value) default: if descriptor.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundObservation")) } - panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", descriptor.FullName())) + panic(fmt.Errorf("message uexecutor.v1.OutboundObservation does not contain field %s", descriptor.FullName())) } } @@ -4757,23 +4837,21 @@ func (x *fastReflection_OutboundTx) Get(descriptor protoreflect.FieldDescriptor) // empty, read-only value, then it panics. // // Set is a mutating operation and unsafe for concurrent use. 
-func (x *fastReflection_OutboundTx) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { +func (x *fastReflection_OutboundObservation) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { switch fd.FullName() { - case "uexecutor.v1.OutboundTx.destination_chain": - x.DestinationChain = value.Interface().(string) - case "uexecutor.v1.OutboundTx.tx_hash": + case "uexecutor.v1.OutboundObservation.success": + x.Success = value.Bool() + case "uexecutor.v1.OutboundObservation.block_height": + x.BlockHeight = value.Uint() + case "uexecutor.v1.OutboundObservation.tx_hash": x.TxHash = value.Interface().(string) - case "uexecutor.v1.OutboundTx.recipient": - x.Recipient = value.Interface().(string) - case "uexecutor.v1.OutboundTx.amount": - x.Amount = value.Interface().(string) - case "uexecutor.v1.OutboundTx.asset_addr": - x.AssetAddr = value.Interface().(string) + case "uexecutor.v1.OutboundObservation.error_msg": + x.ErrorMsg = value.Interface().(string) default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundObservation")) } - panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.OutboundObservation does not contain field %s", fd.FullName())) } } @@ -4787,56 +4865,52 @@ func (x *fastReflection_OutboundTx) Set(fd protoreflect.FieldDescriptor, value p // It panics if the field does not contain a composite type. // // Mutable is a mutating operation and unsafe for concurrent use. 
-func (x *fastReflection_OutboundTx) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { +func (x *fastReflection_OutboundObservation) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { switch fd.FullName() { - case "uexecutor.v1.OutboundTx.destination_chain": - panic(fmt.Errorf("field destination_chain of message uexecutor.v1.OutboundTx is not mutable")) - case "uexecutor.v1.OutboundTx.tx_hash": - panic(fmt.Errorf("field tx_hash of message uexecutor.v1.OutboundTx is not mutable")) - case "uexecutor.v1.OutboundTx.recipient": - panic(fmt.Errorf("field recipient of message uexecutor.v1.OutboundTx is not mutable")) - case "uexecutor.v1.OutboundTx.amount": - panic(fmt.Errorf("field amount of message uexecutor.v1.OutboundTx is not mutable")) - case "uexecutor.v1.OutboundTx.asset_addr": - panic(fmt.Errorf("field asset_addr of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundObservation.success": + panic(fmt.Errorf("field success of message uexecutor.v1.OutboundObservation is not mutable")) + case "uexecutor.v1.OutboundObservation.block_height": + panic(fmt.Errorf("field block_height of message uexecutor.v1.OutboundObservation is not mutable")) + case "uexecutor.v1.OutboundObservation.tx_hash": + panic(fmt.Errorf("field tx_hash of message uexecutor.v1.OutboundObservation is not mutable")) + case "uexecutor.v1.OutboundObservation.error_msg": + panic(fmt.Errorf("field error_msg of message uexecutor.v1.OutboundObservation is not mutable")) default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundObservation")) } - panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.OutboundObservation does not contain field %s", fd.FullName())) } } // NewField returns a new value that is 
assignable to the field // for the given descriptor. For scalars, this returns the default value. // For lists, maps, and messages, this returns a new, empty, mutable value. -func (x *fastReflection_OutboundTx) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { +func (x *fastReflection_OutboundObservation) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { switch fd.FullName() { - case "uexecutor.v1.OutboundTx.destination_chain": - return protoreflect.ValueOfString("") - case "uexecutor.v1.OutboundTx.tx_hash": - return protoreflect.ValueOfString("") - case "uexecutor.v1.OutboundTx.recipient": - return protoreflect.ValueOfString("") - case "uexecutor.v1.OutboundTx.amount": + case "uexecutor.v1.OutboundObservation.success": + return protoreflect.ValueOfBool(false) + case "uexecutor.v1.OutboundObservation.block_height": + return protoreflect.ValueOfUint64(uint64(0)) + case "uexecutor.v1.OutboundObservation.tx_hash": return protoreflect.ValueOfString("") - case "uexecutor.v1.OutboundTx.asset_addr": + case "uexecutor.v1.OutboundObservation.error_msg": return protoreflect.ValueOfString("") default: if fd.IsExtension() { - panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundObservation")) } - panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", fd.FullName())) + panic(fmt.Errorf("message uexecutor.v1.OutboundObservation does not contain field %s", fd.FullName())) } } // WhichOneof reports which field within the oneof is populated, // returning nil if none are populated. // It panics if the oneof descriptor does not belong to this message. 
-func (x *fastReflection_OutboundTx) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { +func (x *fastReflection_OutboundObservation) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { switch d.FullName() { default: - panic(fmt.Errorf("%s is not a oneof field in uexecutor.v1.OutboundTx", d.FullName())) + panic(fmt.Errorf("%s is not a oneof field in uexecutor.v1.OutboundObservation", d.FullName())) } panic("unreachable") } @@ -4844,7 +4918,7 @@ func (x *fastReflection_OutboundTx) WhichOneof(d protoreflect.OneofDescriptor) p // GetUnknown retrieves the entire list of unknown fields. // The caller may only mutate the contents of the RawFields // if the mutated bytes are stored back into the message with SetUnknown. -func (x *fastReflection_OutboundTx) GetUnknown() protoreflect.RawFields { +func (x *fastReflection_OutboundObservation) GetUnknown() protoreflect.RawFields { return x.unknownFields } @@ -4855,7 +4929,7 @@ func (x *fastReflection_OutboundTx) GetUnknown() protoreflect.RawFields { // An empty RawFields may be passed to clear the fields. // // SetUnknown is a mutating operation and unsafe for concurrent use. -func (x *fastReflection_OutboundTx) SetUnknown(fields protoreflect.RawFields) { +func (x *fastReflection_OutboundObservation) SetUnknown(fields protoreflect.RawFields) { x.unknownFields = fields } @@ -4867,7 +4941,7 @@ func (x *fastReflection_OutboundTx) SetUnknown(fields protoreflect.RawFields) { // message type, but the details are implementation dependent. // Validity is not part of the protobuf data model, and may not // be preserved in marshaling or other operations. -func (x *fastReflection_OutboundTx) IsValid() bool { +func (x *fastReflection_OutboundObservation) IsValid() bool { return x != nil } @@ -4877,9 +4951,9 @@ func (x *fastReflection_OutboundTx) IsValid() bool { // The returned methods type is identical to // "google.golang.org/protobuf/runtime/protoiface".Methods. 
// Consult the protoiface package documentation for details. -func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { +func (x *fastReflection_OutboundObservation) ProtoMethods() *protoiface.Methods { size := func(input protoiface.SizeInput) protoiface.SizeOutput { - x := input.Message.Interface().(*OutboundTx) + x := input.Message.Interface().(*OutboundObservation) if x == nil { return protoiface.SizeOutput{ NoUnkeyedLiterals: input.NoUnkeyedLiterals, @@ -4891,23 +4965,17 @@ func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { var n int var l int _ = l - l = len(x.DestinationChain) - if l > 0 { - n += 1 + l + runtime.Sov(uint64(l)) - } - l = len(x.TxHash) - if l > 0 { - n += 1 + l + runtime.Sov(uint64(l)) + if x.Success { + n += 2 } - l = len(x.Recipient) - if l > 0 { - n += 1 + l + runtime.Sov(uint64(l)) + if x.BlockHeight != 0 { + n += 1 + runtime.Sov(uint64(x.BlockHeight)) } - l = len(x.Amount) + l = len(x.TxHash) if l > 0 { n += 1 + l + runtime.Sov(uint64(l)) } - l = len(x.AssetAddr) + l = len(x.ErrorMsg) if l > 0 { n += 1 + l + runtime.Sov(uint64(l)) } @@ -4921,7 +4989,7 @@ func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { } marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { - x := input.Message.Interface().(*OutboundTx) + x := input.Message.Interface().(*OutboundObservation) if x == nil { return protoiface.MarshalOutput{ NoUnkeyedLiterals: input.NoUnkeyedLiterals, @@ -4940,42 +5008,1497 @@ func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } - if len(x.AssetAddr) > 0 { - i -= len(x.AssetAddr) - copy(dAtA[i:], x.AssetAddr) - i = runtime.EncodeVarint(dAtA, i, uint64(len(x.AssetAddr))) - i-- - dAtA[i] = 0x2a - } - if len(x.Amount) > 0 { - i -= len(x.Amount) - copy(dAtA[i:], x.Amount) - i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Amount))) + if len(x.ErrorMsg) > 0 { + i -= len(x.ErrorMsg) + 
copy(dAtA[i:], x.ErrorMsg) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.ErrorMsg))) i-- dAtA[i] = 0x22 } - if len(x.Recipient) > 0 { - i -= len(x.Recipient) - copy(dAtA[i:], x.Recipient) - i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Recipient))) - i-- - dAtA[i] = 0x1a - } if len(x.TxHash) > 0 { i -= len(x.TxHash) copy(dAtA[i:], x.TxHash) i = runtime.EncodeVarint(dAtA, i, uint64(len(x.TxHash))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - if len(x.DestinationChain) > 0 { - i -= len(x.DestinationChain) - copy(dAtA[i:], x.DestinationChain) - i = runtime.EncodeVarint(dAtA, i, uint64(len(x.DestinationChain))) + if x.BlockHeight != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.BlockHeight)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x10 } - if input.Buf != nil { + if x.Success { + i-- + if x.Success { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*OutboundObservation) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: OutboundObservation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: OutboundObservation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + x.Success = bool(v != 0) + case 2: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) + } + x.BlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.BlockHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field TxHash", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.TxHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ErrorMsg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.ErrorMsg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var ( + md_OriginatingPcTx protoreflect.MessageDescriptor + fd_OriginatingPcTx_tx_hash protoreflect.FieldDescriptor + fd_OriginatingPcTx_log_index protoreflect.FieldDescriptor +) + +func init() { + file_uexecutor_v1_types_proto_init() + md_OriginatingPcTx = File_uexecutor_v1_types_proto.Messages().ByName("OriginatingPcTx") + fd_OriginatingPcTx_tx_hash = md_OriginatingPcTx.Fields().ByName("tx_hash") + fd_OriginatingPcTx_log_index = md_OriginatingPcTx.Fields().ByName("log_index") +} + +var _ protoreflect.Message = 
(*fastReflection_OriginatingPcTx)(nil) + +type fastReflection_OriginatingPcTx OriginatingPcTx + +func (x *OriginatingPcTx) ProtoReflect() protoreflect.Message { + return (*fastReflection_OriginatingPcTx)(x) +} + +func (x *OriginatingPcTx) slowProtoReflect() protoreflect.Message { + mi := &file_uexecutor_v1_types_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_OriginatingPcTx_messageType fastReflection_OriginatingPcTx_messageType +var _ protoreflect.MessageType = fastReflection_OriginatingPcTx_messageType{} + +type fastReflection_OriginatingPcTx_messageType struct{} + +func (x fastReflection_OriginatingPcTx_messageType) Zero() protoreflect.Message { + return (*fastReflection_OriginatingPcTx)(nil) +} +func (x fastReflection_OriginatingPcTx_messageType) New() protoreflect.Message { + return new(fastReflection_OriginatingPcTx) +} +func (x fastReflection_OriginatingPcTx_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_OriginatingPcTx +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_OriginatingPcTx) Descriptor() protoreflect.MessageDescriptor { + return md_OriginatingPcTx +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_OriginatingPcTx) Type() protoreflect.MessageType { + return _fastReflection_OriginatingPcTx_messageType +} + +// New returns a newly allocated and mutable empty message. 
+func (x *fastReflection_OriginatingPcTx) New() protoreflect.Message { + return new(fastReflection_OriginatingPcTx) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_OriginatingPcTx) Interface() protoreflect.ProtoMessage { + return (*OriginatingPcTx)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_OriginatingPcTx) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.TxHash != "" { + value := protoreflect.ValueOfString(x.TxHash) + if !f(fd_OriginatingPcTx_tx_hash, value) { + return + } + } + if x.LogIndex != "" { + value := protoreflect.ValueOfString(x.LogIndex) + if !f(fd_OriginatingPcTx_log_index, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. 
+func (x *fastReflection_OriginatingPcTx) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "uexecutor.v1.OriginatingPcTx.tx_hash": + return x.TxHash != "" + case "uexecutor.v1.OriginatingPcTx.log_index": + return x.LogIndex != "" + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OriginatingPcTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OriginatingPcTx does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_OriginatingPcTx) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "uexecutor.v1.OriginatingPcTx.tx_hash": + x.TxHash = "" + case "uexecutor.v1.OriginatingPcTx.log_index": + x.LogIndex = "" + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OriginatingPcTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OriginatingPcTx does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_OriginatingPcTx) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "uexecutor.v1.OriginatingPcTx.tx_hash": + value := x.TxHash + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OriginatingPcTx.log_index": + value := x.LogIndex + return protoreflect.ValueOfString(value) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OriginatingPcTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OriginatingPcTx does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_OriginatingPcTx) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "uexecutor.v1.OriginatingPcTx.tx_hash": + x.TxHash = value.Interface().(string) + case "uexecutor.v1.OriginatingPcTx.log_index": + x.LogIndex = value.Interface().(string) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OriginatingPcTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OriginatingPcTx does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. 
+// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_OriginatingPcTx) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "uexecutor.v1.OriginatingPcTx.tx_hash": + panic(fmt.Errorf("field tx_hash of message uexecutor.v1.OriginatingPcTx is not mutable")) + case "uexecutor.v1.OriginatingPcTx.log_index": + panic(fmt.Errorf("field log_index of message uexecutor.v1.OriginatingPcTx is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OriginatingPcTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OriginatingPcTx does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_OriginatingPcTx) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "uexecutor.v1.OriginatingPcTx.tx_hash": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OriginatingPcTx.log_index": + return protoreflect.ValueOfString("") + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OriginatingPcTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OriginatingPcTx does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. 
+func (x *fastReflection_OriginatingPcTx) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in uexecutor.v1.OriginatingPcTx", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_OriginatingPcTx) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_OriginatingPcTx) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_OriginatingPcTx) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_OriginatingPcTx) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*OriginatingPcTx) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + l = len(x.TxHash) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.LogIndex) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*OriginatingPcTx) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if len(x.LogIndex) > 0 { + i -= len(x.LogIndex) + copy(dAtA[i:], x.LogIndex) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.LogIndex))) + i-- + dAtA[i] = 0x12 + } + if len(x.TxHash) > 0 { + i -= len(x.TxHash) + copy(dAtA[i:], x.TxHash) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.TxHash))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*OriginatingPcTx) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: OriginatingPcTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: OriginatingPcTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field TxHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.TxHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field LogIndex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.LogIndex = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var ( + md_OutboundTx protoreflect.MessageDescriptor + fd_OutboundTx_destination_chain protoreflect.FieldDescriptor + fd_OutboundTx_recipient protoreflect.FieldDescriptor + fd_OutboundTx_amount protoreflect.FieldDescriptor + fd_OutboundTx_external_asset_addr protoreflect.FieldDescriptor + fd_OutboundTx_prc20_asset_addr protoreflect.FieldDescriptor + fd_OutboundTx_sender protoreflect.FieldDescriptor + fd_OutboundTx_payload protoreflect.FieldDescriptor + fd_OutboundTx_gas_limit protoreflect.FieldDescriptor + fd_OutboundTx_tx_type protoreflect.FieldDescriptor + fd_OutboundTx_pc_tx protoreflect.FieldDescriptor + fd_OutboundTx_observed_tx protoreflect.FieldDescriptor + fd_OutboundTx_id protoreflect.FieldDescriptor + fd_OutboundTx_outbound_status protoreflect.FieldDescriptor + fd_OutboundTx_revert_instructions 
protoreflect.FieldDescriptor + fd_OutboundTx_pc_revert_execution protoreflect.FieldDescriptor +) + +func init() { + file_uexecutor_v1_types_proto_init() + md_OutboundTx = File_uexecutor_v1_types_proto.Messages().ByName("OutboundTx") + fd_OutboundTx_destination_chain = md_OutboundTx.Fields().ByName("destination_chain") + fd_OutboundTx_recipient = md_OutboundTx.Fields().ByName("recipient") + fd_OutboundTx_amount = md_OutboundTx.Fields().ByName("amount") + fd_OutboundTx_external_asset_addr = md_OutboundTx.Fields().ByName("external_asset_addr") + fd_OutboundTx_prc20_asset_addr = md_OutboundTx.Fields().ByName("prc20_asset_addr") + fd_OutboundTx_sender = md_OutboundTx.Fields().ByName("sender") + fd_OutboundTx_payload = md_OutboundTx.Fields().ByName("payload") + fd_OutboundTx_gas_limit = md_OutboundTx.Fields().ByName("gas_limit") + fd_OutboundTx_tx_type = md_OutboundTx.Fields().ByName("tx_type") + fd_OutboundTx_pc_tx = md_OutboundTx.Fields().ByName("pc_tx") + fd_OutboundTx_observed_tx = md_OutboundTx.Fields().ByName("observed_tx") + fd_OutboundTx_id = md_OutboundTx.Fields().ByName("id") + fd_OutboundTx_outbound_status = md_OutboundTx.Fields().ByName("outbound_status") + fd_OutboundTx_revert_instructions = md_OutboundTx.Fields().ByName("revert_instructions") + fd_OutboundTx_pc_revert_execution = md_OutboundTx.Fields().ByName("pc_revert_execution") +} + +var _ protoreflect.Message = (*fastReflection_OutboundTx)(nil) + +type fastReflection_OutboundTx OutboundTx + +func (x *OutboundTx) ProtoReflect() protoreflect.Message { + return (*fastReflection_OutboundTx)(x) +} + +func (x *OutboundTx) slowProtoReflect() protoreflect.Message { + mi := &file_uexecutor_v1_types_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_OutboundTx_messageType fastReflection_OutboundTx_messageType +var _ 
protoreflect.MessageType = fastReflection_OutboundTx_messageType{} + +type fastReflection_OutboundTx_messageType struct{} + +func (x fastReflection_OutboundTx_messageType) Zero() protoreflect.Message { + return (*fastReflection_OutboundTx)(nil) +} +func (x fastReflection_OutboundTx_messageType) New() protoreflect.Message { + return new(fastReflection_OutboundTx) +} +func (x fastReflection_OutboundTx_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_OutboundTx +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_OutboundTx) Descriptor() protoreflect.MessageDescriptor { + return md_OutboundTx +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_OutboundTx) Type() protoreflect.MessageType { + return _fastReflection_OutboundTx_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_OutboundTx) New() protoreflect.Message { + return new(fastReflection_OutboundTx) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_OutboundTx) Interface() protoreflect.ProtoMessage { + return (*OutboundTx)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. 
+func (x *fastReflection_OutboundTx) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.DestinationChain != "" { + value := protoreflect.ValueOfString(x.DestinationChain) + if !f(fd_OutboundTx_destination_chain, value) { + return + } + } + if x.Recipient != "" { + value := protoreflect.ValueOfString(x.Recipient) + if !f(fd_OutboundTx_recipient, value) { + return + } + } + if x.Amount != "" { + value := protoreflect.ValueOfString(x.Amount) + if !f(fd_OutboundTx_amount, value) { + return + } + } + if x.ExternalAssetAddr != "" { + value := protoreflect.ValueOfString(x.ExternalAssetAddr) + if !f(fd_OutboundTx_external_asset_addr, value) { + return + } + } + if x.Prc20AssetAddr != "" { + value := protoreflect.ValueOfString(x.Prc20AssetAddr) + if !f(fd_OutboundTx_prc20_asset_addr, value) { + return + } + } + if x.Sender != "" { + value := protoreflect.ValueOfString(x.Sender) + if !f(fd_OutboundTx_sender, value) { + return + } + } + if x.Payload != "" { + value := protoreflect.ValueOfString(x.Payload) + if !f(fd_OutboundTx_payload, value) { + return + } + } + if x.GasLimit != "" { + value := protoreflect.ValueOfString(x.GasLimit) + if !f(fd_OutboundTx_gas_limit, value) { + return + } + } + if x.TxType != 0 { + value := protoreflect.ValueOfEnum((protoreflect.EnumNumber)(x.TxType)) + if !f(fd_OutboundTx_tx_type, value) { + return + } + } + if x.PcTx != nil { + value := protoreflect.ValueOfMessage(x.PcTx.ProtoReflect()) + if !f(fd_OutboundTx_pc_tx, value) { + return + } + } + if x.ObservedTx != nil { + value := protoreflect.ValueOfMessage(x.ObservedTx.ProtoReflect()) + if !f(fd_OutboundTx_observed_tx, value) { + return + } + } + if x.Id != "" { + value := protoreflect.ValueOfString(x.Id) + if !f(fd_OutboundTx_id, value) { + return + } + } + if x.OutboundStatus != 0 { + value := protoreflect.ValueOfEnum((protoreflect.EnumNumber)(x.OutboundStatus)) + if !f(fd_OutboundTx_outbound_status, value) { + return + } + } + if x.RevertInstructions != nil { + 
value := protoreflect.ValueOfMessage(x.RevertInstructions.ProtoReflect()) + if !f(fd_OutboundTx_revert_instructions, value) { + return + } + } + if x.PcRevertExecution != nil { + value := protoreflect.ValueOfMessage(x.PcRevertExecution.ProtoReflect()) + if !f(fd_OutboundTx_pc_revert_execution, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_OutboundTx) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "uexecutor.v1.OutboundTx.destination_chain": + return x.DestinationChain != "" + case "uexecutor.v1.OutboundTx.recipient": + return x.Recipient != "" + case "uexecutor.v1.OutboundTx.amount": + return x.Amount != "" + case "uexecutor.v1.OutboundTx.external_asset_addr": + return x.ExternalAssetAddr != "" + case "uexecutor.v1.OutboundTx.prc20_asset_addr": + return x.Prc20AssetAddr != "" + case "uexecutor.v1.OutboundTx.sender": + return x.Sender != "" + case "uexecutor.v1.OutboundTx.payload": + return x.Payload != "" + case "uexecutor.v1.OutboundTx.gas_limit": + return x.GasLimit != "" + case "uexecutor.v1.OutboundTx.tx_type": + return x.TxType != 0 + case "uexecutor.v1.OutboundTx.pc_tx": + return x.PcTx != nil + case "uexecutor.v1.OutboundTx.observed_tx": + return x.ObservedTx != nil + case "uexecutor.v1.OutboundTx.id": + return x.Id != "" + case "uexecutor.v1.OutboundTx.outbound_status": + return x.OutboundStatus != 0 + case 
"uexecutor.v1.OutboundTx.revert_instructions": + return x.RevertInstructions != nil + case "uexecutor.v1.OutboundTx.pc_revert_execution": + return x.PcRevertExecution != nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_OutboundTx) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "uexecutor.v1.OutboundTx.destination_chain": + x.DestinationChain = "" + case "uexecutor.v1.OutboundTx.recipient": + x.Recipient = "" + case "uexecutor.v1.OutboundTx.amount": + x.Amount = "" + case "uexecutor.v1.OutboundTx.external_asset_addr": + x.ExternalAssetAddr = "" + case "uexecutor.v1.OutboundTx.prc20_asset_addr": + x.Prc20AssetAddr = "" + case "uexecutor.v1.OutboundTx.sender": + x.Sender = "" + case "uexecutor.v1.OutboundTx.payload": + x.Payload = "" + case "uexecutor.v1.OutboundTx.gas_limit": + x.GasLimit = "" + case "uexecutor.v1.OutboundTx.tx_type": + x.TxType = 0 + case "uexecutor.v1.OutboundTx.pc_tx": + x.PcTx = nil + case "uexecutor.v1.OutboundTx.observed_tx": + x.ObservedTx = nil + case "uexecutor.v1.OutboundTx.id": + x.Id = "" + case "uexecutor.v1.OutboundTx.outbound_status": + x.OutboundStatus = 0 + case "uexecutor.v1.OutboundTx.revert_instructions": + x.RevertInstructions = nil + case "uexecutor.v1.OutboundTx.pc_revert_execution": + x.PcRevertExecution = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", 
fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_OutboundTx) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "uexecutor.v1.OutboundTx.destination_chain": + value := x.DestinationChain + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OutboundTx.recipient": + value := x.Recipient + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OutboundTx.amount": + value := x.Amount + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OutboundTx.external_asset_addr": + value := x.ExternalAssetAddr + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OutboundTx.prc20_asset_addr": + value := x.Prc20AssetAddr + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OutboundTx.sender": + value := x.Sender + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OutboundTx.payload": + value := x.Payload + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OutboundTx.gas_limit": + value := x.GasLimit + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OutboundTx.tx_type": + value := x.TxType + return protoreflect.ValueOfEnum((protoreflect.EnumNumber)(value)) + case "uexecutor.v1.OutboundTx.pc_tx": + value := x.PcTx + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "uexecutor.v1.OutboundTx.observed_tx": + value := x.ObservedTx + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "uexecutor.v1.OutboundTx.id": + value := x.Id + return protoreflect.ValueOfString(value) + case "uexecutor.v1.OutboundTx.outbound_status": + value := x.OutboundStatus + return protoreflect.ValueOfEnum((protoreflect.EnumNumber)(value)) 
+ case "uexecutor.v1.OutboundTx.revert_instructions": + value := x.RevertInstructions + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "uexecutor.v1.OutboundTx.pc_revert_execution": + value := x.PcRevertExecution + return protoreflect.ValueOfMessage(value.ProtoReflect()) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_OutboundTx) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "uexecutor.v1.OutboundTx.destination_chain": + x.DestinationChain = value.Interface().(string) + case "uexecutor.v1.OutboundTx.recipient": + x.Recipient = value.Interface().(string) + case "uexecutor.v1.OutboundTx.amount": + x.Amount = value.Interface().(string) + case "uexecutor.v1.OutboundTx.external_asset_addr": + x.ExternalAssetAddr = value.Interface().(string) + case "uexecutor.v1.OutboundTx.prc20_asset_addr": + x.Prc20AssetAddr = value.Interface().(string) + case "uexecutor.v1.OutboundTx.sender": + x.Sender = value.Interface().(string) + case "uexecutor.v1.OutboundTx.payload": + x.Payload = value.Interface().(string) + case "uexecutor.v1.OutboundTx.gas_limit": + x.GasLimit = value.Interface().(string) + case "uexecutor.v1.OutboundTx.tx_type": + x.TxType = (TxType)(value.Enum()) + case "uexecutor.v1.OutboundTx.pc_tx": + x.PcTx = value.Message().Interface().(*OriginatingPcTx) + case "uexecutor.v1.OutboundTx.observed_tx": + x.ObservedTx = value.Message().Interface().(*OutboundObservation) + case "uexecutor.v1.OutboundTx.id": + x.Id = value.Interface().(string) + case "uexecutor.v1.OutboundTx.outbound_status": + x.OutboundStatus = (Status)(value.Enum()) + case "uexecutor.v1.OutboundTx.revert_instructions": + x.RevertInstructions = value.Message().Interface().(*RevertInstructions) + case "uexecutor.v1.OutboundTx.pc_revert_execution": + x.PcRevertExecution = value.Message().Interface().(*PCTx) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. 
+// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_OutboundTx) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "uexecutor.v1.OutboundTx.pc_tx": + if x.PcTx == nil { + x.PcTx = new(OriginatingPcTx) + } + return protoreflect.ValueOfMessage(x.PcTx.ProtoReflect()) + case "uexecutor.v1.OutboundTx.observed_tx": + if x.ObservedTx == nil { + x.ObservedTx = new(OutboundObservation) + } + return protoreflect.ValueOfMessage(x.ObservedTx.ProtoReflect()) + case "uexecutor.v1.OutboundTx.revert_instructions": + if x.RevertInstructions == nil { + x.RevertInstructions = new(RevertInstructions) + } + return protoreflect.ValueOfMessage(x.RevertInstructions.ProtoReflect()) + case "uexecutor.v1.OutboundTx.pc_revert_execution": + if x.PcRevertExecution == nil { + x.PcRevertExecution = new(PCTx) + } + return protoreflect.ValueOfMessage(x.PcRevertExecution.ProtoReflect()) + case "uexecutor.v1.OutboundTx.destination_chain": + panic(fmt.Errorf("field destination_chain of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.recipient": + panic(fmt.Errorf("field recipient of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.amount": + panic(fmt.Errorf("field amount of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.external_asset_addr": + panic(fmt.Errorf("field external_asset_addr of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.prc20_asset_addr": + panic(fmt.Errorf("field prc20_asset_addr of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.sender": + 
panic(fmt.Errorf("field sender of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.payload": + panic(fmt.Errorf("field payload of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.gas_limit": + panic(fmt.Errorf("field gas_limit of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.tx_type": + panic(fmt.Errorf("field tx_type of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.id": + panic(fmt.Errorf("field id of message uexecutor.v1.OutboundTx is not mutable")) + case "uexecutor.v1.OutboundTx.outbound_status": + panic(fmt.Errorf("field outbound_status of message uexecutor.v1.OutboundTx is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. 
+func (x *fastReflection_OutboundTx) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "uexecutor.v1.OutboundTx.destination_chain": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OutboundTx.recipient": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OutboundTx.amount": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OutboundTx.external_asset_addr": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OutboundTx.prc20_asset_addr": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OutboundTx.sender": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OutboundTx.payload": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OutboundTx.gas_limit": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OutboundTx.tx_type": + return protoreflect.ValueOfEnum(0) + case "uexecutor.v1.OutboundTx.pc_tx": + m := new(OriginatingPcTx) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "uexecutor.v1.OutboundTx.observed_tx": + m := new(OutboundObservation) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "uexecutor.v1.OutboundTx.id": + return protoreflect.ValueOfString("") + case "uexecutor.v1.OutboundTx.outbound_status": + return protoreflect.ValueOfEnum(0) + case "uexecutor.v1.OutboundTx.revert_instructions": + m := new(RevertInstructions) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "uexecutor.v1.OutboundTx.pc_revert_execution": + m := new(PCTx) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: uexecutor.v1.OutboundTx")) + } + panic(fmt.Errorf("message uexecutor.v1.OutboundTx does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. 
+func (x *fastReflection_OutboundTx) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in uexecutor.v1.OutboundTx", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_OutboundTx) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_OutboundTx) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_OutboundTx) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*OutboundTx) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + l = len(x.DestinationChain) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.Recipient) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.Amount) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.ExternalAssetAddr) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.Prc20AssetAddr) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.Sender) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.Payload) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.GasLimit) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.TxType != 0 { + n += 1 + runtime.Sov(uint64(x.TxType)) + } + if x.PcTx != nil { + l = options.Size(x.PcTx) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.ObservedTx != nil { + l = options.Size(x.ObservedTx) + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.Id) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.OutboundStatus != 0 { + n += 1 + runtime.Sov(uint64(x.OutboundStatus)) + } + if x.RevertInstructions != nil { + l = options.Size(x.RevertInstructions) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.PcRevertExecution != nil { + l = options.Size(x.PcRevertExecution) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*OutboundTx) + if x == nil { + return 
protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.PcRevertExecution != nil { + encoded, err := options.Marshal(x.PcRevertExecution) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x7a + } + if x.RevertInstructions != nil { + encoded, err := options.Marshal(x.RevertInstructions) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x72 + } + if x.OutboundStatus != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.OutboundStatus)) + i-- + dAtA[i] = 0x68 + } + if len(x.Id) > 0 { + i -= len(x.Id) + copy(dAtA[i:], x.Id) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Id))) + i-- + dAtA[i] = 0x62 + } + if x.ObservedTx != nil { + encoded, err := options.Marshal(x.ObservedTx) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x5a + } + if x.PcTx != nil { + encoded, err := options.Marshal(x.PcTx) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x52 + } + if x.TxType 
!= 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.TxType)) + i-- + dAtA[i] = 0x48 + } + if len(x.GasLimit) > 0 { + i -= len(x.GasLimit) + copy(dAtA[i:], x.GasLimit) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.GasLimit))) + i-- + dAtA[i] = 0x42 + } + if len(x.Payload) > 0 { + i -= len(x.Payload) + copy(dAtA[i:], x.Payload) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Payload))) + i-- + dAtA[i] = 0x3a + } + if len(x.Sender) > 0 { + i -= len(x.Sender) + copy(dAtA[i:], x.Sender) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Sender))) + i-- + dAtA[i] = 0x32 + } + if len(x.Prc20AssetAddr) > 0 { + i -= len(x.Prc20AssetAddr) + copy(dAtA[i:], x.Prc20AssetAddr) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Prc20AssetAddr))) + i-- + dAtA[i] = 0x2a + } + if len(x.ExternalAssetAddr) > 0 { + i -= len(x.ExternalAssetAddr) + copy(dAtA[i:], x.ExternalAssetAddr) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.ExternalAssetAddr))) + i-- + dAtA[i] = 0x22 + } + if len(x.Amount) > 0 { + i -= len(x.Amount) + copy(dAtA[i:], x.Amount) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Amount))) + i-- + dAtA[i] = 0x1a + } + if len(x.Recipient) > 0 { + i -= len(x.Recipient) + copy(dAtA[i:], x.Recipient) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Recipient))) + i-- + dAtA[i] = 0x12 + } + if len(x.DestinationChain) > 0 { + i -= len(x.DestinationChain) + copy(dAtA[i:], x.DestinationChain) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.DestinationChain))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { input.Buf = append(input.Buf, dAtA...) 
} else { input.Buf = dAtA @@ -5005,28 +6528,156 @@ func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { if shift >= 64 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow } - if iNdEx >= l { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: OutboundTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: OutboundTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field DestinationChain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.DestinationChain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Recipient", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Recipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: 
input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Amount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ExternalAssetAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength } - } - 
fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: OutboundTx: wiretype end group for non-group") - } - if fieldNum <= 0 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: OutboundTx: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.ExternalAssetAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field DestinationChain", wireType) + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Prc20AssetAddr", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5054,11 +6705,11 @@ func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { if postIndex > l { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF } - x.DestinationChain = string(dAtA[iNdEx:postIndex]) + x.Prc20AssetAddr = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field TxHash", wireType) + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5086,11 +6737,11 @@ func (x 
*fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { if postIndex > l { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF } - x.TxHash = string(dAtA[iNdEx:postIndex]) + x.Sender = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 7: if wireType != 2 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Recipient", wireType) + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5118,11 +6769,11 @@ func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { if postIndex > l { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF } - x.Recipient = string(dAtA[iNdEx:postIndex]) + x.Payload = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 8: if wireType != 2 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field GasLimit", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5150,11 +6801,102 @@ func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { if postIndex > l { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF } - x.Amount = string(dAtA[iNdEx:postIndex]) + x.GasLimit = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 9: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, 
Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field TxType", wireType) + } + x.TxType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.TxType |= TxType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: if wireType != 2 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field AssetAddr", wireType) + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field PcTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.PcTx == nil { + x.PcTx = &OriginatingPcTx{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.PcTx); err != nil { + return 
protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ObservedTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.ObservedTx == nil { + x.ObservedTx = &OutboundObservation{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.ObservedTx); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5182,7 +6924,98 @@ func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { if postIndex > l { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, 
io.ErrUnexpectedEOF } - x.AssetAddr = string(dAtA[iNdEx:postIndex]) + x.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field OutboundStatus", wireType) + } + x.OutboundStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.OutboundStatus |= Status(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field RevertInstructions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.RevertInstructions == nil { + x.RevertInstructions = 
&RevertInstructions{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.RevertInstructions); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field PcRevertExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.PcRevertExecution == nil { + x.PcRevertExecution = &PCTx{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.PcRevertExecution); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5219,59 +7052,111 @@ func (x *fastReflection_OutboundTx) ProtoMethods() *protoiface.Methods { } } -var _ protoreflect.List = (*_UniversalTx_2_list)(nil) +var _ protoreflect.List = (*_UniversalTx_3_list)(nil) -type _UniversalTx_2_list struct { +type _UniversalTx_3_list struct { list *[]*PCTx } -func (x 
*_UniversalTx_2_list) Len() int { +func (x *_UniversalTx_3_list) Len() int { if x.list == nil { return 0 } return len(*x.list) } -func (x *_UniversalTx_2_list) Get(i int) protoreflect.Value { +func (x *_UniversalTx_3_list) Get(i int) protoreflect.Value { return protoreflect.ValueOfMessage((*x.list)[i].ProtoReflect()) } -func (x *_UniversalTx_2_list) Set(i int, value protoreflect.Value) { +func (x *_UniversalTx_3_list) Set(i int, value protoreflect.Value) { valueUnwrapped := value.Message() concreteValue := valueUnwrapped.Interface().(*PCTx) (*x.list)[i] = concreteValue } -func (x *_UniversalTx_2_list) Append(value protoreflect.Value) { +func (x *_UniversalTx_3_list) Append(value protoreflect.Value) { valueUnwrapped := value.Message() concreteValue := valueUnwrapped.Interface().(*PCTx) *x.list = append(*x.list, concreteValue) } -func (x *_UniversalTx_2_list) AppendMutable() protoreflect.Value { +func (x *_UniversalTx_3_list) AppendMutable() protoreflect.Value { v := new(PCTx) *x.list = append(*x.list, v) return protoreflect.ValueOfMessage(v.ProtoReflect()) } -func (x *_UniversalTx_2_list) Truncate(n int) { +func (x *_UniversalTx_3_list) Truncate(n int) { for i := n; i < len(*x.list); i++ { (*x.list)[i] = nil } *x.list = (*x.list)[:n] } -func (x *_UniversalTx_2_list) NewElement() protoreflect.Value { +func (x *_UniversalTx_3_list) NewElement() protoreflect.Value { v := new(PCTx) return protoreflect.ValueOfMessage(v.ProtoReflect()) } -func (x *_UniversalTx_2_list) IsValid() bool { +func (x *_UniversalTx_3_list) IsValid() bool { + return x.list != nil +} + +var _ protoreflect.List = (*_UniversalTx_4_list)(nil) + +type _UniversalTx_4_list struct { + list *[]*OutboundTx +} + +func (x *_UniversalTx_4_list) Len() int { + if x.list == nil { + return 0 + } + return len(*x.list) +} + +func (x *_UniversalTx_4_list) Get(i int) protoreflect.Value { + return protoreflect.ValueOfMessage((*x.list)[i].ProtoReflect()) +} + +func (x *_UniversalTx_4_list) Set(i int, value 
protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*OutboundTx) + (*x.list)[i] = concreteValue +} + +func (x *_UniversalTx_4_list) Append(value protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*OutboundTx) + *x.list = append(*x.list, concreteValue) +} + +func (x *_UniversalTx_4_list) AppendMutable() protoreflect.Value { + v := new(OutboundTx) + *x.list = append(*x.list, v) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_UniversalTx_4_list) Truncate(n int) { + for i := n; i < len(*x.list); i++ { + (*x.list)[i] = nil + } + *x.list = (*x.list)[:n] +} + +func (x *_UniversalTx_4_list) NewElement() protoreflect.Value { + v := new(OutboundTx) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_UniversalTx_4_list) IsValid() bool { return x.list != nil } var ( md_UniversalTx protoreflect.MessageDescriptor + fd_UniversalTx_id protoreflect.FieldDescriptor fd_UniversalTx_inbound_tx protoreflect.FieldDescriptor fd_UniversalTx_pc_tx protoreflect.FieldDescriptor fd_UniversalTx_outbound_tx protoreflect.FieldDescriptor @@ -5281,6 +7166,7 @@ var ( func init() { file_uexecutor_v1_types_proto_init() md_UniversalTx = File_uexecutor_v1_types_proto.Messages().ByName("UniversalTx") + fd_UniversalTx_id = md_UniversalTx.Fields().ByName("id") fd_UniversalTx_inbound_tx = md_UniversalTx.Fields().ByName("inbound_tx") fd_UniversalTx_pc_tx = md_UniversalTx.Fields().ByName("pc_tx") fd_UniversalTx_outbound_tx = md_UniversalTx.Fields().ByName("outbound_tx") @@ -5296,7 +7182,7 @@ func (x *UniversalTx) ProtoReflect() protoreflect.Message { } func (x *UniversalTx) slowProtoReflect() protoreflect.Message { - mi := &file_uexecutor_v1_types_proto_msgTypes[8] + mi := &file_uexecutor_v1_types_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5352,6 +7238,12 
@@ func (x *fastReflection_UniversalTx) Interface() protoreflect.ProtoMessage { // While iterating, mutating operations may only be performed // on the current field descriptor. func (x *fastReflection_UniversalTx) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.Id != "" { + value := protoreflect.ValueOfString(x.Id) + if !f(fd_UniversalTx_id, value) { + return + } + } if x.InboundTx != nil { value := protoreflect.ValueOfMessage(x.InboundTx.ProtoReflect()) if !f(fd_UniversalTx_inbound_tx, value) { @@ -5359,13 +7251,13 @@ func (x *fastReflection_UniversalTx) Range(f func(protoreflect.FieldDescriptor, } } if len(x.PcTx) != 0 { - value := protoreflect.ValueOfList(&_UniversalTx_2_list{list: &x.PcTx}) + value := protoreflect.ValueOfList(&_UniversalTx_3_list{list: &x.PcTx}) if !f(fd_UniversalTx_pc_tx, value) { return } } - if x.OutboundTx != nil { - value := protoreflect.ValueOfMessage(x.OutboundTx.ProtoReflect()) + if len(x.OutboundTx) != 0 { + value := protoreflect.ValueOfList(&_UniversalTx_4_list{list: &x.OutboundTx}) if !f(fd_UniversalTx_outbound_tx, value) { return } @@ -5391,12 +7283,14 @@ func (x *fastReflection_UniversalTx) Range(f func(protoreflect.FieldDescriptor, // a repeated field is populated if it is non-empty. func (x *fastReflection_UniversalTx) Has(fd protoreflect.FieldDescriptor) bool { switch fd.FullName() { + case "uexecutor.v1.UniversalTx.id": + return x.Id != "" case "uexecutor.v1.UniversalTx.inbound_tx": return x.InboundTx != nil case "uexecutor.v1.UniversalTx.pc_tx": return len(x.PcTx) != 0 case "uexecutor.v1.UniversalTx.outbound_tx": - return x.OutboundTx != nil + return len(x.OutboundTx) != 0 case "uexecutor.v1.UniversalTx.universal_status": return x.UniversalStatus != 0 default: @@ -5415,6 +7309,8 @@ func (x *fastReflection_UniversalTx) Has(fd protoreflect.FieldDescriptor) bool { // Clear is a mutating operation and unsafe for concurrent use. 
func (x *fastReflection_UniversalTx) Clear(fd protoreflect.FieldDescriptor) { switch fd.FullName() { + case "uexecutor.v1.UniversalTx.id": + x.Id = "" case "uexecutor.v1.UniversalTx.inbound_tx": x.InboundTx = nil case "uexecutor.v1.UniversalTx.pc_tx": @@ -5439,18 +7335,24 @@ func (x *fastReflection_UniversalTx) Clear(fd protoreflect.FieldDescriptor) { // of the value; to obtain a mutable reference, use Mutable. func (x *fastReflection_UniversalTx) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { switch descriptor.FullName() { + case "uexecutor.v1.UniversalTx.id": + value := x.Id + return protoreflect.ValueOfString(value) case "uexecutor.v1.UniversalTx.inbound_tx": value := x.InboundTx return protoreflect.ValueOfMessage(value.ProtoReflect()) case "uexecutor.v1.UniversalTx.pc_tx": if len(x.PcTx) == 0 { - return protoreflect.ValueOfList(&_UniversalTx_2_list{}) + return protoreflect.ValueOfList(&_UniversalTx_3_list{}) } - listValue := &_UniversalTx_2_list{list: &x.PcTx} + listValue := &_UniversalTx_3_list{list: &x.PcTx} return protoreflect.ValueOfList(listValue) case "uexecutor.v1.UniversalTx.outbound_tx": - value := x.OutboundTx - return protoreflect.ValueOfMessage(value.ProtoReflect()) + if len(x.OutboundTx) == 0 { + return protoreflect.ValueOfList(&_UniversalTx_4_list{}) + } + listValue := &_UniversalTx_4_list{list: &x.OutboundTx} + return protoreflect.ValueOfList(listValue) case "uexecutor.v1.UniversalTx.universal_status": value := x.UniversalStatus return protoreflect.ValueOfEnum((protoreflect.EnumNumber)(value)) @@ -5474,14 +7376,18 @@ func (x *fastReflection_UniversalTx) Get(descriptor protoreflect.FieldDescriptor // Set is a mutating operation and unsafe for concurrent use. 
func (x *fastReflection_UniversalTx) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { switch fd.FullName() { + case "uexecutor.v1.UniversalTx.id": + x.Id = value.Interface().(string) case "uexecutor.v1.UniversalTx.inbound_tx": x.InboundTx = value.Message().Interface().(*Inbound) case "uexecutor.v1.UniversalTx.pc_tx": lv := value.List() - clv := lv.(*_UniversalTx_2_list) + clv := lv.(*_UniversalTx_3_list) x.PcTx = *clv.list case "uexecutor.v1.UniversalTx.outbound_tx": - x.OutboundTx = value.Message().Interface().(*OutboundTx) + lv := value.List() + clv := lv.(*_UniversalTx_4_list) + x.OutboundTx = *clv.list case "uexecutor.v1.UniversalTx.universal_status": x.UniversalStatus = (UniversalTxStatus)(value.Enum()) default: @@ -5513,13 +7419,16 @@ func (x *fastReflection_UniversalTx) Mutable(fd protoreflect.FieldDescriptor) pr if x.PcTx == nil { x.PcTx = []*PCTx{} } - value := &_UniversalTx_2_list{list: &x.PcTx} + value := &_UniversalTx_3_list{list: &x.PcTx} return protoreflect.ValueOfList(value) case "uexecutor.v1.UniversalTx.outbound_tx": if x.OutboundTx == nil { - x.OutboundTx = new(OutboundTx) + x.OutboundTx = []*OutboundTx{} } - return protoreflect.ValueOfMessage(x.OutboundTx.ProtoReflect()) + value := &_UniversalTx_4_list{list: &x.OutboundTx} + return protoreflect.ValueOfList(value) + case "uexecutor.v1.UniversalTx.id": + panic(fmt.Errorf("field id of message uexecutor.v1.UniversalTx is not mutable")) case "uexecutor.v1.UniversalTx.universal_status": panic(fmt.Errorf("field universal_status of message uexecutor.v1.UniversalTx is not mutable")) default: @@ -5535,15 +7444,17 @@ func (x *fastReflection_UniversalTx) Mutable(fd protoreflect.FieldDescriptor) pr // For lists, maps, and messages, this returns a new, empty, mutable value. 
func (x *fastReflection_UniversalTx) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { switch fd.FullName() { + case "uexecutor.v1.UniversalTx.id": + return protoreflect.ValueOfString("") case "uexecutor.v1.UniversalTx.inbound_tx": m := new(Inbound) return protoreflect.ValueOfMessage(m.ProtoReflect()) case "uexecutor.v1.UniversalTx.pc_tx": list := []*PCTx{} - return protoreflect.ValueOfList(&_UniversalTx_2_list{list: &list}) + return protoreflect.ValueOfList(&_UniversalTx_3_list{list: &list}) case "uexecutor.v1.UniversalTx.outbound_tx": - m := new(OutboundTx) - return protoreflect.ValueOfMessage(m.ProtoReflect()) + list := []*OutboundTx{} + return protoreflect.ValueOfList(&_UniversalTx_4_list{list: &list}) case "uexecutor.v1.UniversalTx.universal_status": return protoreflect.ValueOfEnum(0) default: @@ -5615,6 +7526,10 @@ func (x *fastReflection_UniversalTx) ProtoMethods() *protoiface.Methods { var n int var l int _ = l + l = len(x.Id) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } if x.InboundTx != nil { l = options.Size(x.InboundTx) n += 1 + l + runtime.Sov(uint64(l)) @@ -5625,9 +7540,11 @@ func (x *fastReflection_UniversalTx) ProtoMethods() *protoiface.Methods { n += 1 + l + runtime.Sov(uint64(l)) } } - if x.OutboundTx != nil { - l = options.Size(x.OutboundTx) - n += 1 + l + runtime.Sov(uint64(l)) + if len(x.OutboundTx) > 0 { + for _, e := range x.OutboundTx { + l = options.Size(e) + n += 1 + l + runtime.Sov(uint64(l)) + } } if x.UniversalStatus != 0 { n += 1 + runtime.Sov(uint64(x.UniversalStatus)) @@ -5664,21 +7581,23 @@ func (x *fastReflection_UniversalTx) ProtoMethods() *protoiface.Methods { if x.UniversalStatus != 0 { i = runtime.EncodeVarint(dAtA, i, uint64(x.UniversalStatus)) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x28 } - if x.OutboundTx != nil { - encoded, err := options.Marshal(x.OutboundTx) - if err != nil { - return protoiface.MarshalOutput{ - NoUnkeyedLiterals: input.NoUnkeyedLiterals, - Buf: input.Buf, - }, err + if len(x.OutboundTx) 
> 0 { + for iNdEx := len(x.OutboundTx) - 1; iNdEx >= 0; iNdEx-- { + encoded, err := options.Marshal(x.OutboundTx[iNdEx]) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x22 } - i -= len(encoded) - copy(dAtA[i:], encoded) - i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) - i-- - dAtA[i] = 0x1a } if len(x.PcTx) > 0 { for iNdEx := len(x.PcTx) - 1; iNdEx >= 0; iNdEx-- { @@ -5693,7 +7612,7 @@ func (x *fastReflection_UniversalTx) ProtoMethods() *protoiface.Methods { copy(dAtA[i:], encoded) i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } } if x.InboundTx != nil { @@ -5708,6 +7627,13 @@ func (x *fastReflection_UniversalTx) ProtoMethods() *protoiface.Methods { copy(dAtA[i:], encoded) i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) i-- + dAtA[i] = 0x12 + } + if len(x.Id) > 0 { + i -= len(x.Id) + copy(dAtA[i:], x.Id) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Id))) + i-- dAtA[i] = 0xa } if input.Buf != nil { @@ -5760,6 +7686,38 @@ func (x *fastReflection_UniversalTx) ProtoMethods() *protoiface.Methods { } switch fieldNum { case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field InboundTx", wireType) } @@ -5795,7 +7753,7 @@ func (x *fastReflection_UniversalTx) ProtoMethods() *protoiface.Methods { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field PcTx", wireType) } @@ -5829,7 +7787,7 @@ func (x *fastReflection_UniversalTx) ProtoMethods() *protoiface.Methods { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field OutboundTx", wireType) } @@ -5858,14 +7816,12 @@ func (x *fastReflection_UniversalTx) ProtoMethods() *protoiface.Methods { if postIndex > l { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF } - if x.OutboundTx == nil { - x.OutboundTx = &OutboundTx{} - } - if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.OutboundTx); err != nil { + x.OutboundTx = 
append(x.OutboundTx, &OutboundTx{}) + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.OutboundTx[len(x.OutboundTx)-1]); err != nil { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err } iNdEx = postIndex - case 4: + case 5: if wireType != 0 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field UniversalStatus", wireType) } @@ -6054,7 +8010,8 @@ type Status int32 const ( Status_UNSPECIFIED Status = 0 Status_PENDING Status = 1 - Status_FINALIZED Status = 2 + Status_OBSERVED Status = 2 + Status_REVERTED Status = 3 ) // Enum value maps for Status. @@ -6062,12 +8019,14 @@ var ( Status_name = map[int32]string{ 0: "UNSPECIFIED", 1: "PENDING", - 2: "FINALIZED", + 2: "OBSERVED", + 3: "REVERTED", } Status_value = map[string]int32{ "UNSPECIFIED": 0, "PENDING": 1, - "FINALIZED": 2, + "OBSERVED": 2, + "REVERTED": 3, } ) @@ -6098,58 +8057,64 @@ func (Status) EnumDescriptor() ([]byte, []int) { return file_uexecutor_v1_types_proto_rawDescGZIP(), []int{2} } -type InboundTxType int32 +type TxType int32 const ( - InboundTxType_UNSPECIFIED_TX InboundTxType = 0 - InboundTxType_GAS InboundTxType = 1 // fee abstraction - InboundTxType_FUNDS InboundTxType = 2 // synthetic - InboundTxType_FUNDS_AND_PAYLOAD InboundTxType = 3 // synthetic + payload exec - InboundTxType_GAS_AND_PAYLOAD InboundTxType = 4 // fee abstraction + payload exec + TxType_UNSPECIFIED_TX TxType = 0 + TxType_GAS TxType = 1 + TxType_GAS_AND_PAYLOAD TxType = 2 + TxType_FUNDS TxType = 3 + TxType_FUNDS_AND_PAYLOAD TxType = 4 + TxType_PAYLOAD TxType = 5 + TxType_INBOUND_REVERT TxType = 6 ) -// Enum value maps for InboundTxType. +// Enum value maps for TxType. 
var ( - InboundTxType_name = map[int32]string{ + TxType_name = map[int32]string{ 0: "UNSPECIFIED_TX", 1: "GAS", - 2: "FUNDS", - 3: "FUNDS_AND_PAYLOAD", - 4: "GAS_AND_PAYLOAD", + 2: "GAS_AND_PAYLOAD", + 3: "FUNDS", + 4: "FUNDS_AND_PAYLOAD", + 5: "PAYLOAD", + 6: "INBOUND_REVERT", } - InboundTxType_value = map[string]int32{ + TxType_value = map[string]int32{ "UNSPECIFIED_TX": 0, "GAS": 1, - "FUNDS": 2, - "FUNDS_AND_PAYLOAD": 3, - "GAS_AND_PAYLOAD": 4, + "GAS_AND_PAYLOAD": 2, + "FUNDS": 3, + "FUNDS_AND_PAYLOAD": 4, + "PAYLOAD": 5, + "INBOUND_REVERT": 6, } ) -func (x InboundTxType) Enum() *InboundTxType { - p := new(InboundTxType) +func (x TxType) Enum() *TxType { + p := new(TxType) *p = x return p } -func (x InboundTxType) String() string { +func (x TxType) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (InboundTxType) Descriptor() protoreflect.EnumDescriptor { +func (TxType) Descriptor() protoreflect.EnumDescriptor { return file_uexecutor_v1_types_proto_enumTypes[3].Descriptor() } -func (InboundTxType) Type() protoreflect.EnumType { +func (TxType) Type() protoreflect.EnumType { return &file_uexecutor_v1_types_proto_enumTypes[3] } -func (x InboundTxType) Number() protoreflect.EnumNumber { +func (x TxType) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use InboundTxType.Descriptor instead. -func (InboundTxType) EnumDescriptor() ([]byte, []int) { +// Deprecated: Use TxType.Descriptor instead. 
+func (TxType) EnumDescriptor() ([]byte, []int) { return file_uexecutor_v1_types_proto_rawDescGZIP(), []int{3} } @@ -6393,16 +8358,16 @@ func (x *UniversalAccountId) GetOwner() string { return "" } -type InboundStatus struct { +type RevertInstructions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=uexecutor.v1.Status" json:"status,omitempty"` + FundRecipient string `protobuf:"bytes,1,opt,name=fund_recipient,json=fundRecipient,proto3" json:"fund_recipient,omitempty"` // where funds go in revert/refund } -func (x *InboundStatus) Reset() { - *x = InboundStatus{} +func (x *RevertInstructions) Reset() { + *x = RevertInstructions{} if protoimpl.UnsafeEnabled { mi := &file_uexecutor_v1_types_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -6410,22 +8375,22 @@ func (x *InboundStatus) Reset() { } } -func (x *InboundStatus) String() string { +func (x *RevertInstructions) String() string { return protoimpl.X.MessageStringOf(x) } -func (*InboundStatus) ProtoMessage() {} +func (*RevertInstructions) ProtoMessage() {} -// Deprecated: Use InboundStatus.ProtoReflect.Descriptor instead. -func (*InboundStatus) Descriptor() ([]byte, []int) { +// Deprecated: Use RevertInstructions.ProtoReflect.Descriptor instead. +func (*RevertInstructions) Descriptor() ([]byte, []int) { return file_uexecutor_v1_types_proto_rawDescGZIP(), []int{4} } -func (x *InboundStatus) GetStatus() Status { +func (x *RevertInstructions) GetFundRecipient() string { if x != nil { - return x.Status + return x.FundRecipient } - return Status_UNSPECIFIED + return "" } type Inbound struct { @@ -6433,16 +8398,17 @@ type Inbound struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SourceChain string `protobuf:"bytes,1,opt,name=source_chain,json=sourceChain,proto3" json:"source_chain,omitempty"` // origin chain caip2 id (e.g. 
eip155:11155111) - TxHash string `protobuf:"bytes,2,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` // unique tx hash / identifier from source chain - Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` // sender address on source chain - Recipient string `protobuf:"bytes,4,opt,name=recipient,proto3" json:"recipient,omitempty"` // recipient address on destination chain - Amount string `protobuf:"bytes,5,opt,name=amount,proto3" json:"amount,omitempty"` // synthetic token amount bridged in - AssetAddr string `protobuf:"bytes,6,opt,name=asset_addr,json=assetAddr,proto3" json:"asset_addr,omitempty"` // address of erc20 token address on source chain - LogIndex string `protobuf:"bytes,7,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` // log index that originated the cross chain tx - TxType InboundTxType `protobuf:"varint,8,opt,name=tx_type,json=txType,proto3,enum=uexecutor.v1.InboundTxType" json:"tx_type,omitempty"` // inbound tx type - UniversalPayload *UniversalPayload `protobuf:"bytes,9,opt,name=universal_payload,json=universalPayload,proto3" json:"universal_payload,omitempty"` // payload is the universal payload to be executed - VerificationData string `protobuf:"bytes,10,opt,name=verification_data,json=verificationData,proto3" json:"verification_data,omitempty"` // verification_data is the bytes passed as verifier data for the given payload. + SourceChain string `protobuf:"bytes,1,opt,name=source_chain,json=sourceChain,proto3" json:"source_chain,omitempty"` // origin chain caip2 id (e.g. 
eip155:11155111) + TxHash string `protobuf:"bytes,2,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` // unique tx hash / identifier from source chain + Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` // sender address on source chain + Recipient string `protobuf:"bytes,4,opt,name=recipient,proto3" json:"recipient,omitempty"` // recipient address on destination chain + Amount string `protobuf:"bytes,5,opt,name=amount,proto3" json:"amount,omitempty"` // synthetic token amount bridged in + AssetAddr string `protobuf:"bytes,6,opt,name=asset_addr,json=assetAddr,proto3" json:"asset_addr,omitempty"` // address of erc20 token address on source chain + LogIndex string `protobuf:"bytes,7,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` // log index that originated the cross chain tx + TxType TxType `protobuf:"varint,8,opt,name=tx_type,json=txType,proto3,enum=uexecutor.v1.TxType" json:"tx_type,omitempty"` // inbound tx type + UniversalPayload *UniversalPayload `protobuf:"bytes,9,opt,name=universal_payload,json=universalPayload,proto3" json:"universal_payload,omitempty"` // payload is the universal payload to be executed + VerificationData string `protobuf:"bytes,10,opt,name=verification_data,json=verificationData,proto3" json:"verification_data,omitempty"` // verification_data is the bytes passed as verifier data for the given payload. 
+ RevertInstructions *RevertInstructions `protobuf:"bytes,11,opt,name=revert_instructions,json=revertInstructions,proto3" json:"revert_instructions,omitempty"` // revert config } func (x *Inbound) Reset() { @@ -6514,11 +8480,11 @@ func (x *Inbound) GetLogIndex() string { return "" } -func (x *Inbound) GetTxType() InboundTxType { +func (x *Inbound) GetTxType() TxType { if x != nil { return x.TxType } - return InboundTxType_UNSPECIFIED_TX + return TxType_UNSPECIFIED_TX } func (x *Inbound) GetUniversalPayload() *UniversalPayload { @@ -6535,6 +8501,13 @@ func (x *Inbound) GetVerificationData() string { return "" } +func (x *Inbound) GetRevertInstructions() *RevertInstructions { + if x != nil { + return x.RevertInstructions + } + return nil +} + type PCTx struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -6610,22 +8583,134 @@ func (x *PCTx) GetErrorMsg() string { return "" } +type OutboundObservation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` // whether execution succeeded + BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` // block height on external chain + TxHash string `protobuf:"bytes,3,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` // external chain tx hash + ErrorMsg string `protobuf:"bytes,4,opt,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"` +} + +func (x *OutboundObservation) Reset() { + *x = OutboundObservation{} + if protoimpl.UnsafeEnabled { + mi := &file_uexecutor_v1_types_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutboundObservation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutboundObservation) ProtoMessage() {} + +// Deprecated: Use OutboundObservation.ProtoReflect.Descriptor 
instead. +func (*OutboundObservation) Descriptor() ([]byte, []int) { + return file_uexecutor_v1_types_proto_rawDescGZIP(), []int{7} +} + +func (x *OutboundObservation) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *OutboundObservation) GetBlockHeight() uint64 { + if x != nil { + return x.BlockHeight + } + return 0 +} + +func (x *OutboundObservation) GetTxHash() string { + if x != nil { + return x.TxHash + } + return "" +} + +func (x *OutboundObservation) GetErrorMsg() string { + if x != nil { + return x.ErrorMsg + } + return "" +} + +type OriginatingPcTx struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TxHash string `protobuf:"bytes,1,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` // pc_tx hash that initiated the outbound + LogIndex string `protobuf:"bytes,2,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` // log_index that initiated the outbound +} + +func (x *OriginatingPcTx) Reset() { + *x = OriginatingPcTx{} + if protoimpl.UnsafeEnabled { + mi := &file_uexecutor_v1_types_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OriginatingPcTx) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OriginatingPcTx) ProtoMessage() {} + +// Deprecated: Use OriginatingPcTx.ProtoReflect.Descriptor instead. 
+func (*OriginatingPcTx) Descriptor() ([]byte, []int) { + return file_uexecutor_v1_types_proto_rawDescGZIP(), []int{8} +} + +func (x *OriginatingPcTx) GetTxHash() string { + if x != nil { + return x.TxHash + } + return "" +} + +func (x *OriginatingPcTx) GetLogIndex() string { + if x != nil { + return x.LogIndex + } + return "" +} + type OutboundTx struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DestinationChain string `protobuf:"bytes,1,opt,name=destination_chain,json=destinationChain,proto3" json:"destination_chain,omitempty"` // chain where this outbound is sent - TxHash string `protobuf:"bytes,2,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` // outbound tx hash on destination chain - Recipient string `protobuf:"bytes,3,opt,name=recipient,proto3" json:"recipient,omitempty"` // recipient on destination chain - Amount string `protobuf:"bytes,4,opt,name=amount,proto3" json:"amount,omitempty"` // token amount or payload - AssetAddr string `protobuf:"bytes,5,opt,name=asset_addr,json=assetAddr,proto3" json:"asset_addr,omitempty"` // token contract if applicable + DestinationChain string `protobuf:"bytes,1,opt,name=destination_chain,json=destinationChain,proto3" json:"destination_chain,omitempty"` // chain where this outbound is sent + Recipient string `protobuf:"bytes,2,opt,name=recipient,proto3" json:"recipient,omitempty"` // recipient on destination chain + Amount string `protobuf:"bytes,3,opt,name=amount,proto3" json:"amount,omitempty"` // token amount + ExternalAssetAddr string `protobuf:"bytes,4,opt,name=external_asset_addr,json=externalAssetAddr,proto3" json:"external_asset_addr,omitempty"` // asset addr destination chain + Prc20AssetAddr string `protobuf:"bytes,5,opt,name=prc20_asset_addr,json=prc20AssetAddr,proto3" json:"prc20_asset_addr,omitempty"` // prc20 contract addr + Sender string `protobuf:"bytes,6,opt,name=sender,proto3" json:"sender,omitempty"` // sender of the outbound tx + 
Payload string `protobuf:"bytes,7,opt,name=payload,proto3" json:"payload,omitempty"` // payload to be executed + GasLimit string `protobuf:"bytes,8,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` // gas limit to be used for the outbound tx + TxType TxType `protobuf:"varint,9,opt,name=tx_type,json=txType,proto3,enum=uexecutor.v1.TxType" json:"tx_type,omitempty"` // outbound tx type + PcTx *OriginatingPcTx `protobuf:"bytes,10,opt,name=pc_tx,json=pcTx,proto3" json:"pc_tx,omitempty"` // pc_tx that originated the outbound + ObservedTx *OutboundObservation `protobuf:"bytes,11,opt,name=observed_tx,json=observedTx,proto3" json:"observed_tx,omitempty"` // observed tx on destination chain + Id string `protobuf:"bytes,12,opt,name=id,proto3" json:"id,omitempty"` // id of outbound tx + OutboundStatus Status `protobuf:"varint,13,opt,name=outbound_status,json=outboundStatus,proto3,enum=uexecutor.v1.Status" json:"outbound_status,omitempty"` // status of outbound tx + RevertInstructions *RevertInstructions `protobuf:"bytes,14,opt,name=revert_instructions,json=revertInstructions,proto3" json:"revert_instructions,omitempty"` + PcRevertExecution *PCTx `protobuf:"bytes,15,opt,name=pc_revert_execution,json=pcRevertExecution,proto3" json:"pc_revert_execution,omitempty"` } func (x *OutboundTx) Reset() { *x = OutboundTx{} if protoimpl.UnsafeEnabled { - mi := &file_uexecutor_v1_types_proto_msgTypes[7] + mi := &file_uexecutor_v1_types_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6639,7 +8724,7 @@ func (*OutboundTx) ProtoMessage() {} // Deprecated: Use OutboundTx.ProtoReflect.Descriptor instead. 
func (*OutboundTx) Descriptor() ([]byte, []int) { - return file_uexecutor_v1_types_proto_rawDescGZIP(), []int{7} + return file_uexecutor_v1_types_proto_rawDescGZIP(), []int{9} } func (x *OutboundTx) GetDestinationChain() string { @@ -6649,49 +8734,120 @@ func (x *OutboundTx) GetDestinationChain() string { return "" } -func (x *OutboundTx) GetTxHash() string { +func (x *OutboundTx) GetRecipient() string { if x != nil { - return x.TxHash + return x.Recipient } return "" } -func (x *OutboundTx) GetRecipient() string { +func (x *OutboundTx) GetAmount() string { if x != nil { - return x.Recipient + return x.Amount } return "" } -func (x *OutboundTx) GetAmount() string { +func (x *OutboundTx) GetExternalAssetAddr() string { if x != nil { - return x.Amount + return x.ExternalAssetAddr } return "" } -func (x *OutboundTx) GetAssetAddr() string { +func (x *OutboundTx) GetPrc20AssetAddr() string { if x != nil { - return x.AssetAddr + return x.Prc20AssetAddr + } + return "" +} + +func (x *OutboundTx) GetSender() string { + if x != nil { + return x.Sender + } + return "" +} + +func (x *OutboundTx) GetPayload() string { + if x != nil { + return x.Payload + } + return "" +} + +func (x *OutboundTx) GetGasLimit() string { + if x != nil { + return x.GasLimit + } + return "" +} + +func (x *OutboundTx) GetTxType() TxType { + if x != nil { + return x.TxType + } + return TxType_UNSPECIFIED_TX +} + +func (x *OutboundTx) GetPcTx() *OriginatingPcTx { + if x != nil { + return x.PcTx + } + return nil +} + +func (x *OutboundTx) GetObservedTx() *OutboundObservation { + if x != nil { + return x.ObservedTx + } + return nil +} + +func (x *OutboundTx) GetId() string { + if x != nil { + return x.Id } return "" } +func (x *OutboundTx) GetOutboundStatus() Status { + if x != nil { + return x.OutboundStatus + } + return Status_UNSPECIFIED +} + +func (x *OutboundTx) GetRevertInstructions() *RevertInstructions { + if x != nil { + return x.RevertInstructions + } + return nil +} + +func (x *OutboundTx) 
GetPcRevertExecution() *PCTx { + if x != nil { + return x.PcRevertExecution + } + return nil +} + type UniversalTx struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - InboundTx *Inbound `protobuf:"bytes,1,opt,name=inbound_tx,json=inboundTx,proto3" json:"inbound_tx,omitempty"` // Full inbound tx data - PcTx []*PCTx `protobuf:"bytes,2,rep,name=pc_tx,json=pcTx,proto3" json:"pc_tx,omitempty"` // Execution details on Push Chain - OutboundTx *OutboundTx `protobuf:"bytes,3,opt,name=outbound_tx,json=outboundTx,proto3" json:"outbound_tx,omitempty"` // Outbound tx triggered by this tx - UniversalStatus UniversalTxStatus `protobuf:"varint,4,opt,name=universal_status,json=universalStatus,proto3,enum=uexecutor.v1.UniversalTxStatus" json:"universal_status,omitempty"` // Current status + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + InboundTx *Inbound `protobuf:"bytes,2,opt,name=inbound_tx,json=inboundTx,proto3" json:"inbound_tx,omitempty"` // Full inbound tx data + PcTx []*PCTx `protobuf:"bytes,3,rep,name=pc_tx,json=pcTx,proto3" json:"pc_tx,omitempty"` // Execution details on Push Chain + OutboundTx []*OutboundTx `protobuf:"bytes,4,rep,name=outbound_tx,json=outboundTx,proto3" json:"outbound_tx,omitempty"` // Outbound tx triggered by this tx + UniversalStatus UniversalTxStatus `protobuf:"varint,5,opt,name=universal_status,json=universalStatus,proto3,enum=uexecutor.v1.UniversalTxStatus" json:"universal_status,omitempty"` // Current status } func (x *UniversalTx) Reset() { *x = UniversalTx{} if protoimpl.UnsafeEnabled { - mi := &file_uexecutor_v1_types_proto_msgTypes[8] + mi := &file_uexecutor_v1_types_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6705,7 +8861,14 @@ func (*UniversalTx) ProtoMessage() {} // Deprecated: Use UniversalTx.ProtoReflect.Descriptor instead. 
func (*UniversalTx) Descriptor() ([]byte, []int) { - return file_uexecutor_v1_types_proto_rawDescGZIP(), []int{8} + return file_uexecutor_v1_types_proto_rawDescGZIP(), []int{10} +} + +func (x *UniversalTx) GetId() string { + if x != nil { + return x.Id + } + return "" } func (x *UniversalTx) GetInboundTx() *Inbound { @@ -6722,7 +8885,7 @@ func (x *UniversalTx) GetPcTx() []*PCTx { return nil } -func (x *UniversalTx) GetOutboundTx() *OutboundTx { +func (x *UniversalTx) GetOutboundTx() []*OutboundTx { if x != nil { return x.OutboundTx } @@ -6789,122 +8952,183 @@ var file_uexecutor_v1_types_proto_rawDesc = []byte{ 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x3a, 0x28, 0x98, 0xa0, 0x1f, 0x00, 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, 0xb0, 0x2a, 0x1b, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x22, 0x3d, 0x0a, 0x0d, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0x9f, 0x03, 0x0a, 0x07, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, - 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, - 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, - 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 
0x09, 0x52, 0x09, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, - 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x73, 0x73, 0x65, - 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, - 0x65, 0x78, 0x12, 0x34, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x78, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x06, 0x74, 0x78, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x75, 0x6e, 0x69, 0x76, - 0x65, 0x72, 0x73, 0x61, 0x6c, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x52, 0x10, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x50, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, - 0x74, 0x61, 0x3a, 0x1e, 0x98, 0xa0, 0x1f, 0x00, 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, 0xb0, 0x2a, - 0x11, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x62, 0x6f, 0x75, - 0x6e, 0x64, 0x22, 0xc8, 0x01, 0x0a, 0x04, 0x50, 0x43, 0x54, 0x78, 0x12, 0x17, 0x0a, 0x07, 0x74, - 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x78, - 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, - 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, - 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x3a, - 0x1c, 0x98, 0xa0, 0x1f, 0x00, 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, 0xb0, 0x2a, 0x0f, 0x75, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x70, 0x63, 0x5f, 0x74, 0x78, 0x22, 0xcb, 0x01, - 0x0a, 0x0a, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x78, 0x12, 0x2b, 0x0a, 0x11, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, - 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, - 0x73, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, - 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x73, 
0x73, 0x65, - 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x73, - 0x73, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x3a, 0x22, 0x98, 0xa0, 0x1f, 0x00, 0xe8, 0xa0, 0x1f, - 0x01, 0x8a, 0xe7, 0xb0, 0x2a, 0x15, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, - 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x74, 0x78, 0x22, 0x98, 0x02, 0x0a, 0x0b, - 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x54, 0x78, 0x12, 0x34, 0x0a, 0x0a, 0x69, - 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x49, - 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x09, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, - 0x78, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x63, 0x5f, 0x74, 0x78, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x43, 0x54, 0x78, 0x52, 0x04, 0x70, 0x63, 0x54, 0x78, 0x12, 0x39, 0x0a, 0x0b, 0x6f, 0x75, - 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x74, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, - 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x78, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x62, 0x6f, - 0x75, 0x6e, 0x64, 0x54, 0x78, 0x12, 0x4a, 0x0a, 0x10, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, - 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x1f, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x55, - 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x54, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x0f, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x3a, 0x23, 0x98, 0xa0, 0x1f, 0x00, 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, 0xb0, 0x2a, 0x16, - 0x75, 0x65, 0x78, 0x65, 
0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, - 0x73, 0x61, 0x6c, 0x5f, 0x74, 0x78, 0x2a, 0x47, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x73, 0x69, - 0x67, 0x6e, 0x65, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x54, - 0x78, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x10, 0x01, 0x2a, - 0x83, 0x02, 0x0a, 0x11, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x54, 0x78, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x1f, 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, - 0x41, 0x4c, 0x5f, 0x54, 0x58, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x49, 0x4e, - 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, - 0x1d, 0x0a, 0x19, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x49, 0x4e, 0x42, 0x4f, 0x55, - 0x4e, 0x44, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x17, - 0x0a, 0x13, 0x50, 0x43, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x55, - 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x43, 0x5f, 0x45, 0x58, - 0x45, 0x43, 0x55, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, - 0x15, 0x0a, 0x11, 0x50, 0x43, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x52, 0x45, - 0x56, 0x45, 0x52, 0x54, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, - 0x4e, 0x44, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, - 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, - 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x4f, 0x55, 0x54, 0x42, 
0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x46, - 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, - 0x4c, 0x45, 0x44, 0x10, 0x09, 0x2a, 0x35, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, - 0x09, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x63, 0x0a, 0x0d, - 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x78, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, - 0x0e, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x5f, 0x54, 0x58, 0x10, - 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x41, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x55, - 0x4e, 0x44, 0x53, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x46, 0x55, 0x4e, 0x44, 0x53, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x50, 0x41, 0x59, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, - 0x47, 0x41, 0x53, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x50, 0x41, 0x59, 0x4c, 0x4f, 0x41, 0x44, 0x10, - 0x04, 0x42, 0xb2, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x70, 0x75, 0x73, 0x68, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2f, 0x70, 0x75, 0x73, 0x68, 0x2d, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x75, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x75, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x6f, 0x72, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x55, 0x58, 0x58, 0xaa, 0x02, 0x0c, - 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0c, 0x55, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x18, 0x55, 0x65, - 
0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x22, 0x63, 0x0a, 0x12, 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x75, 0x6e, 0x64, 0x5f, + 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x66, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x3a, 0x26, + 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, 0xb0, 0x2a, 0x1d, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x2f, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xeb, 0x03, 0x0a, 0x07, 0x49, 0x6e, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, + 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x63, 0x69, 0x70, + 0x69, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x73, 0x73, 0x65, 0x74, 0x41, 
0x64, 0x64, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x6c, + 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2d, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x75, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x78, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x06, 0x74, 0x78, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x75, 0x6e, 0x69, 0x76, 0x65, + 0x72, 0x73, 0x61, 0x6c, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x50, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x10, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x50, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x73, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x12, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x1e, 0x98, 0xa0, 0x1f, 0x00, 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, + 0xb0, 0x2a, 0x11, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x22, 0xc8, 0x01, 0x0a, 0x04, 0x50, 0x43, 0x54, 0x78, 0x12, 
0x17, 0x0a, + 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x19, + 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, + 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, + 0x67, 0x3a, 0x1c, 0x98, 0xa0, 0x1f, 0x00, 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, 0xb0, 0x2a, 0x0f, + 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x70, 0x63, 0x5f, 0x74, 0x78, 0x22, + 0xb1, 0x01, 0x0a, 0x13, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4f, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b, 0x0a, + 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x65, 0x72, 
0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x3a, 0x27, 0xe8, 0xa0, 0x1f, 0x01, + 0x8a, 0xe7, 0xb0, 0x2a, 0x1e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x6f, + 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x6d, 0x0a, 0x0f, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6e, 0x67, 0x50, 0x63, 0x54, 0x78, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, + 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x24, 0xe8, 0xa0, + 0x1f, 0x01, 0x8a, 0xe7, 0xb0, 0x2a, 0x1b, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, + 0x2f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x63, 0x5f, + 0x74, 0x78, 0x22, 0xc9, 0x05, 0x0a, 0x0a, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, + 0x78, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x1c, + 0x0a, 0x09, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x5f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x11, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x73, 0x73, 0x65, 0x74, + 0x41, 0x64, 0x64, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x72, 
0x63, 0x32, 0x30, 0x5f, 0x61, 0x73, + 0x73, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x70, 0x72, 0x63, 0x32, 0x30, 0x41, 0x73, 0x73, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x2d, 0x0a, + 0x07, 0x74, 0x78, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, + 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x78, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x78, 0x54, 0x79, 0x70, 0x65, 0x12, 0x32, 0x0a, 0x05, + 0x70, 0x63, 0x5f, 0x74, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x75, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x63, 0x54, 0x78, 0x52, 0x04, 0x70, 0x63, 0x54, 0x78, + 0x12, 0x42, 0x0a, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x78, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4f, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x54, 0x78, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x3d, 0x0a, 0x0f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, + 
0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x0e, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x69, 0x6e, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x12, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x13, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x76, + 0x65, 0x72, 0x74, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x43, 0x54, 0x78, 0x52, 0x11, 0x70, 0x63, 0x52, 0x65, 0x76, 0x65, 0x72, + 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x22, 0x98, 0xa0, 0x1f, 0x00, + 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, 0xb0, 0x2a, 0x15, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x2f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x74, 0x78, 0x22, 0xa8, + 0x02, 0x0a, 0x0b, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x54, 0x78, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x34, + 0x0a, 0x0a, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x74, 0x78, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x09, 0x69, 0x6e, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x54, 0x78, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x63, 0x5f, 0x74, 0x78, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 
0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x43, 0x54, 0x78, 0x52, 0x04, 0x70, 0x63, 0x54, 0x78, 0x12, 0x39, 0x0a, + 0x0b, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x74, 0x78, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x78, 0x52, 0x0a, 0x6f, 0x75, + 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x78, 0x12, 0x4a, 0x0a, 0x10, 0x75, 0x6e, 0x69, 0x76, + 0x65, 0x72, 0x73, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x54, 0x78, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x0f, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x3a, 0x23, 0x98, 0xa0, 0x1f, 0x00, 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, + 0xb0, 0x2a, 0x16, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x75, 0x6e, 0x69, + 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x5f, 0x74, 0x78, 0x2a, 0x47, 0x0a, 0x10, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x12, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, + 0x61, 0x6c, 0x54, 0x78, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x10, 0x01, 0x2a, 0x83, 0x02, 0x0a, 0x11, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, + 0x54, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x1f, 0x55, 0x4e, 0x49, 0x56, + 0x45, 0x52, 0x53, 0x41, 0x4c, 0x5f, 0x54, 0x58, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 
0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x49, 0x4e, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, + 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x49, 0x4e, + 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, + 0x02, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x43, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x45, 0x44, + 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x43, + 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, + 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x43, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, + 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x54, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x55, 0x54, + 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, + 0x14, 0x0a, 0x10, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x53, 0x55, 0x43, 0x43, + 0x45, 0x53, 0x53, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, + 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, + 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44, 0x10, 0x09, 0x2a, 0x42, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, + 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x42, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0c, + 0x0a, 0x08, 0x52, 0x45, 0x56, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x03, 0x2a, 0x7d, 0x0a, 0x06, + 0x54, 0x78, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x5f, 0x54, 0x58, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x41, + 0x53, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, 0x53, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x50, + 0x41, 0x59, 
0x4c, 0x4f, 0x41, 0x44, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x55, 0x4e, 0x44, + 0x53, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x46, 0x55, 0x4e, 0x44, 0x53, 0x5f, 0x41, 0x4e, 0x44, + 0x5f, 0x50, 0x41, 0x59, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x41, + 0x59, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x42, 0x4f, 0x55, + 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x54, 0x10, 0x06, 0x42, 0xb2, 0x01, 0x0a, 0x10, + 0x63, 0x6f, 0x6d, 0x2e, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, + 0x42, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x73, 0x68, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x2f, 0x70, 0x75, 0x73, 0x68, 0x2d, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2d, + 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x75, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x76, + 0x31, 0xa2, 0x02, 0x03, 0x55, 0x58, 0x58, 0xaa, 0x02, 0x0c, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0c, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x18, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, + 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x0d, 0x55, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -6920,36 +9144,44 @@ func file_uexecutor_v1_types_proto_rawDescGZIP() []byte { } var file_uexecutor_v1_types_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_uexecutor_v1_types_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_uexecutor_v1_types_proto_msgTypes = make([]protoimpl.MessageInfo, 11) var file_uexecutor_v1_types_proto_goTypes = []interface{}{ - 
(VerificationType)(0), // 0: uexecutor.v1.VerificationType - (UniversalTxStatus)(0), // 1: uexecutor.v1.UniversalTxStatus - (Status)(0), // 2: uexecutor.v1.Status - (InboundTxType)(0), // 3: uexecutor.v1.InboundTxType - (*Params)(nil), // 4: uexecutor.v1.Params - (*UniversalPayload)(nil), // 5: uexecutor.v1.UniversalPayload - (*MigrationPayload)(nil), // 6: uexecutor.v1.MigrationPayload - (*UniversalAccountId)(nil), // 7: uexecutor.v1.UniversalAccountId - (*InboundStatus)(nil), // 8: uexecutor.v1.InboundStatus - (*Inbound)(nil), // 9: uexecutor.v1.Inbound - (*PCTx)(nil), // 10: uexecutor.v1.PCTx - (*OutboundTx)(nil), // 11: uexecutor.v1.OutboundTx - (*UniversalTx)(nil), // 12: uexecutor.v1.UniversalTx + (VerificationType)(0), // 0: uexecutor.v1.VerificationType + (UniversalTxStatus)(0), // 1: uexecutor.v1.UniversalTxStatus + (Status)(0), // 2: uexecutor.v1.Status + (TxType)(0), // 3: uexecutor.v1.TxType + (*Params)(nil), // 4: uexecutor.v1.Params + (*UniversalPayload)(nil), // 5: uexecutor.v1.UniversalPayload + (*MigrationPayload)(nil), // 6: uexecutor.v1.MigrationPayload + (*UniversalAccountId)(nil), // 7: uexecutor.v1.UniversalAccountId + (*RevertInstructions)(nil), // 8: uexecutor.v1.RevertInstructions + (*Inbound)(nil), // 9: uexecutor.v1.Inbound + (*PCTx)(nil), // 10: uexecutor.v1.PCTx + (*OutboundObservation)(nil), // 11: uexecutor.v1.OutboundObservation + (*OriginatingPcTx)(nil), // 12: uexecutor.v1.OriginatingPcTx + (*OutboundTx)(nil), // 13: uexecutor.v1.OutboundTx + (*UniversalTx)(nil), // 14: uexecutor.v1.UniversalTx } var file_uexecutor_v1_types_proto_depIdxs = []int32{ 0, // 0: uexecutor.v1.UniversalPayload.v_type:type_name -> uexecutor.v1.VerificationType - 2, // 1: uexecutor.v1.InboundStatus.status:type_name -> uexecutor.v1.Status - 3, // 2: uexecutor.v1.Inbound.tx_type:type_name -> uexecutor.v1.InboundTxType - 5, // 3: uexecutor.v1.Inbound.universal_payload:type_name -> uexecutor.v1.UniversalPayload - 9, // 4: 
uexecutor.v1.UniversalTx.inbound_tx:type_name -> uexecutor.v1.Inbound - 10, // 5: uexecutor.v1.UniversalTx.pc_tx:type_name -> uexecutor.v1.PCTx - 11, // 6: uexecutor.v1.UniversalTx.outbound_tx:type_name -> uexecutor.v1.OutboundTx - 1, // 7: uexecutor.v1.UniversalTx.universal_status:type_name -> uexecutor.v1.UniversalTxStatus - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name + 3, // 1: uexecutor.v1.Inbound.tx_type:type_name -> uexecutor.v1.TxType + 5, // 2: uexecutor.v1.Inbound.universal_payload:type_name -> uexecutor.v1.UniversalPayload + 8, // 3: uexecutor.v1.Inbound.revert_instructions:type_name -> uexecutor.v1.RevertInstructions + 3, // 4: uexecutor.v1.OutboundTx.tx_type:type_name -> uexecutor.v1.TxType + 12, // 5: uexecutor.v1.OutboundTx.pc_tx:type_name -> uexecutor.v1.OriginatingPcTx + 11, // 6: uexecutor.v1.OutboundTx.observed_tx:type_name -> uexecutor.v1.OutboundObservation + 2, // 7: uexecutor.v1.OutboundTx.outbound_status:type_name -> uexecutor.v1.Status + 8, // 8: uexecutor.v1.OutboundTx.revert_instructions:type_name -> uexecutor.v1.RevertInstructions + 10, // 9: uexecutor.v1.OutboundTx.pc_revert_execution:type_name -> uexecutor.v1.PCTx + 9, // 10: uexecutor.v1.UniversalTx.inbound_tx:type_name -> uexecutor.v1.Inbound + 10, // 11: uexecutor.v1.UniversalTx.pc_tx:type_name -> uexecutor.v1.PCTx + 13, // 12: uexecutor.v1.UniversalTx.outbound_tx:type_name -> uexecutor.v1.OutboundTx + 1, // 13: uexecutor.v1.UniversalTx.universal_status:type_name -> uexecutor.v1.UniversalTxStatus + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name } 
func init() { file_uexecutor_v1_types_proto_init() } @@ -7007,7 +9239,7 @@ func file_uexecutor_v1_types_proto_init() { } } file_uexecutor_v1_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InboundStatus); i { + switch v := v.(*RevertInstructions); i { case 0: return &v.state case 1: @@ -7043,7 +9275,7 @@ func file_uexecutor_v1_types_proto_init() { } } file_uexecutor_v1_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutboundTx); i { + switch v := v.(*OutboundObservation); i { case 0: return &v.state case 1: @@ -7055,6 +9287,30 @@ func file_uexecutor_v1_types_proto_init() { } } file_uexecutor_v1_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OriginatingPcTx); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_uexecutor_v1_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutboundTx); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_uexecutor_v1_types_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UniversalTx); i { case 0: return &v.state @@ -7073,7 +9329,7 @@ func file_uexecutor_v1_types_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_uexecutor_v1_types_proto_rawDesc, NumEnums: 4, - NumMessages: 9, + NumMessages: 11, NumExtensions: 0, NumServices: 0, }, diff --git a/app/app.go b/app/app.go index 9e95a7f9..d173a48a 100755 --- a/app/app.go +++ b/app/app.go @@ -770,6 +770,8 @@ func NewChainApp( ), ) + app.EVMKeeper.SetHooks(uexecutorkeeper.NewEVMHooks(app.UexecutorKeeper)) + // NOTE: we are adding all available EVM extensions. // Not all of them need to be enabled, which can be configured on a per-chain basis. 
corePrecompiles := NewAvailableStaticPrecompiles( diff --git a/app/txpolicy/gasless.go b/app/txpolicy/gasless.go index 06854833..313375c8 100644 --- a/app/txpolicy/gasless.go +++ b/app/txpolicy/gasless.go @@ -20,6 +20,7 @@ func IsGaslessTx(tx sdk.Tx) bool { sdk.MsgTypeURL(&uexecutortypes.MsgDeployUEA{}), sdk.MsgTypeURL(&uexecutortypes.MsgMintPC{}), sdk.MsgTypeURL(&uexecutortypes.MsgVoteInbound{}), + sdk.MsgTypeURL(&uexecutortypes.MsgVoteOutbound{}), sdk.MsgTypeURL(&uexecutortypes.MsgVoteGasPrice{}), sdk.MsgTypeURL(&utsstypes.MsgVoteTssKeyProcess{}), } diff --git a/cmd/puniversald/authz.go b/cmd/puniversald/authz.go deleted file mode 100644 index 63e3af13..00000000 --- a/cmd/puniversald/authz.go +++ /dev/null @@ -1,53 +0,0 @@ -package main - -import ( - "github.com/spf13/cobra" - - authzcmd "github.com/pushchain/push-chain-node/cmd/puniversald/authz" - "github.com/pushchain/push-chain-node/universalClient/config" - "github.com/pushchain/push-chain-node/universalClient/constant" -) - -var ( - nodeEndpoint string - chainID string -) - -// authzCmd returns the authz command with all subcommands -func authzCmd() *cobra.Command { - // Load config to get default endpoint - cfg, err := config.Load(constant.DefaultNodeHome) - defaultEndpoint := "localhost" - if err == nil && len(cfg.PushChainGRPCURLs) > 0 { - // Use first configured URL as default (should be clean base URL without port) - defaultEndpoint = cfg.PushChainGRPCURLs[0] - } - - cmd := &cobra.Command{ - Use: "authz", - Short: "Use AuthZ grants for Universal Validator hot keys", - Long: ` -The authz commands allow you to use authorization grants for Universal Validator operations. -Hot keys can execute transactions on behalf of operator accounts using granted permissions. - -Note: Grant creation and revocation are handled by the core validator (pchaind). -Use the local-validator-manager setup-container-authz command to create grants. 
- -Available Commands: - list List all grants for an account - verify Verify hot key has required permissions - exec Execute a transaction using AuthZ grants -`, - } - - // Add persistent flags - cmd.PersistentFlags().StringVar(&nodeEndpoint, "node", defaultEndpoint, "Base URL for Push Chain node (gRPC: :9090, RPC: :26657)") - cmd.PersistentFlags().StringVar(&chainID, "chain-id", "pchain", "Chain ID for transactions") - - // Add subcommands - only operations that universal validator should perform - cmd.AddCommand(authzcmd.ListCmd(&nodeEndpoint, &chainID)) - cmd.AddCommand(authzcmd.VerifyCmd(&nodeEndpoint, &chainID)) - cmd.AddCommand(authzcmd.ExecCmd(&nodeEndpoint, &chainID)) - - return cmd -} \ No newline at end of file diff --git a/cmd/puniversald/authz/common.go b/cmd/puniversald/authz/common.go deleted file mode 100644 index ad4e4bbf..00000000 --- a/cmd/puniversald/authz/common.go +++ /dev/null @@ -1,77 +0,0 @@ -package authz - -import ( - "fmt" - - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/codec" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - "github.com/cosmos/cosmos-sdk/types/tx/signing" - "github.com/cosmos/cosmos-sdk/x/auth/tx" - "github.com/cosmos/cosmos-sdk/x/authz" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - sdk "github.com/cosmos/cosmos-sdk/types" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/pushchain/push-chain-node/universalClient/keys" -) - - -// setupClientContext creates a client context with all required interfaces registered -func setupClientContext(kb keyring.Keyring, chainID, rpcEndpoint string) (client.Context, error) { - // Create gRPC connection - conn, err := grpc.NewClient(rpcEndpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return 
client.Context{}, fmt.Errorf("failed to connect to gRPC endpoint: %w", err) - } - - // Setup codec with all required interfaces using shared EVM registry - registry := keys.CreateInterfaceRegistryWithEVMSupport() - authz.RegisterInterfaces(registry) - authtypes.RegisterInterfaces(registry) - banktypes.RegisterInterfaces(registry) - stakingtypes.RegisterInterfaces(registry) - govtypes.RegisterInterfaces(registry) - - cdc := codec.NewProtoCodec(registry) - - // Create TxConfig - txConfig := tx.NewTxConfig(cdc, []signing.SignMode{signing.SignMode_SIGN_MODE_DIRECT}) - - // Create client context - return client.Context{}. - WithCodec(cdc). - WithInterfaceRegistry(registry). - WithChainID(chainID). - WithKeyring(kb). - WithGRPCClient(conn). - WithTxConfig(txConfig), nil -} - - - -// resolveAccountAddress resolves account string to address (can be address or key name) -func resolveAccountAddress(account string, kb keyring.Keyring) (sdk.AccAddress, error) { - // Try to parse as address first, then as key name - if addr, err := sdk.AccAddressFromBech32(account); err == nil { - return addr, nil - } - - // Try as key name - record, err := kb.Key(account) - if err != nil { - return nil, fmt.Errorf("account '%s' not found as address or key name: %w", account, err) - } - - return record.GetAddress() -} - -// ensureGRPCPort appends the standard gRPC port to the base URL -func ensureGRPCPort(endpoint string) string { - // Config contains clean base URLs, append standard gRPC port - return endpoint + ":9090" -} \ No newline at end of file diff --git a/cmd/puniversald/authz/exec.go b/cmd/puniversald/authz/exec.go deleted file mode 100644 index 1deca41f..00000000 --- a/cmd/puniversald/authz/exec.go +++ /dev/null @@ -1,158 +0,0 @@ -package authz - -import ( - "context" - "fmt" - - rpchttp "github.com/cometbft/cometbft/rpc/client/http" - "github.com/rs/zerolog" - "github.com/spf13/cobra" - - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - sdk 
"github.com/cosmos/cosmos-sdk/types" - - uauthz "github.com/pushchain/push-chain-node/universalClient/authz" - "github.com/pushchain/push-chain-node/universalClient/config" - "github.com/pushchain/push-chain-node/universalClient/constant" - "github.com/pushchain/push-chain-node/universalClient/keys" -) - -// ExecCmd creates the authz exec command -func ExecCmd(rpcEndpoint, chainID *string) *cobra.Command { - var gasLimit uint64 = 300000 - var feeAmount = "300000000000000upc" - var memo string - - cmd := &cobra.Command{ - Use: "exec [args...]", - Short: "Execute a transaction using AuthZ grants", - Long: ` -Execute a transaction using AuthZ permissions. -The grantee (hot key) must have been granted permission to execute the specified message type. - -Note: MsgVoteInbound is a gasless transaction type and does not require gas or fees by default. -You can override the gas and fee values using the --gas and --fees flags if needed. - -Supported message types: - /uexecutor.v1.MsgVoteInbound - - -Example: - puniversald authz exec container-hotkey /uexecutor.v1.MsgVoteInbound push1signer... 
eip155:11155111 0x123abc 0xsender 0xrecipient 1000 0xasset 1 1 -`, - Args: cobra.MinimumNArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return runExecCommand(args[0], args[1], args[2:], *rpcEndpoint, *chainID, gasLimit, feeAmount, memo) - }, - } - - // Add command flags - cmd.Flags().Uint64Var(&gasLimit, "gas", 300000, "Gas limit for the transaction") - cmd.Flags().StringVar(&feeAmount, "fees", "300000000000000upc", "Fee amount for the transaction") - cmd.Flags().StringVar(&memo, "memo", "", "Memo for the transaction") - - return cmd -} - -func runExecCommand(granteeKeyName, msgType string, msgArgs []string, rpcEndpoint, chainID string, gasLimit uint64, feeAmount, memo string) error { - ctx := context.Background() - - // Load config for keyring settings - cfg, err := config.Load(constant.DefaultNodeHome) - if err != nil { - return fmt.Errorf("failed to load config: %w", err) - } - - // Use the same keyring directory as the EVM key commands - keyringDir := constant.DefaultNodeHome - - // Create keyring using universalClient keyring (compatible with pchaind) - keyConfig := keys.KeyringConfig{ - HomeDir: keyringDir, - KeyringBackend: keys.KeyringBackend(cfg.KeyringBackend), - HotkeyName: granteeKeyName, - } - kb, _, err := keys.GetKeyringKeybase(keyConfig) - if err != nil { - return fmt.Errorf("failed to create keyring: %w", err) - } - - // Get grantee key - granteeRecord, err := kb.Key(granteeKeyName) - if err != nil { - return fmt.Errorf("grantee key '%s' not found: %w", granteeKeyName, err) - } - - granteeAddr, err := granteeRecord.GetAddress() - if err != nil { - return fmt.Errorf("failed to get grantee address: %w", err) - } - - // Create the inner message based on type - innerMsg, err := ParseMessageFromArgs(msgType, msgArgs) - if err != nil { - return err - } - - // Setup client context - clientCtx, err := setupClientContextForExec(kb, chainID, rpcEndpoint, granteeAddr, granteeKeyName) - if err != nil { - return fmt.Errorf("failed to setup 
client context: %w", err) - } - - // Create keys instance for the hot key - hotKeys := keys.NewKeysWithKeybase(kb, granteeAddr, granteeKeyName, "") - - // Create TxSigner for handling the transaction - // Create TxSigner for handling the transaction - logger := zerolog.New(nil).Level(zerolog.InfoLevel) - txSigner := uauthz.NewTxSigner(hotKeys, clientCtx, logger) - - // Parse fee amount - feeCoins, err := sdk.ParseCoinsNormalized(feeAmount) - if err != nil { - return fmt.Errorf("invalid fee amount: %w", err) - } - - fmt.Printf("🚀 AuthZ TX: executor=%s(%s) type=%s gas=%d fee=%s memo=%s\n", granteeAddr, granteeKeyName, msgType, gasLimit, feeCoins, memo) - - // Execute the transaction - res, err := txSigner.SignAndBroadcastAuthZTx(ctx, []sdk.Msg{innerMsg}, memo, gasLimit, feeCoins) - if err != nil { - return fmt.Errorf("failed to execute AuthZ transaction: %w", err) - } - - // Print transaction result - if res.Code != 0 { - fmt.Printf("⚠️ TX Failed (code %d): hash=%s error=%s\n", res.Code, res.TxHash, res.RawLog) - } else { - fmt.Printf("✅ TX Success: hash=%s gasUsed=%d/%d\n", res.TxHash, res.GasUsed, res.GasWanted) - } - - return nil -} - -// setupClientContextForExec creates a client context specifically for exec command with HTTP client -func setupClientContextForExec(kb keyring.Keyring, chainID, rpcEndpoint string, granteeAddr sdk.AccAddress, granteeKeyName string) (client.Context, error) { - // Assume rpcEndpoint is a clean base URL, append standard ports - // Setup basic client context with gRPC (standard port 9090) - grpcEndpoint := rpcEndpoint + ":9090" - clientCtx, err := setupClientContext(kb, chainID, grpcEndpoint) - if err != nil { - return client.Context{}, err - } - - // Create HTTP RPC client for broadcasting (standard port 26657) - rpcURL := "http://" + rpcEndpoint + ":26657" - httpClient, err := rpchttp.New(rpcURL, "/websocket") - if err != nil { - return client.Context{}, fmt.Errorf("failed to create RPC client: %w", err) - } - - // Enhance client 
context with additional settings for exec - return clientCtx. - WithClient(httpClient). - WithFromAddress(granteeAddr). - WithFromName(granteeKeyName). - WithBroadcastMode("sync"), nil -} diff --git a/cmd/puniversald/authz/list.go b/cmd/puniversald/authz/list.go deleted file mode 100644 index 1f1c1434..00000000 --- a/cmd/puniversald/authz/list.go +++ /dev/null @@ -1,118 +0,0 @@ -package authz - -import ( - "context" - "fmt" - - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/cosmos/cosmos-sdk/x/authz" - - "github.com/pushchain/push-chain-node/universalClient/config" - "github.com/pushchain/push-chain-node/universalClient/constant" - "github.com/pushchain/push-chain-node/universalClient/keys" -) - -// ListCmd creates the authz list command -func ListCmd(rpcEndpoint, chainID *string) *cobra.Command { - cmd := &cobra.Command{ - Use: "list ", - Short: "List all grants for an account", - Long: ` -List all authorization grants where the specified account is either granter or grantee. -This helps verify what permissions have been granted. - -Examples: - puniversald authz list push1abc... 
- puniversald authz list my-validator-key -`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runListCommand(args[0], *rpcEndpoint) - }, - } - - return cmd -} - -func runListCommand(account, rpcEndpoint string) error { - // Load config - cfg, err := config.Load(constant.DefaultNodeHome) - if err != nil { - return fmt.Errorf("failed to load config: %w", err) - } - - // Use the same keyring directory as the EVM key commands - keyringDir := constant.DefaultNodeHome - - // Create keyring - kb, err := keys.CreateKeyringFromConfig(keyringDir, nil, cfg.KeyringBackend) - if err != nil { - return fmt.Errorf("failed to create keyring: %w", err) - } - - // Resolve account to address - accountAddr, err := resolveAccountAddress(account, kb) - if err != nil { - return err - } - - // Ensure endpoint has gRPC port - grpcEndpoint := ensureGRPCPort(rpcEndpoint) - - // Create gRPC connection - conn, err := grpc.NewClient(grpcEndpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return fmt.Errorf("failed to connect to gRPC endpoint: %w", err) - } - defer func() { - if err := conn.Close(); err != nil { - fmt.Printf("Warning: failed to close gRPC connection: %v\n", err) - } - }() - - // Create authz query client - authzClient := authz.NewQueryClient(conn) - - ctx := context.Background() - - // Query grants by granter - fmt.Printf("Grants where %s is the GRANTER:\n", accountAddr) - granterResp, err := authzClient.GranterGrants(ctx, &authz.QueryGranterGrantsRequest{ - Granter: accountAddr.String(), - }) - if err == nil && len(granterResp.Grants) > 0 { - for _, grant := range granterResp.Grants { - fmt.Printf(" → To: %s\n", grant.Grantee) - fmt.Printf(" Authorization: %s\n", grant.Authorization.TypeUrl) - if grant.Expiration != nil { - fmt.Printf(" Expires: %s\n", grant.Expiration.String()) - } - fmt.Println() - } - } else { - fmt.Printf(" No grants found\n\n") - } - - // Query grants by grantee - 
fmt.Printf("Grants where %s is the GRANTEE:\n", accountAddr) - granteeResp, err := authzClient.GranteeGrants(ctx, &authz.QueryGranteeGrantsRequest{ - Grantee: accountAddr.String(), - }) - if err == nil && len(granteeResp.Grants) > 0 { - for _, grant := range granteeResp.Grants { - fmt.Printf(" → From: %s\n", grant.Granter) - fmt.Printf(" Authorization: %s\n", grant.Authorization.TypeUrl) - if grant.Expiration != nil { - fmt.Printf(" Expires: %s\n", grant.Expiration.String()) - } - fmt.Println() - } - } else { - fmt.Printf(" No grants found\n\n") - } - - return nil -} \ No newline at end of file diff --git a/cmd/puniversald/authz/messages.go b/cmd/puniversald/authz/messages.go deleted file mode 100644 index 026f0382..00000000 --- a/cmd/puniversald/authz/messages.go +++ /dev/null @@ -1,62 +0,0 @@ -package authz - -import ( - "fmt" - "strconv" - - sdk "github.com/cosmos/cosmos-sdk/types" - uetypes "github.com/pushchain/push-chain-node/x/uexecutor/types" -) - -// ParseMessageFromArgs parses command line arguments into a message -func ParseMessageFromArgs(msgType string, msgArgs []string) (sdk.Msg, error) { - switch msgType { - case "/uexecutor.v1.MsgVoteInbound": - if len(msgArgs) < 9 { - return nil, fmt.Errorf("MsgVoteInbound requires: ") - } - signerAddr, err := sdk.AccAddressFromBech32(msgArgs[0]) - if err != nil { - return nil, fmt.Errorf("invalid signer address: %w", err) - } - - // Parse tx type (0=UNSPECIFIED_TX, 1=GAS, 2=FUNDS, 3=FUNDS_AND_PAYLOAD, 4=GAS_AND_PAYLOAD) - txTypeInt, err := strconv.Atoi(msgArgs[8]) - if err != nil { - return nil, fmt.Errorf("invalid tx type (must be number 0-4): %w", err) - } - - var txType uetypes.InboundTxType - switch txTypeInt { - case 0: - txType = uetypes.InboundTxType_UNSPECIFIED_TX - case 1: - txType = uetypes.InboundTxType_GAS - case 2: - txType = uetypes.InboundTxType_FUNDS - case 3: - txType = uetypes.InboundTxType_FUNDS_AND_PAYLOAD - case 4: - txType = uetypes.InboundTxType_GAS_AND_PAYLOAD - default: - return nil, 
fmt.Errorf("invalid tx type: %d (must be 0-4)", txTypeInt) - } - - return &uetypes.MsgVoteInbound{ - Signer: signerAddr.String(), - Inbound: &uetypes.Inbound{ - SourceChain: msgArgs[1], - TxHash: msgArgs[2], - Sender: msgArgs[3], - Recipient: msgArgs[4], - Amount: msgArgs[5], - AssetAddr: msgArgs[6], - LogIndex: msgArgs[7], - TxType: txType, - }, - }, nil - - default: - return nil, fmt.Errorf("unsupported message type: %s", msgType) - } -} diff --git a/cmd/puniversald/authz/messages_test.go b/cmd/puniversald/authz/messages_test.go deleted file mode 100644 index 6b331d18..00000000 --- a/cmd/puniversald/authz/messages_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package authz - -import ( - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - uetypes "github.com/pushchain/push-chain-node/x/uexecutor/types" -) - -func init() { - sdkConfig := sdk.GetConfig() - defer func() { - // Config already sealed, that's fine - ignore panic - _ = recover() - }() - sdkConfig.SetBech32PrefixForAccount("push", "pushpub") - sdkConfig.SetBech32PrefixForValidator("pushvaloper", "pushvaloperpub") - sdkConfig.SetBech32PrefixForConsensusNode("pushvalcons", "pushvalconspub") -} - -func TestParseMsgVoteInbound(t *testing.T) { - tests := []struct { - name string - msgType string - msgArgs []string - wantErr bool - errMsg string - validate func(t *testing.T, msg interface{}) - }{ - { - name: "valid MsgVoteInbound", - msgType: "/uexecutor.v1.MsgVoteInbound", - msgArgs: []string{ - "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", // signer - valid bech32 address - "eip155:11155111", // source chain - "0x123abc", // tx hash - "0xsender", // sender - "0xrecipient", // recipient - "1000", // amount - "0xasset", // asset addr - "1", // log index - "1", // tx type (GAS) - }, - wantErr: false, - validate: func(t *testing.T, msg interface{}) { - voteMsg, ok := msg.(*uetypes.MsgVoteInbound) - require.True(t, ok) - 
assert.Equal(t, "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", voteMsg.Signer) - assert.Equal(t, "eip155:11155111", voteMsg.Inbound.SourceChain) - assert.Equal(t, "0x123abc", voteMsg.Inbound.TxHash) - assert.Equal(t, "0xsender", voteMsg.Inbound.Sender) - assert.Equal(t, "0xrecipient", voteMsg.Inbound.Recipient) - assert.Equal(t, "1000", voteMsg.Inbound.Amount) - assert.Equal(t, "0xasset", voteMsg.Inbound.AssetAddr) - assert.Equal(t, "1", voteMsg.Inbound.LogIndex) - assert.Equal(t, uetypes.InboundTxType_GAS, voteMsg.Inbound.TxType) - }, - }, - { - name: "MsgVoteInbound with FUNDS type", - msgType: "/uexecutor.v1.MsgVoteInbound", - msgArgs: []string{ - "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", - "eip155:1", - "0xdef456", - "0xsender2", - "0xrecipient2", - "5000", - "0xtoken", - "5", - "2", // FUNDS - }, - wantErr: false, - validate: func(t *testing.T, msg interface{}) { - voteMsg, ok := msg.(*uetypes.MsgVoteInbound) - require.True(t, ok) - assert.Equal(t, uetypes.InboundTxType_FUNDS, voteMsg.Inbound.TxType) - }, - }, - { - name: "MsgVoteInbound with UNSPECIFIED type", - msgType: "/uexecutor.v1.MsgVoteInbound", - msgArgs: []string{ - "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", - "eip155:1", - "0xdef456", - "0xsender2", - "0xrecipient2", - "5000", - "0xtoken", - "5", - "0", // UNSPECIFIED - }, - wantErr: false, - validate: func(t *testing.T, msg interface{}) { - voteMsg, ok := msg.(*uetypes.MsgVoteInbound) - require.True(t, ok) - assert.Equal(t, uetypes.InboundTxType_UNSPECIFIED_TX, voteMsg.Inbound.TxType) - }, - }, - { - name: "MsgVoteInbound with insufficient args", - msgType: "/uexecutor.v1.MsgVoteInbound", - msgArgs: []string{"push1abc123", "eip155:1"}, - wantErr: true, - errMsg: "MsgVoteInbound requires", - }, - { - name: "MsgVoteInbound with invalid signer", - msgType: "/uexecutor.v1.MsgVoteInbound", - msgArgs: []string{ - "invalid_address", - "eip155:1", - "0x123", - "0xsender", - "0xrecipient", - "1000", - "0xasset", - "1", - "1", - }, - wantErr: true, 
- errMsg: "invalid signer address", - }, - { - name: "MsgVoteInbound with invalid tx type", - msgType: "/uexecutor.v1.MsgVoteInbound", - msgArgs: []string{ - "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", - "eip155:1", - "0x123", - "0xsender", - "0xrecipient", - "1000", - "0xasset", - "1", - "5", // Invalid type - }, - wantErr: true, - errMsg: "invalid tx type: 5", - }, - { - name: "MsgVoteInbound with non-numeric tx type", - msgType: "/uexecutor.v1.MsgVoteInbound", - msgArgs: []string{ - "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", - "eip155:1", - "0x123", - "0xsender", - "0xrecipient", - "1000", - "0xasset", - "1", - "abc", // Non-numeric - }, - wantErr: true, - errMsg: "invalid tx type (must be number", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - msg, err := ParseMessageFromArgs(tt.msgType, tt.msgArgs) - - if tt.wantErr { - require.Error(t, err) - if tt.errMsg != "" { - assert.Contains(t, err.Error(), tt.errMsg) - } - } else { - require.NoError(t, err) - require.NotNil(t, msg) - if tt.validate != nil { - tt.validate(t, msg) - } - } - }) - } -} diff --git a/cmd/puniversald/authz/verify.go b/cmd/puniversald/authz/verify.go deleted file mode 100644 index 7eb6ba7d..00000000 --- a/cmd/puniversald/authz/verify.go +++ /dev/null @@ -1,196 +0,0 @@ -package authz - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/x/authz" - - "github.com/pushchain/push-chain-node/universalClient/config" - "github.com/pushchain/push-chain-node/universalClient/constant" - "github.com/pushchain/push-chain-node/universalClient/keys" -) - -// Color constants for terminal output -var ( - colorReset = "\033[0m" - colorRed = "\033[31m" - colorGreen = "\033[32m" - colorYellow = "\033[33m" -) - -func init() { - // Check if colors should be disabled (for CI/CD or non-TTY environments) - 
if os.Getenv("NO_COLOR") != "" || os.Getenv("TERM") == "dumb" { - colorReset = "" - colorRed = "" - colorGreen = "" - colorYellow = "" - } -} - -// Status formatting helpers -func statusOK(msg string) string { - return fmt.Sprintf("%s[OK]%s %s", colorGreen, colorReset, msg) -} - -func statusFail(msg string) string { - return fmt.Sprintf("%s[FAIL]%s %s", colorRed, colorReset, msg) -} - -func statusExp(msg string) string { - return fmt.Sprintf("%s[EXP]%s %s", colorYellow, colorReset, msg) -} - - -// VerifyCmd creates the authz verify command -func VerifyCmd(rpcEndpoint, chainID *string) *cobra.Command { - cmd := &cobra.Command{ - Use: "verify [msg-types...]", - Short: "Verify hot key has required permissions", - Args: cobra.MinimumNArgs(2), - Long: ` -Verify that the specified hot key has all required permissions to execute -transactions on behalf of the granter. This validates that AuthZ grants -are properly set up for the specified message types. - -If no message types are specified, checks default message type: - /uexecutor.v1.MsgVoteInbound - -Examples: - puniversald authz verify container-hotkey push1granter... - puniversald authz verify container-hotkey push1granter... 
/uexecutor.v1.MsgVoteInbound -`, - RunE: func(cmd *cobra.Command, args []string) error { - return runVerifyCommand(*rpcEndpoint, args) - }, - } - - return cmd -} - -func runVerifyCommand(rpcEndpoint string, args []string) error { - // Parse arguments - granteeKeyName := args[0] - granterAddr := args[1] - - // Get message types (use defaults if not provided) - var msgTypes []string - if len(args) > 2 { - msgTypes = args[2:] - } else { - // Use default message type for universal validator voting - msgTypes = []string{ - "/uexecutor.v1.MsgVoteInbound", - } - } - - fmt.Printf("Verifying AuthZ configuration...\n") - fmt.Printf(" Granter: %s\n", granterAddr) - fmt.Printf(" Grantee: %s\n", granteeKeyName) - fmt.Printf(" Required: %d message types\n", len(msgTypes)) - - // Parse granter address - granterAddress, err := sdk.AccAddressFromBech32(granterAddr) - if err != nil { - return fmt.Errorf("invalid granter address: %w", err) - } - - // Load config for keyring backend - cfg, err := config.Load(constant.DefaultNodeHome) - if err != nil { - return fmt.Errorf("failed to load config: %w", err) - } - - // Get grantee key address - keyringDir := constant.DefaultNodeHome - kb, err := keys.CreateKeyringFromConfig(keyringDir, nil, cfg.KeyringBackend) - if err != nil { - return fmt.Errorf("failed to create keyring: %w", err) - } - - granteeRecord, err := kb.Key(granteeKeyName) - if err != nil { - return fmt.Errorf("grantee key '%s' not found in keyring: %w", granteeKeyName, err) - } - - granteeAddr, err := granteeRecord.GetAddress() - if err != nil { - return fmt.Errorf("failed to get grantee key address: %w", err) - } - - // Ensure endpoint has gRPC port - grpcEndpoint := ensureGRPCPort(rpcEndpoint) - - // Create gRPC connection - conn, err := grpc.NewClient(grpcEndpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return fmt.Errorf("failed to connect to gRPC endpoint: %w", err) - } - defer func() { - if err := conn.Close(); err != nil { - 
fmt.Printf("Warning: failed to close gRPC connection: %v\n", err) - } - }() - - // Create authz query client - authzClient := authz.NewQueryClient(conn) - ctx := context.Background() - - // Check each required grant - var missingGrants []string - var validGrants []string - - fmt.Printf("\nChecking permissions:\n") - - for _, msgType := range msgTypes { - // Query specific grant - grantResp, err := authzClient.Grants(ctx, &authz.QueryGrantsRequest{ - Granter: granterAddress.String(), - Grantee: granteeAddr.String(), - MsgTypeUrl: msgType, - }) - - if err != nil || len(grantResp.Grants) == 0 { - missingGrants = append(missingGrants, msgType) - fmt.Printf(" %s\n", statusFail(msgType)) - } else { - grant := grantResp.Grants[0] - if grant.Expiration != nil && grant.Expiration.Before(time.Now()) { - missingGrants = append(missingGrants, msgType) - fmt.Printf(" %s\n", statusExp(msgType)) - } else { - validGrants = append(validGrants, msgType) - msg := msgType - if grant.Expiration != nil { - msg += fmt.Sprintf(" (exp: %s)", grant.Expiration.Format("2006-01-02")) - } - fmt.Printf(" %s\n", statusOK(msg)) - } - } - } - - // Summary - fmt.Printf("\nStatus: ") - if len(missingGrants) > 0 { - fmt.Printf("%sINCOMPLETE%s (%d/%d grants valid)\n", colorRed, colorReset, len(validGrants), len(msgTypes)) - - fmt.Printf("\nMissing grants:\n") - for _, msgType := range missingGrants { - fmt.Printf(" %s\n", msgType) - } - fmt.Printf("\nTo grant missing permissions, use the core validator (pchaind) or local-validator-manager setup-container-authz\n") - return fmt.Errorf("verification failed - missing grants") - } - - fmt.Printf("%sREADY%s (all grants valid)\n", colorGreen, colorReset) - - return nil -} \ No newline at end of file diff --git a/cmd/puniversald/commands.go b/cmd/puniversald/commands.go index 46755ec9..4403e921 100644 --- a/cmd/puniversald/commands.go +++ b/cmd/puniversald/commands.go @@ -6,7 +6,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "path/filepath" "strings" 
"github.com/libp2p/go-libp2p/core/crypto" @@ -16,25 +15,16 @@ import ( "github.com/pushchain/push-chain-node/universalClient/config" "github.com/pushchain/push-chain-node/universalClient/constant" "github.com/pushchain/push-chain-node/universalClient/core" - "github.com/pushchain/push-chain-node/universalClient/db" - "github.com/pushchain/push-chain-node/universalClient/logger" - "github.com/pushchain/push-chain-node/universalClient/store" "github.com/spf13/cobra" cosmosevmcmd "github.com/cosmos/evm/client" - "gorm.io/gorm" ) -var cfg config.Config - func InitRootCmd(rootCmd *cobra.Command) { rootCmd.AddCommand(versionCmd()) - rootCmd.AddCommand(initCmd()) rootCmd.AddCommand(startCmd()) - rootCmd.AddCommand(queryCmd()) + rootCmd.AddCommand(initCmd()) rootCmd.AddCommand(cosmosevmcmd.KeyCommands(constant.DefaultNodeHome, true)) - rootCmd.AddCommand(authzCmd()) - rootCmd.AddCommand(setblockCmd()) rootCmd.AddCommand(tssPeerIDCmd()) } @@ -55,51 +45,31 @@ func versionCmd() *cobra.Command { func initCmd() *cobra.Command { cmd := &cobra.Command{ Use: "init", - Short: "Create initial config file with default values", + Short: "Initialize configuration file", + Long: `Initialize the configuration file with default values. 
+ +This command creates a default configuration file at: + ~/.puniversal/config/pushuv_config.json + +You can edit this file to customize your universal validator settings.`, RunE: func(cmd *cobra.Command, args []string) error { // Load default config - cfg, err := config.LoadDefaultConfig() + defaultCfg, err := config.LoadDefaultConfig() if err != nil { return fmt.Errorf("failed to load default config: %w", err) } - // Override with flags if provided - if cmd.Flags().Changed("log-level") { - logLevel, _ := cmd.Flags().GetInt("log-level") - cfg.LogLevel = logLevel - } - if cmd.Flags().Changed("log-format") { - logFormat, _ := cmd.Flags().GetString("log-format") - cfg.LogFormat = logFormat - } - if cmd.Flags().Changed("log-sampler") { - logSampler, _ := cmd.Flags().GetBool("log-sampler") - cfg.LogSampler = logSampler - } - - // If TSS fields are still empty, use test defaults for init - // (user can update config file later or use jq as in the script) - if cfg.TSSP2PPrivateKeyHex == "" { - cfg.TSSP2PPrivateKeyHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - } - if cfg.TSSPassword == "" { - cfg.TSSPassword = "testpassword" - } - - // Save config - if err := config.Save(&cfg, constant.DefaultNodeHome); err != nil { + // Save to config directory + if err := config.Save(&defaultCfg, constant.DefaultNodeHome); err != nil { return fmt.Errorf("failed to save config: %w", err) } - fmt.Printf("✅ Config saved to %s/config/pushuv_config.json\n", constant.DefaultNodeHome) + + configPath := fmt.Sprintf("%s/%s/%s", constant.DefaultNodeHome, constant.ConfigSubdir, constant.ConfigFileName) + fmt.Printf("✅ Configuration file initialized at: %s\n", configPath) + fmt.Println("You can now edit this file to customize your settings.") return nil }, } - - // Define flags (not bound to a specific cfg instance) - cmd.Flags().Int("log-level", 1, "Log level (0=debug, 1=info, ..., 5=panic)") - cmd.Flags().String("log-format", "console", "Log format: json or console") 
- cmd.Flags().Bool("log-sampler", false, "Enable log sampling") - return cmd } @@ -121,20 +91,9 @@ func startCmd() *cobra.Command { } fmt.Printf("\n=== Loaded Configuration ===\n%s\n===========================\n\n", string(configJSON)) - // --- Step 2: Setup logger --- - log := logger.Init(loadedCfg) - - // --- Step 3: Setup ChainDBManager --- - // Set default database base directory if not configured - if loadedCfg.DatabaseBaseDir == "" { - loadedCfg.DatabaseBaseDir = filepath.Join(constant.DefaultNodeHome, "databases") - } - - dbManager := db.NewChainDBManager(loadedCfg.DatabaseBaseDir, log, &loadedCfg) - - // --- Step 4: Start client --- + // --- Step 2: Start client --- ctx := context.Background() - client, err := core.NewUniversalClient(ctx, log, dbManager, &loadedCfg) + client, err := core.NewUniversalClient(ctx, &loadedCfg) if err != nil { return fmt.Errorf("failed to create universal client: %w", err) } @@ -144,118 +103,6 @@ func startCmd() *cobra.Command { return cmd } -func setblockCmd() *cobra.Command { - var ( - chainID string - block int64 - list bool - blockSet bool - ) - - cmd := &cobra.Command{ - Use: "setblock", - Short: "Set or list last observed blocks for chains", - RunE: func(cmd *cobra.Command, args []string) error { - // Load config to get database base directory - loadedCfg, err := config.Load(constant.DefaultNodeHome) - if err != nil { - return fmt.Errorf("failed to load config: %w", err) - } - - // Set default database base directory if not configured - if loadedCfg.DatabaseBaseDir == "" { - loadedCfg.DatabaseBaseDir = filepath.Join(constant.DefaultNodeHome, "databases") - } - - // Setup logger (minimal for CLI) - log := logger.Init(loadedCfg) - - // Create ChainDBManager - dbManager := db.NewChainDBManager(loadedCfg.DatabaseBaseDir, log, &loadedCfg) - defer dbManager.CloseAll() - - // List mode - if list { - databases := dbManager.GetAllDatabases() - if len(databases) == 0 { - fmt.Println("No chain databases found") - return nil - } - - 
fmt.Println("\nCurrent last observed blocks:") - fmt.Println("================================") - - for chainID, chainDB := range databases { - var chainState store.ChainState - if err := chainDB.Client().First(&chainState).Error; err != nil { - if err == gorm.ErrRecordNotFound { - fmt.Printf("No state found for chain %s\n", chainID) - } else { - fmt.Printf("Error reading chain %s: %v\n", chainID, err) - } - continue - } - - fmt.Printf("Chain: %s\n", chainID) - fmt.Printf("Last Block: %d\n", chainState.LastBlock) - fmt.Printf("Updated: %v\n", chainState.UpdatedAt) - fmt.Println("--------------------------------") - } - return nil - } - - // Check if block flag was actually provided - blockSet = cmd.Flags().Changed("block") - - // Set mode - if chainID == "" || !blockSet { - return fmt.Errorf("--chain and --block are required when not using --list") - } - - // Get chain-specific database - database, err := dbManager.GetChainDB(chainID) - if err != nil { - return fmt.Errorf("failed to get database for chain %s: %w", chainID, err) - } - - var chainState store.ChainState - result := database.Client().First(&chainState) - - if result.Error != nil && result.Error != gorm.ErrRecordNotFound { - return fmt.Errorf("failed to query chain state: %w", result.Error) - } - - if result.Error == gorm.ErrRecordNotFound { - // Create new record - chainState = store.ChainState{ - LastBlock: uint64(block), - } - if err := database.Client().Create(&chainState).Error; err != nil { - return fmt.Errorf("failed to create chain state record: %w", err) - } - fmt.Printf("Created new record for chain %s at block %d\n", chainID, block) - } else { - // Update existing record - oldBlock := chainState.LastBlock - chainState.LastBlock = uint64(block) - if err := database.Client().Save(&chainState).Error; err != nil { - return fmt.Errorf("failed to update chain state: %w", err) - } - fmt.Printf("Updated block from %d to %d for chain %s\n", oldBlock, block, chainID) - } - - fmt.Printf("✅ Successfully 
set block %d for chain %s\n", block, chainID) - return nil - }, - } - - cmd.Flags().StringVar(&chainID, "chain", "", "Chain ID (e.g., 'eip155:11155111' or 'solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1')") - cmd.Flags().Int64Var(&block, "block", -1, "Block number to set") - cmd.Flags().BoolVar(&list, "list", false, "List all current block records") - - return cmd -} - // tssPeerIDCmd computes and prints the libp2p peer ID from a TSS private key hex string. // This is used during devnet setup to derive the peer ID for universal validator registration. func tssPeerIDCmd() *cobra.Command { diff --git a/cmd/puniversald/query.go b/cmd/puniversald/query.go deleted file mode 100644 index 14e40690..00000000 --- a/cmd/puniversald/query.go +++ /dev/null @@ -1,329 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "net/http" - "os" - "time" - - "github.com/pushchain/push-chain-node/universalClient/config" - "github.com/pushchain/push-chain-node/universalClient/constant" - uregistrytypes "github.com/pushchain/push-chain-node/x/uregistry/types" - "github.com/spf13/cobra" - "gopkg.in/yaml.v2" -) - -// Output formats -const ( - OutputFormatYAML = "yaml" - OutputFormatJSON = "json" -) - -// ChainConfigOutput represents the output format for chain configs -type ChainConfigOutput struct { - Config *uregistrytypes.ChainConfig `yaml:"config,omitempty" json:"config,omitempty"` - Configs []*uregistrytypes.ChainConfig `yaml:"configs,omitempty" json:"configs,omitempty"` - LastFetched time.Time `yaml:"last_fetched" json:"last_fetched"` -} - -// TokenConfigOutput represents the output format for token configs -type TokenConfigOutput struct { - Config *uregistrytypes.TokenConfig `yaml:"config,omitempty" json:"config,omitempty"` - Configs []*uregistrytypes.TokenConfig `yaml:"configs,omitempty" json:"configs,omitempty"` - LastFetched time.Time `yaml:"last_fetched" json:"last_fetched"` -} - -// QueryResponse represents the standard query response format from HTTP API -type QueryResponse 
struct { - Data json.RawMessage `json:"data"` - LastFetched time.Time `json:"last_fetched"` -} - -// ErrorResponse represents an error response from HTTP API -type ErrorResponse struct { - Error string `json:"error"` -} - -func queryCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "query", - Aliases: []string{"q"}, - Short: "Querying commands", - } - - cmd.AddCommand(uregistryCmd()) - return cmd -} - -func uregistryCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "uregistry", - Short: "Querying commands for the uregistry module", - } - - cmd.AddCommand( - allChainConfigsCmd(), - allTokenConfigsCmd(), - tokenConfigsByChainCmd(), - tokenConfigCmd(), - ) - - return cmd -} - -func allChainConfigsCmd() *cobra.Command { - var outputFormat string - - cmd := &cobra.Command{ - Use: "all-chain-configs", - Short: "Query all chain configurations", - RunE: func(cmd *cobra.Command, args []string) error { - port, err := getQueryServerPort() - if err != nil { - return err - } - - resp, err := http.Get(fmt.Sprintf("http://localhost:%d/api/v1/chain-configs", port)) - if err != nil { - return fmt.Errorf("failed to query chain configs: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - var errResp ErrorResponse - if err := json.NewDecoder(resp.Body).Decode(&errResp); err != nil { - return fmt.Errorf("server returned status %d", resp.StatusCode) - } - return fmt.Errorf("server error: %s", errResp.Error) - } - - var queryResp QueryResponse - if err := json.NewDecoder(resp.Body).Decode(&queryResp); err != nil { - return fmt.Errorf("failed to decode response: %w", err) - } - - var configs []*uregistrytypes.ChainConfig - if err := json.Unmarshal(queryResp.Data, &configs); err != nil { - return fmt.Errorf("failed to unmarshal chain configs: %w", err) - } - - output := ChainConfigOutput{ - Configs: configs, - LastFetched: queryResp.LastFetched, - } - - return printOutput(output, outputFormat) - }, - } - - cmd.Flags().StringVarP(&outputFormat, 
"output", "o", OutputFormatYAML, "Output format (yaml|json)") - return cmd -} - -func allTokenConfigsCmd() *cobra.Command { - var outputFormat string - - cmd := &cobra.Command{ - Use: "all-token-configs", - Short: "Query all token configurations", - RunE: func(cmd *cobra.Command, args []string) error { - port, err := getQueryServerPort() - if err != nil { - return err - } - - resp, err := http.Get(fmt.Sprintf("http://localhost:%d/api/v1/token-configs", port)) - if err != nil { - return fmt.Errorf("failed to query token configs: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - var errResp ErrorResponse - if err := json.NewDecoder(resp.Body).Decode(&errResp); err != nil { - return fmt.Errorf("server returned status %d", resp.StatusCode) - } - return fmt.Errorf("server error: %s", errResp.Error) - } - - var queryResp QueryResponse - if err := json.NewDecoder(resp.Body).Decode(&queryResp); err != nil { - return fmt.Errorf("failed to decode response: %w", err) - } - - var configs []*uregistrytypes.TokenConfig - if err := json.Unmarshal(queryResp.Data, &configs); err != nil { - return fmt.Errorf("failed to unmarshal token configs: %w", err) - } - - output := TokenConfigOutput{ - Configs: configs, - LastFetched: queryResp.LastFetched, - } - - return printOutput(output, outputFormat) - }, - } - - cmd.Flags().StringVarP(&outputFormat, "output", "o", OutputFormatYAML, "Output format (yaml|json)") - return cmd -} - -func tokenConfigsByChainCmd() *cobra.Command { - var ( - chain string - outputFormat string - ) - - cmd := &cobra.Command{ - Use: "token-configs-by-chain", - Short: "Query all token configurations for a specific chain", - RunE: func(cmd *cobra.Command, args []string) error { - if chain == "" { - return fmt.Errorf("chain is required") - } - - port, err := getQueryServerPort() - if err != nil { - return err - } - - resp, err := http.Get(fmt.Sprintf("http://localhost:%d/api/v1/token-configs-by-chain?chain=%s", port, chain)) - if err 
!= nil { - return fmt.Errorf("failed to query token configs by chain: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - var errResp ErrorResponse - if err := json.NewDecoder(resp.Body).Decode(&errResp); err != nil { - return fmt.Errorf("server returned status %d", resp.StatusCode) - } - return fmt.Errorf("server error: %s", errResp.Error) - } - - var queryResp QueryResponse - if err := json.NewDecoder(resp.Body).Decode(&queryResp); err != nil { - return fmt.Errorf("failed to decode response: %w", err) - } - - var configs []*uregistrytypes.TokenConfig - if err := json.Unmarshal(queryResp.Data, &configs); err != nil { - return fmt.Errorf("failed to unmarshal token configs: %w", err) - } - - output := TokenConfigOutput{ - Configs: configs, - LastFetched: queryResp.LastFetched, - } - - return printOutput(output, outputFormat) - }, - } - - cmd.Flags().StringVar(&chain, "chain", "", "Chain ID (e.g., eip155:11155111)") - cmd.Flags().StringVarP(&outputFormat, "output", "o", OutputFormatYAML, "Output format (yaml|json)") - cmd.MarkFlagRequired("chain") - - return cmd -} - -func tokenConfigCmd() *cobra.Command { - var ( - chain string - address string - outputFormat string - ) - - cmd := &cobra.Command{ - Use: "token-config", - Short: "Query a specific token configuration", - RunE: func(cmd *cobra.Command, args []string) error { - if chain == "" { - return fmt.Errorf("chain is required") - } - if address == "" { - return fmt.Errorf("address is required") - } - - port, err := getQueryServerPort() - if err != nil { - return err - } - - resp, err := http.Get(fmt.Sprintf("http://localhost:%d/api/v1/token-config?chain=%s&address=%s", port, chain, address)) - if err != nil { - return fmt.Errorf("failed to query token config: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound { - var errResp ErrorResponse - if err := json.NewDecoder(resp.Body).Decode(&errResp); err != nil { - return fmt.Errorf("token config not 
found for chain %s and address %s", chain, address) - } - return fmt.Errorf("%s", errResp.Error) - } - - if resp.StatusCode != http.StatusOK { - var errResp ErrorResponse - if err := json.NewDecoder(resp.Body).Decode(&errResp); err != nil { - return fmt.Errorf("server returned status %d", resp.StatusCode) - } - return fmt.Errorf("server error: %s", errResp.Error) - } - - var queryResp QueryResponse - if err := json.NewDecoder(resp.Body).Decode(&queryResp); err != nil { - return fmt.Errorf("failed to decode response: %w", err) - } - - var config *uregistrytypes.TokenConfig - if err := json.Unmarshal(queryResp.Data, &config); err != nil { - return fmt.Errorf("failed to unmarshal token config: %w", err) - } - - output := TokenConfigOutput{ - Config: config, - LastFetched: queryResp.LastFetched, - } - - return printOutput(output, outputFormat) - }, - } - - cmd.Flags().StringVar(&chain, "chain", "", "Chain ID (e.g., eip155:11155111)") - cmd.Flags().StringVar(&address, "address", "", "Token address") - cmd.Flags().StringVarP(&outputFormat, "output", "o", OutputFormatYAML, "Output format (yaml|json)") - cmd.MarkFlagRequired("chain") - cmd.MarkFlagRequired("address") - - return cmd -} - -// getQueryServerPort loads the config to get the query server port -func getQueryServerPort() (int, error) { - // Load config - loadedCfg, err := config.Load(constant.DefaultNodeHome) - if err != nil { - return 0, fmt.Errorf("failed to load config: %w", err) - } - - return loadedCfg.QueryServerPort, nil -} - -// printOutput prints the output in the specified format -func printOutput(data interface{}, format string) error { - switch format { - case OutputFormatJSON: - encoder := json.NewEncoder(os.Stdout) - encoder.SetIndent("", " ") - return encoder.Encode(data) - case OutputFormatYAML: - encoder := yaml.NewEncoder(os.Stdout) - return encoder.Encode(data) - default: - return fmt.Errorf("unsupported output format: %s", format) - } -} diff --git a/go.mod b/go.mod index 9b6f0dd0..c96d8738 
100755 --- a/go.mod +++ b/go.mod @@ -87,7 +87,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 google.golang.org/grpc v1.75.0 google.golang.org/protobuf v1.36.8 - gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v2 v2.4.0 // indirect gorm.io/driver/sqlite v1.6.0 gorm.io/gorm v1.30.1 ) diff --git a/proto/uexecutor/v1/tx.proto b/proto/uexecutor/v1/tx.proto index 858f8746..c452fecf 100755 --- a/proto/uexecutor/v1/tx.proto +++ b/proto/uexecutor/v1/tx.proto @@ -33,6 +33,9 @@ service Msg { // VoteInbound defines a message for voting on synthetic assets bridging from external chain to PC rpc VoteInbound(MsgVoteInbound) returns (MsgVoteInboundResponse); + // VoteOutbound defines a message for voting on an observed outbound tx on an external chain + rpc VoteOutbound(MsgVoteOutbound) returns (MsgVoteOutboundResponse); + // VoteGasPrice defines a message for universal validators to vote on the gas price rpc VoteGasPrice(MsgVoteGasPrice) returns (MsgVoteGasPriceResponse); } @@ -154,6 +157,20 @@ message MsgVoteInbound { // MsgVoteInboundResponse defines the response for MsgExecutePayload. message MsgVoteInboundResponse {} +// MsgVoteOutbound allows a universal validator to vote on an outbound tx observation. +message MsgVoteOutbound { + option (amino.name) = "uexecutor/MsgVoteOutbound"; + option (cosmos.msg.v1.signer) = "signer"; + + // signer is the Cosmos address initiating the tx (used for tx signing) + string signer = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + string tx_id = 2; // Tx Id (abi.encode(utxId,outboundId)) + OutboundObservation observed_tx = 3; // observed tx on destination chain +} + +// MsgVoteOutboundResponse defines the response for MsgVoteOutbound.
+message MsgVoteOutboundResponse {} + // MsgVoteGasPrice is broadcasted by Universal Validators to submit their observed gas prices message MsgVoteGasPrice { option (amino.name) = "uexecutor/MsgVoteGasPrice"; diff --git a/proto/uexecutor/v1/types.proto b/proto/uexecutor/v1/types.proto index c3c35fed..b933c14e 100644 --- a/proto/uexecutor/v1/types.proto +++ b/proto/uexecutor/v1/types.proto @@ -76,19 +76,25 @@ enum UniversalTxStatus { enum Status { UNSPECIFIED = 0; PENDING = 1; - FINALIZED = 2; + OBSERVED = 2; + REVERTED = 3; } -message InboundStatus { - Status status = 1; +enum TxType { + UNSPECIFIED_TX = 0; + GAS = 1; + GAS_AND_PAYLOAD = 2; + FUNDS = 3; + FUNDS_AND_PAYLOAD = 4; + PAYLOAD = 5; + INBOUND_REVERT = 6; } -enum InboundTxType { - UNSPECIFIED_TX = 0; - GAS = 1; // fee abstraction - FUNDS = 2; // synthetic - FUNDS_AND_PAYLOAD = 3; // synthetic + payload exec - GAS_AND_PAYLOAD = 4; // fee abstraction + payload exec +message RevertInstructions { + option (amino.name) = "uexecutor/revert_instructions"; + option (gogoproto.equal) = true; + + string fund_recipient = 1; // where funds go in revert/refund } message Inbound { @@ -103,9 +109,10 @@ message Inbound { string amount = 5; // synthetic token amount bridged in string asset_addr = 6; // address of erc20 token address on source chain string log_index = 7; // log index that originated the cross chain tx - InboundTxType tx_type = 8; // inbound tx type + TxType tx_type = 8; // inbound tx type UniversalPayload universal_payload = 9; // payload is the universal payload to be executed string verification_data = 10; // verification_data is the bytes passed as verifier data for the given payload. 
+ RevertInstructions revert_instructions = 11; // revert config } message PCTx { @@ -121,16 +128,44 @@ message PCTx { string error_msg = 7; // optional error info if failed } +message OutboundObservation { + option (amino.name) = "uexecutor/outbound_observation"; + option (gogoproto.equal) = true; + + bool success = 1; // whether execution succeeded + uint64 block_height = 2; // block height on external chain + string tx_hash = 3; // external chain tx hash + string error_msg = 4; +} + +message OriginatingPcTx { + option (amino.name) = "uexecutor/originating_pc_tx"; + option (gogoproto.equal) = true; + + string tx_hash = 1; // pc_tx hash that initiated the outbound + string log_index = 2; // log_index that initiated the outbound +} + message OutboundTx { option (amino.name) = "uexecutor/outbound_tx"; option (gogoproto.equal) = true; option (gogoproto.goproto_stringer) = false; string destination_chain = 1; // chain where this outbound is sent - string tx_hash = 2; // outbound tx hash on destination chain - string recipient = 3; // recipient on destination chain - string amount = 4; // token amount or payload - string asset_addr = 5; // token contract if applicable + string recipient = 2; // recipient on destination chain + string amount = 3; // token amount + string external_asset_addr = 4; // asset addr destination chain + string prc20_asset_addr = 5; // prc20 contract addr + string sender = 6; // sender of the outbound tx + string payload = 7; // payload to be executed + string gas_limit = 8; // gas limit to be used for the outbound tx + TxType tx_type = 9; // outbound tx type + OriginatingPcTx pc_tx = 10; // pc_tx that originated the outbound + OutboundObservation observed_tx = 11; // observed tx on destination chain + string id = 12; // id of outbound tx + Status outbound_status = 13; // status of outbound tx + RevertInstructions revert_instructions = 14; + PCTx pc_revert_execution = 15; } message UniversalTx { @@ -138,8 +173,9 @@ message UniversalTx { option 
(gogoproto.equal) = true; option (gogoproto.goproto_stringer) = false; - Inbound inbound_tx = 1; // Full inbound tx data - repeated PCTx pc_tx = 2; // Execution details on Push Chain - OutboundTx outbound_tx = 3; // Outbound tx triggered by this tx - UniversalTxStatus universal_status = 4; // Current status + string id = 1; + Inbound inbound_tx = 2; // Full inbound tx data + repeated PCTx pc_tx = 3; // Execution details on Push Chain + repeated OutboundTx outbound_tx = 4; // Outbound tx triggered by this tx + UniversalTxStatus universal_status = 5; // Current status } diff --git a/test/integration/uexecutor/execute_payload_test.go b/test/integration/uexecutor/execute_payload_test.go index 9502982d..48ce2771 100644 --- a/test/integration/uexecutor/execute_payload_test.go +++ b/test/integration/uexecutor/execute_payload_test.go @@ -58,7 +58,7 @@ func TestExecutePayload(t *testing.T) { GasLimit: "21000000", MaxFeePerGas: "1000000000", MaxPriorityFeePerGas: "200000000", - Nonce: "0", + Nonce: "1", Deadline: "0", VType: uexecutortypes.VerificationType(0), } diff --git a/test/integration/uexecutor/inbound_initiated_outbound_test.go b/test/integration/uexecutor/inbound_initiated_outbound_test.go new file mode 100644 index 00000000..11652a8e --- /dev/null +++ b/test/integration/uexecutor/inbound_initiated_outbound_test.go @@ -0,0 +1,198 @@ +package integrationtest + +import ( + "fmt" + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + authz "github.com/cosmos/cosmos-sdk/x/authz" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/pushchain/push-chain-node/app" + utils "github.com/pushchain/push-chain-node/test/utils" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" + uregistrytypes "github.com/pushchain/push-chain-node/x/uregistry/types" + uvalidatortypes "github.com/pushchain/push-chain-node/x/uvalidator/types" +) + +func 
setupInboundInitiatedOutboundTest(t *testing.T, numVals int) (*app.ChainApp, sdk.Context, []string, *uexecutortypes.Inbound, []stakingtypes.Validator, common.Address) { + app, ctx, _, validators := utils.SetAppWithMultipleValidators(t, numVals) + + chainConfigTest := uregistrytypes.ChainConfig{ + Chain: "eip155:11155111", + VmType: uregistrytypes.VmType_EVM, + PublicRpcUrl: "https://sepolia.drpc.org", + GatewayAddress: "0x28E0F09bE2321c1420Dc60Ee146aACbD68B335Fe", + BlockConfirmation: &uregistrytypes.BlockConfirmation{ + FastInbound: 5, + StandardInbound: 12, + }, + GatewayMethods: []*uregistrytypes.GatewayMethods{{ + Name: "addFunds", + Identifier: "", + EventIdentifier: "0xb28f49668e7e76dc96d7aabe5b7f63fecfbd1c3574774c05e8204e749fd96fbd", + ConfirmationType: 5, + }}, + Enabled: &uregistrytypes.ChainEnabled{ + IsInboundEnabled: true, + IsOutboundEnabled: true, + }, + } + + prc20Address := utils.GetDefaultAddresses().PRC20USDCAddr + testAddress := utils.GetDefaultAddresses().DefaultTestAddr + usdcAddress := utils.GetDefaultAddresses().ExternalUSDCAddr + + tokenConfigTest := uregistrytypes.TokenConfig{ + Chain: "eip155:11155111", + Address: usdcAddress.String(), + Name: "USD Coin", + Symbol: "USDC", + Decimals: 6, + Enabled: true, + LiquidityCap: "1000000000000000000000000", + TokenType: 1, + NativeRepresentation: &uregistrytypes.NativeRepresentation{ + Denom: "", + ContractAddress: prc20Address.String(), + }, + } + + app.UregistryKeeper.AddChainConfig(ctx, &chainConfigTest) + app.UregistryKeeper.AddTokenConfig(ctx, &tokenConfigTest) + + // Register each validator with a universal validator + universalVals := make([]string, len(validators)) + for i, val := range validators { + coreValAddr := val.OperatorAddress + universalValAddr := sdk.AccAddress([]byte( + fmt.Sprintf("universal-validator-%d", i), + )).String() + + network := uvalidatortypes.NetworkInfo{PeerId: fmt.Sprintf("temp%d", i+1), MultiAddrs: []string{"temp"}} + + err := 
app.UvalidatorKeeper.AddUniversalValidator(ctx, coreValAddr, network) + require.NoError(t, err) + + universalVals[i] = universalValAddr + } + + // Grant authz permission: core validator -> universal validator + for i, val := range validators { + accAddr, err := sdk.ValAddressFromBech32(val.OperatorAddress) // gives ValAddress + require.NoError(t, err) + + coreValAddr := sdk.AccAddress(accAddr) // converts to normal account address + + uniValAddr := sdk.MustAccAddressFromBech32(universalVals[i]) + + // Define grant for MsgVoteInbound + msgType := sdk.MsgTypeURL(&uexecutortypes.MsgVoteInbound{}) + auth := authz.NewGenericAuthorization(msgType) + + // Expiration + exp := ctx.BlockTime().Add(time.Hour) + + // SaveGrant takes (ctx, grantee, granter, authz.Authorization, *time.Time) + err = app.AuthzKeeper.SaveGrant(ctx, uniValAddr, coreValAddr, auth, &exp) + require.NoError(t, err) + + // Define grant for MsgVoteOutbound + outboundAuth := authz.NewGenericAuthorization( + sdk.MsgTypeURL(&uexecutortypes.MsgVoteOutbound{}), + ) + err = app.AuthzKeeper.SaveGrant(ctx, uniValAddr, coreValAddr, outboundAuth, &exp) + require.NoError(t, err) + } + + validUA := &uexecutortypes.UniversalAccountId{ + ChainNamespace: "eip155", + ChainId: "11155111", + Owner: testAddress, + } + + ueModuleAccAddress, _ := app.UexecutorKeeper.GetUeModuleAddress(ctx) + receipt, err := app.UexecutorKeeper.DeployUEAV2(ctx, ueModuleAccAddress, validUA) + ueaAddrHex := common.BytesToAddress(receipt.Ret) + require.NoError(t, err) + + // signature + validVerificationData := "0x4c719b6c0e03cc7faadc7b679eea3bf301983e28ef241baa4e0b2dc17b3bc09f1a8221abf166bd8cbe38aefdda7d62f9f944e28431b9982b6e2d4d1b8594446b1c" + + validUP := &uexecutortypes.UniversalPayload{ + To: utils.GetDefaultAddresses().UniversalGatewayPCAddr.Hex(), + Value: "0", + Data: 
"0xb3ca1fbc000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000e0600000000000000000000000000000000000000000000000000000000000f4240000000000000000000000000000000000000000000000000000000000007a12000000000000000000000000000000000000000000000000000000000000001000000000000000000000000001234567890abcdef1234567890abcdef1234567800000000000000000000000000000000000000000000000000000000000000141234567890abcdef1234567890abcdef123456780000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + GasLimit: "21000000", + MaxFeePerGas: "1000000000", + MaxPriorityFeePerGas: "200000000", + Nonce: "0", + Deadline: "0", + VType: uexecutortypes.VerificationType(0), + } + + inbound := &uexecutortypes.Inbound{ + SourceChain: "eip155:11155111", + TxHash: "0xabcd", + Sender: testAddress, + Recipient: "", + Amount: "1000000", + AssetAddr: usdcAddress.String(), + LogIndex: "1", + TxType: uexecutortypes.TxType_FUNDS_AND_PAYLOAD, + UniversalPayload: validUP, + VerificationData: validVerificationData, + } + + return app, ctx, universalVals, inbound, validators, ueaAddrHex +} + +func TestInboundInitiatedOutbound(t *testing.T) { + + t.Run("successfully creates outbound in the UniversalTx when payload invokes Gateway's withdraw fn", func(t *testing.T) { + app, ctx, vals, inbound, coreVals, _ := setupInboundInitiatedOutboundTest(t, 4) + + // --- Quorum reached --- + for i := 0; i < 3; i++ { + valAddr, err := sdk.ValAddressFromBech32(coreVals[i].OperatorAddress) + require.NoError(t, err) + coreValAcc := sdk.AccAddress(valAddr).String() + + err = utils.ExecVoteInbound(t, ctx, app, vals[i], coreValAcc, inbound) + require.NoError(t, err) + } + + utxKey := uexecutortypes.GetInboundUniversalTxKey(*inbound) + utx, _, err := app.UexecutorKeeper.GetUniversalTx(ctx, utxKey) + require.NoError(t, err) + + require.NotEmpty(t, utx.OutboundTx, 
"OutboundTx should exist after successful withdraw event") + require.Len(t, utx.OutboundTx, 1, "Only one outbound expected") + + out := utx.OutboundTx[0] + + // Validate outbound params + require.Equal(t, + "eip155:11155111", + out.DestinationChain, + "Destination chain must be correct", + ) + + require.Equal(t, + "21000", + out.GasLimit, + "Gas limit must match event (gasFeeUsed) value", + ) + + // checks + require.Equal(t, "0x1234567890abcdef1234567890abcdef12345678", out.Recipient) + require.Equal(t, "1000000", out.Amount) + require.Equal(t, "0x0000000000000000000000000000000000000e07", out.ExternalAssetAddr) + require.Equal(t, "0x0000000000000000000000000000000000000e06", out.Prc20AssetAddr) + require.Equal(t, uexecutortypes.TxType_FUNDS, out.TxType) + require.Equal(t, uexecutortypes.Status_PENDING, out.OutboundStatus) + }) +} diff --git a/test/integration/uexecutor/inbound_synthetic_bridge_payload_test.go b/test/integration/uexecutor/inbound_synthetic_bridge_payload_test.go index fbee35a0..8ed894d1 100644 --- a/test/integration/uexecutor/inbound_synthetic_bridge_payload_test.go +++ b/test/integration/uexecutor/inbound_synthetic_bridge_payload_test.go @@ -44,10 +44,11 @@ func setupInboundBridgePayloadTest(t *testing.T, numVals int) (*app.ChainApp, sd prc20Address := utils.GetDefaultAddresses().PRC20USDCAddr testAddress := utils.GetDefaultAddresses().DefaultTestAddr + usdcAddress := utils.GetDefaultAddresses().ExternalUSDCAddr tokenConfigTest := uregistrytypes.TokenConfig{ Chain: "eip155:11155111", - Address: prc20Address.String(), + Address: usdcAddress.String(), Name: "USD Coin", Symbol: "USDC", Decimals: 6, @@ -132,9 +133,9 @@ func setupInboundBridgePayloadTest(t *testing.T, numVals int) (*app.ChainApp, sd Sender: testAddress, Recipient: "", Amount: "1000000", - AssetAddr: prc20Address.String(), + AssetAddr: usdcAddress.String(), LogIndex: "1", - TxType: uexecutortypes.InboundTxType_FUNDS_AND_PAYLOAD, + TxType: uexecutortypes.TxType_FUNDS_AND_PAYLOAD, 
UniversalPayload: validUP, VerificationData: validVerificationData, } @@ -143,7 +144,8 @@ func setupInboundBridgePayloadTest(t *testing.T, numVals int) (*app.ChainApp, sd } func TestInboundSyntheticBridgePayload(t *testing.T) { - prc20Address := utils.GetDefaultAddresses().PRC20USDCAddr + // prc20Address := utils.GetDefaultAddresses().PRC20USDCAddr + usdcAddress := utils.GetDefaultAddresses().ExternalUSDCAddr t.Run("less than quorum votes keeps inbound pending", func(t *testing.T) { app, ctx, vals, inbound, coreVals, _ := setupInboundBridgePayloadTest(t, 4) @@ -198,9 +200,9 @@ func TestInboundSyntheticBridgePayload(t *testing.T) { Sender: utils.GetDefaultAddresses().TargetAddr2, Recipient: "", Amount: "1000000", - AssetAddr: prc20Address.String(), + AssetAddr: usdcAddress.String(), LogIndex: "1", - TxType: uexecutortypes.InboundTxType_FUNDS_AND_PAYLOAD, + TxType: uexecutortypes.TxType_FUNDS_AND_PAYLOAD, UniversalPayload: validUP, VerificationData: "", } diff --git a/test/integration/uexecutor/inbound_synthetic_bridge_test.go b/test/integration/uexecutor/inbound_synthetic_bridge_test.go index 4562f3f0..ec760987 100644 --- a/test/integration/uexecutor/inbound_synthetic_bridge_test.go +++ b/test/integration/uexecutor/inbound_synthetic_bridge_test.go @@ -49,10 +49,11 @@ func setupInboundBridgeTest(t *testing.T, numVals int) (*app.ChainApp, sdk.Conte prc20Address := utils.GetDefaultAddresses().PRC20USDCAddr testAddress := utils.GetDefaultAddresses().DefaultTestAddr + usdcAddress := utils.GetDefaultAddresses().ExternalUSDCAddr tokenConfigTest := uregistrytypes.TokenConfig{ Chain: "eip155:11155111", - Address: prc20Address.String(), + Address: usdcAddress.String(), Name: "USD Coin", Symbol: "USDC", Decimals: 6, @@ -113,11 +114,14 @@ func setupInboundBridgeTest(t *testing.T, numVals int) (*app.ChainApp, sdk.Conte Sender: testAddress, Recipient: testAddress, Amount: "1000000", - AssetAddr: prc20Address.String(), + AssetAddr: usdcAddress.String(), LogIndex: "1", - TxType: 
uexecutortypes.InboundTxType_FUNDS, + TxType: uexecutortypes.TxType_FUNDS, UniversalPayload: nil, VerificationData: "", + RevertInstructions: &uexecutortypes.RevertInstructions{ + FundRecipient: testAddress, + }, } return app, ctx, universalVals, inbound, validators @@ -359,4 +363,81 @@ func TestInboundSyntheticBridge(t *testing.T) { } t.Logf("All %d pc_tx.tx_hash values are unique", len(txHashes)) }) + + t.Run("creates outbound revert when pre-deposit step fails", func(t *testing.T) { + app, ctx, vals, inbound, coreVals := setupInboundBridgeTest(t, 4) + + // --- Remove token config to force pre-deposit failure + app.UregistryKeeper.RemoveTokenConfig(ctx, inbound.SourceChain, inbound.AssetAddr) + + // reach quorum + for i := 0; i < 3; i++ { + valAddr, err := sdk.ValAddressFromBech32(coreVals[i].OperatorAddress) + require.NoError(t, err) + coreValAcc := sdk.AccAddress(valAddr).String() + + err = utils.ExecVoteInbound(t, ctx, app, vals[i], coreValAcc, inbound) + require.NoError(t, err) + } + + // Fetch universal tx + q := uexecutorkeeper.Querier{Keeper: app.UexecutorKeeper} + resp, err := q.GetUniversalTx( + sdk.WrapSDKContext(ctx), + &uexecutortypes.QueryGetUniversalTxRequest{ + Id: uexecutortypes.GetInboundUniversalTxKey(*inbound), + }, + ) + require.NoError(t, err) + require.NotNil(t, resp.UniversalTx) + + // --- Assert outbound revert exists + foundRevert := false + for _, ob := range resp.UniversalTx.OutboundTx { + if ob.TxType == uexecutortypes.TxType_INBOUND_REVERT { + foundRevert = true + require.Equal(t, inbound.SourceChain, ob.DestinationChain) + require.Equal(t, inbound.Amount, ob.Amount) + require.Equal(t, inbound.AssetAddr, ob.ExternalAssetAddr) + } + } + + require.True(t, foundRevert, "expected INBOUND_REVERT outbound to be created") + }) + + t.Run("does not create outbound revert on successful inbound execution", func(t *testing.T) { + app, ctx, vals, inbound, coreVals := setupInboundBridgeTest(t, 4) + + // reach quorum (happy path) + for i := 0; i < 
3; i++ { + valAddr, err := sdk.ValAddressFromBech32(coreVals[i].OperatorAddress) + require.NoError(t, err) + coreValAcc := sdk.AccAddress(valAddr).String() + + err = utils.ExecVoteInbound(t, ctx, app, vals[i], coreValAcc, inbound) + require.NoError(t, err) + } + + // Fetch universal tx + q := uexecutorkeeper.Querier{Keeper: app.UexecutorKeeper} + resp, err := q.GetUniversalTx( + sdk.WrapSDKContext(ctx), + &uexecutortypes.QueryGetUniversalTxRequest{ + Id: uexecutortypes.GetInboundUniversalTxKey(*inbound), + }, + ) + require.NoError(t, err) + require.NotNil(t, resp.UniversalTx) + + // --- Assert NO inbound revert exists + for _, ob := range resp.UniversalTx.OutboundTx { + require.NotEqual( + t, + uexecutortypes.TxType_INBOUND_REVERT, + ob.TxType, + "should not create inbound revert on successful execution", + ) + } + }) + } diff --git a/test/integration/uexecutor/migrate_uea_test.go b/test/integration/uexecutor/migrate_uea_test.go deleted file mode 100644 index cf6ab9d1..00000000 --- a/test/integration/uexecutor/migrate_uea_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package integrationtest - -import ( - "testing" - - "cosmossdk.io/math" - "github.com/ethereum/go-ethereum/common" - utils "github.com/pushchain/push-chain-node/test/utils" - uexecutorkeeper "github.com/pushchain/push-chain-node/x/uexecutor/keeper" - uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" - uregistrytypes "github.com/pushchain/push-chain-node/x/uregistry/types" - "github.com/stretchr/testify/require" -) - -func TestMigrateUEA(t *testing.T) { - app, ctx, _ := utils.SetAppWithValidators(t) - - chainConfigTest := uregistrytypes.ChainConfig{ - Chain: "eip155:11155111", - VmType: uregistrytypes.VmType_EVM, - PublicRpcUrl: "https://sepolia.drpc.org", - GatewayAddress: "0x28E0F09bE2321c1420Dc60Ee146aACbD68B335Fe", - BlockConfirmation: &uregistrytypes.BlockConfirmation{ - FastInbound: 5, - StandardInbound: 12, - }, - GatewayMethods: 
[]*uregistrytypes.GatewayMethods{&uregistrytypes.GatewayMethods{ - Name: "addFunds", - Identifier: "", - EventIdentifier: "0xb28f49668e7e76dc96d7aabe5b7f63fecfbd1c3574774c05e8204e749fd96fbd", - }}, - Enabled: &uregistrytypes.ChainEnabled{ - IsInboundEnabled: true, - IsOutboundEnabled: true, - }, - } - - app.UregistryKeeper.AddChainConfig(ctx, &chainConfigTest) - - params := app.FeeMarketKeeper.GetParams(ctx) - params.BaseFee = math.LegacyNewDec(1000000000) - app.FeeMarketKeeper.SetParams(ctx, params) - - ms := uexecutorkeeper.NewMsgServerImpl(app.UexecutorKeeper) - - t.Run("Success!", func(t *testing.T) { - migratedAddress, newEVMImplAddr := utils.DeployMigrationContract(t, app, ctx) - - validUA := &uexecutortypes.UniversalAccountId{ - ChainNamespace: "eip155", - ChainId: "11155111", - Owner: "0x778d3206374f8ac265728e18e3fe2ae6b93e4ce4", - } - - validTxHash := "0x770f8df204a925dbfc3d73c7d532c832bd5fe78ed813835b365320e65b105ec2" - - validMP := &uexecutortypes.MigrationPayload{ - Migration: migratedAddress.Hex(), - Nonce: "0", - Deadline: "0", - } - - msgDeploy := &uexecutortypes.MsgDeployUEA{ - Signer: "cosmos1xpurwdecvsenyvpkxvmnge3cv93nyd34xuersef38pjnxen9xfsk2dnz8yek2drrv56qmn2ak9", - UniversalAccountId: validUA, - TxHash: validTxHash, - } - - deployUEAResponse, err := ms.DeployUEA(ctx, msgDeploy) - require.NoError(t, err) - - msgMint := &uexecutortypes.MsgMintPC{ - Signer: "cosmos1xpurwdecvsenyvpkxvmnge3cv93nyd34xuersef38pjnxen9xfsk2dnz8yek2drrv56qmn2ak9", - UniversalAccountId: validUA, - TxHash: validTxHash, - } - - _, err = ms.MintPC(ctx, msgMint) - require.NoError(t, err) - - msg := &uexecutortypes.MsgMigrateUEA{ - Signer: "cosmos1xpurwdecvsenyvpkxvmnge3cv93nyd34xuersef38pjnxen9xfsk2dnz8yek2drrv56qmn2ak9", - UniversalAccountId: validUA, - MigrationPayload: validMP, - Signature: "0xd1d343e944b77d71542ff15b545244464d0a9bb5606a69ca97d123abc52ec84a54cd46e50cf771fcdf40df0cdb047c50c1dcc17f6482d5def3895ad94e0b1cad1c", - } - - _, err = ms.MigrateUEA(ctx, msg) - 
require.NoError(t, err) - - logicAfterMigration := app.EVMKeeper.GetState(ctx, common.BytesToAddress(deployUEAResponse.UEA[12:]), common.HexToHash("0x868a771a75a4aa6c2be13e9a9617cb8ea240ed84a3a90c8469537393ec3e115d")) - require.Equal(t, newEVMImplAddr, common.BytesToAddress(logicAfterMigration.Bytes())) - - }) - t.Run("Invalid Migration Payload!", func(t *testing.T) { - validUA := &uexecutortypes.UniversalAccountId{ - ChainNamespace: "eip155", - ChainId: "11155111", - Owner: "0x778d3206374f8ac265728e18e3fe2ae6b93e4ce4", - } - - // validTxHash := "0x770f8df204a925dbfc3d73c7d532c832bd5fe78ed813835b365320e65b105ec2" - - validMP := &uexecutortypes.MigrationPayload{ - Migration: "", - Nonce: "1", - Deadline: "9999999999", - } - - msg := &uexecutortypes.MsgMigrateUEA{ - Signer: "cosmos1xpurwdecvsenyvpkxvmnge3cv93nyd34xuersef38pjnxen9xfsk2dnz8yek2drrv56qmn2ak9", - UniversalAccountId: validUA, - MigrationPayload: validMP, - } - - _, err := ms.MigrateUEA(ctx, msg) - require.ErrorContains(t, err, "invalid migration payload") - - }) - - t.Run("Invalid Signature Data", func(t *testing.T) { - validUA := &uexecutortypes.UniversalAccountId{ - ChainNamespace: "eip155", - ChainId: "11155111", - Owner: "0x778d3206374f8ac265728e18e3fe2ae6b93e4ce4", - } - - // validTxHash := "0x770f8df204a925dbfc3d73c7d532c832bd5fe78ed813835b365320e65b105ec2" - - validMP := &uexecutortypes.MigrationPayload{ - Migration: "0x527F3692F5C53CfA83F7689885995606F93b6164", - Nonce: "1", - Deadline: "9999999999", - } - - msg := &uexecutortypes.MsgMigrateUEA{ - Signer: "cosmos1xpurwdecvsenyvpkxvmnge3cv93nyd34xuersef38pjnxen9xfsk2dnz8yek2drrv56qmn2ak9", - UniversalAccountId: validUA, - MigrationPayload: validMP, - Signature: "0xZZZZ", - } - - _, err := ms.MigrateUEA(ctx, msg) - require.ErrorContains(t, err, "invalid signature format") - - }) -} diff --git a/test/integration/uexecutor/vote_outbound_test.go b/test/integration/uexecutor/vote_outbound_test.go new file mode 100644 index 00000000..a9479420 --- 
/dev/null +++ b/test/integration/uexecutor/vote_outbound_test.go @@ -0,0 +1,290 @@ +package integrationtest + +import ( + "fmt" + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + authz "github.com/cosmos/cosmos-sdk/x/authz" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/stretchr/testify/require" + + "github.com/pushchain/push-chain-node/app" + utils "github.com/pushchain/push-chain-node/test/utils" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" +) + +func setupOutboundVotingTest( + t *testing.T, + numVals int, +) ( + *app.ChainApp, + sdk.Context, + []string, // universal validators + string, // utxId + *uexecutortypes.OutboundTx, // outbound + []stakingtypes.Validator, // core validators +) { + + app, ctx, universalVals, inbound, coreVals, _ := + setupInboundInitiatedOutboundTest(t, numVals) + + // reach quorum for inbound + for i := 0; i < 3; i++ { + valAddr, err := sdk.ValAddressFromBech32(coreVals[i].OperatorAddress) + require.NoError(t, err) + + coreAcc := sdk.AccAddress(valAddr).String() + err = utils.ExecVoteInbound( + t, + ctx, + app, + universalVals[i], + coreAcc, + inbound, + ) + require.NoError(t, err) + } + + utxId := uexecutortypes.GetInboundUniversalTxKey(*inbound) + + utx, found, err := app.UexecutorKeeper.GetUniversalTx(ctx, utxId) + require.NoError(t, err) + require.True(t, found) + require.Len(t, utx.OutboundTx, 1) + + return app, ctx, universalVals, utxId, utx.OutboundTx[0], coreVals +} + +func TestOutboundVoting(t *testing.T) { + + t.Run("less than quorum outbound votes keeps outbound pending", func(t *testing.T) { + app, ctx, vals, utxId, outbound, coreVals := + setupOutboundVotingTest(t, 4) + + // grant authz for outbound voting + for i, val := range coreVals { + accAddr, err := sdk.ValAddressFromBech32(val.OperatorAddress) + require.NoError(t, err) + + coreAcc := sdk.AccAddress(accAddr) + uniAcc := sdk.MustAccAddressFromBech32(vals[i]) + + auth := 
authz.NewGenericAuthorization( + sdk.MsgTypeURL(&uexecutortypes.MsgVoteOutbound{}), + ) + exp := ctx.BlockTime().Add(time.Hour) + + err = app.AuthzKeeper.SaveGrant(ctx, uniAcc, coreAcc, auth, &exp) + require.NoError(t, err) + } + + // only 1 vote + valAddr, _ := sdk.ValAddressFromBech32(coreVals[0].OperatorAddress) + coreAcc := sdk.AccAddress(valAddr).String() + + err := utils.ExecVoteOutbound( + t, + ctx, + app, + vals[0], + coreAcc, + utxId, + outbound, + true, + "", + ) + require.NoError(t, err) + + utx, _, err := app.UexecutorKeeper.GetUniversalTx(ctx, utxId) + require.NoError(t, err) + require.Equal( + t, + uexecutortypes.Status_PENDING, + utx.OutboundTx[0].OutboundStatus, + ) + }) + + t.Run("quorum reached finalizes outbound successfully", func(t *testing.T) { + app, ctx, vals, utxId, outbound, coreVals := + setupOutboundVotingTest(t, 4) + + for i := 0; i < 3; i++ { + valAddr, err := sdk.ValAddressFromBech32(coreVals[i].OperatorAddress) + require.NoError(t, err) + + coreAcc := sdk.AccAddress(valAddr).String() + err = utils.ExecVoteOutbound( + t, + ctx, + app, + vals[i], + coreAcc, + utxId, + outbound, + true, + "", + ) + require.NoError(t, err) + } + + utx, _, err := app.UexecutorKeeper.GetUniversalTx(ctx, utxId) + require.NoError(t, err) + + ob := utx.OutboundTx[0] + require.Equal(t, uexecutortypes.Status_OBSERVED, ob.OutboundStatus) + require.NotNil(t, ob.ObservedTx) + require.True(t, ob.ObservedTx.Success) + }) + + t.Run("duplicate outbound vote fails", func(t *testing.T) { + app, ctx, vals, utxId, outbound, coreVals := + setupOutboundVotingTest(t, 4) + + valAddr, _ := sdk.ValAddressFromBech32(coreVals[0].OperatorAddress) + coreAcc := sdk.AccAddress(valAddr).String() + + err := utils.ExecVoteOutbound( + t, + ctx, + app, + vals[0], + coreAcc, + utxId, + outbound, + true, + "", + ) + require.NoError(t, err) + + err = utils.ExecVoteOutbound( + t, + ctx, + app, + vals[0], + coreAcc, + utxId, + outbound, + true, + "", + ) + require.Error(t, err) + 
require.Contains(t, err.Error(), "already voted") + }) + + t.Run("vote after outbound finalized fails", func(t *testing.T) { + app, ctx, vals, utxId, outbound, coreVals := + setupOutboundVotingTest(t, 4) + + // finalize + for i := 0; i < 3; i++ { + valAddr, _ := sdk.ValAddressFromBech32(coreVals[i].OperatorAddress) + coreAcc := sdk.AccAddress(valAddr).String() + + err := utils.ExecVoteOutbound( + t, + ctx, + app, + vals[i], + coreAcc, + utxId, + outbound, + true, + "", + ) + require.NoError(t, err) + } + + // extra vote + valAddr, _ := sdk.ValAddressFromBech32(coreVals[3].OperatorAddress) + coreAcc := sdk.AccAddress(valAddr).String() + + err := utils.ExecVoteOutbound( + t, + ctx, + app, + vals[3], + coreAcc, + utxId, + outbound, + true, + "", + ) + require.Error(t, err) + require.Contains(t, err.Error(), "already finalized") + }) + + t.Run("outbound failure triggers revert execution", func(t *testing.T) { + app, ctx, vals, utxId, outbound, coreVals := + setupOutboundVotingTest(t, 4) + + // Reach quorum with FAILED observation + for i := 0; i < 3; i++ { + valAddr, _ := sdk.ValAddressFromBech32(coreVals[i].OperatorAddress) + coreAcc := sdk.AccAddress(valAddr).String() + + err := utils.ExecVoteOutbound( + t, + ctx, + app, + vals[i], + coreAcc, + utxId, + outbound, + false, + "execution reverted", // revert reason + ) + require.NoError(t, err) + } + + utx, _, err := app.UexecutorKeeper.GetUniversalTx(ctx, utxId) + require.NoError(t, err) + + fmt.Println(utx) + + ob := utx.OutboundTx[0] + + require.Equal(t, uexecutortypes.Status_REVERTED, ob.OutboundStatus) + require.NotNil(t, ob.PcRevertExecution) + + pc := ob.PcRevertExecution + require.Equal(t, "SUCCESS", pc.Status) + require.NotEmpty(t, pc.TxHash) + }) + + t.Run("revert recipient defaults to sender when revert instructions missing", func(t *testing.T) { + app, ctx, vals, utxId, outbound, coreVals := + setupOutboundVotingTest(t, 4) + + // explicitly remove revert instructions + outbound.RevertInstructions = nil + + 
for i := 0; i < 3; i++ { + valAddr, _ := sdk.ValAddressFromBech32(coreVals[i].OperatorAddress) + coreAcc := sdk.AccAddress(valAddr).String() + + err := utils.ExecVoteOutbound( + t, + ctx, + app, + vals[i], + coreAcc, + utxId, + outbound, + false, + "failed", + ) + require.NoError(t, err) + } + + utx, _, err := app.UexecutorKeeper.GetUniversalTx(ctx, utxId) + require.NoError(t, err) + + ob := utx.OutboundTx[0] + + require.Equal(t, uexecutortypes.Status_REVERTED, ob.OutboundStatus) + require.Equal(t, outbound.Sender, ob.PcRevertExecution.Sender) + }) + +} diff --git a/test/utils/bytecode.go b/test/utils/bytecode.go index daebaa07..7e8177be 100644 --- a/test/utils/bytecode.go +++ b/test/utils/bytecode.go @@ -10,6 +10,8 @@ const HANDLER_CONTRACT_BYTECODE = "6080604052600436106102ae575f3560e01c80638456c const PRC20_CREATION_BYTECODE = "608060405234801561000f575f80fd5b50600436106101a5575f3560e01c806374be2150116100e8578063c701262611610093578063eddeb1231161006e578063eddeb12314610457578063f687d12a1461046a578063f97c007a1461047d578063fc5fecd514610486575f80fd5b8063c7012626146103cb578063d9eeebed146103de578063dd62ed3e14610412575f80fd5b8063b84c8246116100c3578063b84c82461461037e578063c47f002714610391578063c6f1b7e7146103a4575f80fd5b806374be21501461033c57806395d89b4114610363578063a9059cbb1461036b575f80fd5b806323b872dd1161015357806347e7ef241161012e57806347e7ef24146102a1578063609c92b8146102b4578063701cd43b146102e857806370a0823114610307575f80fd5b806323b872dd14610266578063313ce5671461027957806342966c681461028e575f80fd5b8063091d278811610183578063091d278814610224578063095ea7b31461023b57806318160ddd1461025e575f80fd5b8063044d9371146101a957806306fdde03146101fa57806307e2bd8d1461020f575b5f80fd5b6101d07f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b610202610499565b6040516101f1919061143c565b61022261021d366004611479565b610529565b005b61022d60015481565b6040519081526020016101f1565b61024e6102493
66004611494565b6105ef565b60405190151581526020016101f1565b60065461022d565b61024e6102743660046114be565b6106ae565b60055460405160ff90911681526020016101f1565b61024e61029c3660046114fc565b61079b565b61024e6102af366004611494565b6107ae565b6102db7f000000000000000000000000000000000000000000000000000000000000000081565b6040516101f19190611513565b5f546101d09073ffffffffffffffffffffffffffffffffffffffff1681565b61022d610315366004611479565b73ffffffffffffffffffffffffffffffffffffffff165f9081526007602052604090205490565b61022d7f000000000000000000000000000000000000000000000000000000000000000081565b610202610879565b61024e610379366004611494565b610888565b61022261038c36600461157f565b61089d565b61022261039f36600461157f565b61091c565b6101d07f000000000000000000000000000000000000000000000000000000000000000081565b61024e6103d936600461166f565b610997565b6103e6610af9565b6040805173ffffffffffffffffffffffffffffffffffffffff90931683526020830191909152016101f1565b61022d6104203660046116e1565b73ffffffffffffffffffffffffffffffffffffffff9182165f90815260086020908152604080832093909416825291909152205490565b6102226104653660046114fc565b610d04565b6102226104783660046114fc565b610da8565b61022d60025481565b6103e66104943660046114fc565b610e4c565b6060600380546104a890611718565b80601f01602080910402602001604051908101604052809291908181526020018280546104d490611718565b801561051f5780601f106104f65761010080835404028352916020019161051f565b820191905f5260205f20905b81548152906001019060200180831161050257829003601f168201915b5050505050905090565b73ffffffffffffffffffffffffffffffffffffffff8116610576576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83169081179091556040519081527f412d5a95dc32cbb6bd9319bccf1bc1febeda71e734893a440f1f6853252fe99f906020015b60405180910390a150565b5f73ffffffffffffffffffffffffffffffffffffffff831661063d576040517fd92e233d000000000000000000000000000000000000000000000
00000000000815260040160405180910390fd5b335f81815260086020908152604080832073ffffffffffffffffffffffffffffffffffffffff881680855290835292819020869055518581529192917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925910160405180910390a35060015b92915050565b5f6106ba848484611055565b73ffffffffffffffffffffffffffffffffffffffff84165f90815260086020908152604080832033845290915290205482811015610724576040517f10bad14700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff85165f81815260086020908152604080832033808552908352928190208786039081905590519081529192917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925910160405180910390a3506001949350505050565b5f6107a6338361119c565b506001919050565b5f6107b983836112ed565b6040517fffffffffffffffffffffffffffffffffffffffff0000000000000000000000007f000000000000000000000000000000000000000000000000000000000000000060601b1660208201527f67fc7bdaed5b0ec550d8706b87d60568ab70c6b781263c70101d54cd1564aab390603401604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290526108689186908690611769565b60405180910390a150600192915050565b6060600480546104a890611718565b5f610894338484611055565b50600192915050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461090c576040517f6626eaef00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600461091882826117ef565b5050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461098b576040517f6626eaef00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600361091882826117ef565b5f805f6109a2610af9565b6040517f23b872dd00000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000811660248301526044820183905
29294509092505f918416906323b872dd906064016020604051808303815f875af1158015610a42573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610a669190611906565b905080610a9f576040517f0a7cd6d600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610aa9338661119c565b7f9ffbffc04a397460ee1dbe8c9503e098090567d6b7f4b3c02a8617d800b6d9553388888886600254604051610ae496959493929190611925565b60405180910390a15060019695505050505050565b5f80546040517f7471e6970000000000000000000000000000000000000000000000000000000081527f00000000000000000000000000000000000000000000000000000000000000006004820152829173ffffffffffffffffffffffffffffffffffffffff1690637471e69790602401602060405180830381865afa158015610b85573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610ba991906119a5565b915073ffffffffffffffffffffffffffffffffffffffff8216610bf8576040517f3d5729c100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f80546040517fd7fd7afb0000000000000000000000000000000000000000000000000000000081527f0000000000000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff9091169063d7fd7afb90602401602060405180830381865afa158015610c84573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610ca891906119c0565b9050805f03610ce3576040517fe661aed000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600254600154610cf39083611a04565b610cfd9190611a1b565b9150509091565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610d73576040517f6626eaef00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60028190556040518181527fef13af88e424b5d15f49c77758542c1938b08b8b95b91ed0751f98ba99000d8f906020016105e4565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610e17576040517f6626eaef0000000000000000000000000
0000000000000000000000000000000815260040160405180910390fd5b60018190556040518181527fff5788270f43bfc1ca41c503606d2594aa3023a1a7547de403a3e2f146a4a80a906020016105e4565b5f80546040517f7471e6970000000000000000000000000000000000000000000000000000000081527f00000000000000000000000000000000000000000000000000000000000000006004820152829173ffffffffffffffffffffffffffffffffffffffff1690637471e69790602401602060405180830381865afa158015610ed8573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610efc91906119a5565b915073ffffffffffffffffffffffffffffffffffffffff8216610f4b576040517f3d5729c100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f80546040517fd7fd7afb0000000000000000000000000000000000000000000000000000000081527f0000000000000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff9091169063d7fd7afb90602401602060405180830381865afa158015610fd7573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610ffb91906119c0565b9050805f03611036576040517fe661aed000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6002546110438583611a04565b61104d9190611a1b565b915050915091565b73ffffffffffffffffffffffffffffffffffffffff8316158061108c575073ffffffffffffffffffffffffffffffffffffffff8216155b156110c3576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83165f9081526007602052604090205481811015611122576040517ffe382aa700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8085165f8181526007602052604080822086860390559286168082529083902080548601905591517fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9061118e9086815260200190565b60405180910390a350505050565b73ffffffffffffffffffffffffffffffffffffffff82166111e9576040517fd92e233d000000000000000000000000000000000000000000000000000000008152600401604
05180910390fd5b805f03611222576040517f1f2a200500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82165f9081526007602052604090205481811015611281576040517ffe382aa700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83165f8181526007602090815260408083208686039055600680548790039055518581529192917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3505050565b73ffffffffffffffffffffffffffffffffffffffff821661133a576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805f03611373576040517f1f2a200500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600680548201905573ffffffffffffffffffffffffffffffffffffffff82165f818152600760209081526040808320805486019055518481527fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a35050565b5f81518084525f5b818110156113ff576020818501810151868301820152016113e3565b505f6020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081525f61144e60208301846113db565b9392505050565b73ffffffffffffffffffffffffffffffffffffffff81168114611476575f80fd5b50565b5f60208284031215611489575f80fd5b813561144e81611455565b5f80604083850312156114a5575f80fd5b82356114b081611455565b946020939093013593505050565b5f805f606084860312156114d0575f80fd5b83356114db81611455565b925060208401356114eb81611455565b929592945050506040919091013590565b5f6020828403121561150c575f80fd5b5035919050565b602081016003831061154c577f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b91905290565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f6020828403121561158f575f80fd5b813567ffffffffffffffff8111156115a5575f80fd5b8201601f810184136115b5575f80fd5b803567ffffffffffffffff8111156115cf576115c
f611552565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561163b5761163b611552565b604052818152828201602001861015611652575f80fd5b816020840160208301375f91810160200191909152949350505050565b5f805f60408486031215611681575f80fd5b833567ffffffffffffffff811115611697575f80fd5b8401601f810186136116a7575f80fd5b803567ffffffffffffffff8111156116bd575f80fd5b8660208284010111156116ce575f80fd5b6020918201979096509401359392505050565b5f80604083850312156116f2575f80fd5b82356116fd81611455565b9150602083013561170d81611455565b809150509250929050565b600181811c9082168061172c57607f821691505b602082108103611763577f4e487b71000000000000000000000000000000000000000000000000000000005f52602260045260245ffd5b50919050565b606081525f61177b60608301866113db565b73ffffffffffffffffffffffffffffffffffffffff9490941660208301525060400152919050565b601f8211156117ea57805f5260205f20601f840160051c810160208510156117c85750805b601f840160051c820191505b818110156117e7575f81556001016117d4565b50505b505050565b815167ffffffffffffffff81111561180957611809611552565b61181d816118178454611718565b846117a3565b6020601f82116001811461186e575f83156118385750848201515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600385901b1c1916600184901b1784556117e7565b5f848152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08516915b828110156118bb578785015182556020948501946001909201910161189b565b50848210156118f757868401517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b60f8161c191681555b50505050600190811b01905550565b5f60208284031215611916575f80fd5b8151801515811461144e575f80fd5b73ffffffffffffffffffffffffffffffffffffffff8716815260a060208201528460a0820152848660c08301375f60c086830101525f60c07fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8801168301019050846040830152836060830152826080830152979650505050505050565b5f602082840312156119b5575
f80fd5b815161144e81611455565b5f602082840312156119d0575f80fd5b5051919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b80820281158282048414176106a8576106a86119d7565b808201808211156106a8576106a86119d756fea26469706673582212206be692aa215f21df823c52c689a11caa03254730bfade7b8b36788d6a72ba61764736f6c634300081a0033" +const UNIVERSAL_GATEWAY_PC_BYTECODE = "6080806040526004361015610012575f80fd5b5f3560e01c90816301ffc9a714610f6857508063248a9ca314610ef85780632f2ff15d14610e7d57806336568abe14610df55780633f4ba83a14610d1d5780635c975abb14610cbe5780637f57735014610c6e5780638456cb5914610b935780638e61856014610a6a57806391d14854146109d6578063a217fddf1461099e578063affed0e014610963578063b3ca1fbc1461058e578063c1ee135a1461053d578063d547741f146104bb578063e63ab1e9146104635763f8c8765e146100d7575f80fd5b3461045f5760807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f5761010e611047565b610116611024565b9060443573ffffffffffffffffffffffffffffffffffffffff811680910361045f576064359173ffffffffffffffffffffffffffffffffffffffff831680930361045f577ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00549360ff8560401c16159467ffffffffffffffff811680159081610457575b600114908161044d575b159081610444575b5061041c578560017fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000008316177ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00556103c7575b5073ffffffffffffffffffffffffffffffffffffffff82161580156103a9575b80156103a1575b8015610399575b61037157610281610287926102406117ef565b6102486117ef565b6102506117ef565b60017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f005561027c6117ef565b6112c6565b506113ad565b507fffffffffffffffffffffffff00000000000000000000000000000000000000005f5416175f557fffffffffffffffffffffffff000000000000000000000000000000000000000060015416176001556102de57005b7fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a005
4167ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00557fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2602060405160018152a1005b7fd92e233d000000000000000000000000000000000000000000000000000000005f5260045ffd5b50831561022d565b508215610226565b5073ffffffffffffffffffffffffffffffffffffffff81161561021f565b7fffffffffffffffffffffffffffffffffffffffffffffff0000000000000000001668010000000000000001177ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00555f6101ff565b7ff92ee8a9000000000000000000000000000000000000000000000000000000005f5260045ffd5b9050155f6101ac565b303b1591506101a4565b87915061019a565b5f80fd5b3461045f575f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f5760206040517f65d7a28e3265b37a6474929f336521b332c1681b933f6cb9f3376673440d862a8152f35b3461045f5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f5761053b6004356104f8611024565b90610536610531825f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b626800602052600160405f20015490565b611240565b6115c3565b005b3461045f575f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f57602073ffffffffffffffffffffffffffffffffffffffff60015416604051908152f35b3461045f5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f5760043567ffffffffffffffff811161045f578060040160c07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc833603011261045f5761060761171f565b60027f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f00541461093b5761063981611772565b6040805191610648828461106a565b600f83527f6569703135353a313131353531313100000000000000000000000000000000006020840152600254946001860180871161090e576002556024810193610692856110ab565b9360448301359460848401936106a8858a6110cc565b97909a67ffffffffffffffff89116108e1578451956106ef601f8b017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0166020018861106a565b8987526020870197368e8c011161045f576107e36107da6107f
99f8f9d60a499610807976107c273ffffffffffffffffffffffffffffffffffffffff9f61082f9f908f915f6020886107cb996107d29b8637830101525190209281519373ffffffffffffffffffffffffffffffffffffffff928593602085019733895216908401528d6060840152608083015260c060a083015261079060e083018c61111d565b9060c0830152037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0810183528261106a565b5190209f6110ab565b9d806110cc565b93909f6110cc565b979096016110ab565b9c87519e8f93610140855261014085019061111d565b92602081850391015261117a565b938b0152600160608b015261520860808b015261520860a08b015289830360c08b015261117a565b9461520860e08801521661010086015260048110156108b4577fd8352005a8b681f8cf230f28f60cef58ebe9a25d92e88f72cb6cb78799a6ca6d9173ffffffffffffffffffffffffffffffffffffffff9161012087015216938033940390a460017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f0055005b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b7f3ee5aeb5000000000000000000000000000000000000000000000000000000005f5260045ffd5b3461045f575f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f576020600254604051908152f35b3461045f575f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f5760206040515f8152f35b3461045f5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f57610a0d611024565b6004355f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205273ffffffffffffffffffffffffffffffffffffffff60405f2091165f52602052602060ff60405f2054166040519015158152f35b3461045f5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f57610aa1611047565b335f9081527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d602052604090205460ff1615610b635773ffffffffffffffffffffffffffffffffffffffff90610af56
1171f565b1680156103715773ffffffffffffffffffffffffffffffffffffffff600154827fffffffffffffffffffffffff0000000000000000000000000000000000000000821617600155167fd0ef78509e8ed82196200827f0d10672cfab667994f990456881f413c1c475eb5f80a3005b7fe2517d3f000000000000000000000000000000000000000000000000000000005f52336004525f60245260445ffd5b3461045f575f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f57610bc96111b8565b610bd161171f565b610bd961171f565b60017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f033005416177fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f03300557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2586020604051338152a1005b3461045f575f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f57602073ffffffffffffffffffffffffffffffffffffffff5f5416604051908152f35b3461045f575f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f57602060ff7fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f0330054166040519015158152f35b3461045f575f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f57610d536111b8565b610d5b6116cb565b610d636116cb565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f0330054167fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f03300557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa6020604051338152a1005b3461045f5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f57610e2c611024565b3373ffffffffffffffffffffffffffffffffffffffff821603610e555761053b906004356115c3565b7f6697b232000000000000000000000000000000000000000000000000000000005f5260045ffd5b3461045f5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f5761053b600435610eba611024565b90610ef3610531825f527f02dd7bc7dec4dceedda775e58dd541e08a116c6
c53815c0bd028192f7b626800602052600160405f20015490565b6114b1565b3461045f5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f576020610f606004355f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b626800602052600160405f20015490565b604051908152f35b3461045f5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261045f57600435907fffffffff00000000000000000000000000000000000000000000000000000000821680920361045f57817f7965db0b0000000000000000000000000000000000000000000000000000000060209314908115610ffa575b5015158152f35b7f01ffc9a70000000000000000000000000000000000000000000000000000000091501483610ff3565b6024359073ffffffffffffffffffffffffffffffffffffffff8216820361045f57565b6004359073ffffffffffffffffffffffffffffffffffffffff8216820361045f57565b90601f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0910116810190811067ffffffffffffffff8211176108e157604052565b3573ffffffffffffffffffffffffffffffffffffffff8116810361045f5790565b9035907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18136030182121561045f570180359067ffffffffffffffff821161045f5760200191813603831361045f57565b91908251928382525f5b8481106111655750507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f845f6020809697860101520116010190565b80602080928401015182828601015201611127565b601f82602094937fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe093818652868601375f8582860101520116010190565b335f9081527f75442b0a96088b5456bc4ed01394c96a4feec0f883c9494257d76b96ab1c9b6b602052604090205460ff16156111f057565b7fe2517d3f000000000000000000000000000000000000000000000000000000005f52336004527f65d7a28e3265b37a6474929f336521b332c1681b933f6cb9f3376673440d862a60245260445ffd5b805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156112975750565b7fe2517d3f000000000000000000000000000000000000000000000000000000005f523360045260245
260445ffd5b73ffffffffffffffffffffffffffffffffffffffff81165f9081527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d602052604090205460ff166113a85773ffffffffffffffffffffffffffffffffffffffff165f8181527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d6020526040812080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790553391907f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d8180a4600190565b505f90565b73ffffffffffffffffffffffffffffffffffffffff81165f9081527f75442b0a96088b5456bc4ed01394c96a4feec0f883c9494257d76b96ab1c9b6b602052604090205460ff166113a85773ffffffffffffffffffffffffffffffffffffffff165f8181527f75442b0a96088b5456bc4ed01394c96a4feec0f883c9494257d76b96ab1c9b6b6020526040812080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790553391907f65d7a28e3265b37a6474929f336521b332c1681b933f6cb9f3376673440d862a907f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d9080a4600190565b805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260ff60405f205416155f146115bd57805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260405f2060017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082541617905573ffffffffffffffffffffffffffffffffffffffff339216907f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d5f80a4600190565b50505f90565b805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260ff60405f2054165f146115bd57805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260405f207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00815416905573ffffffffffffffffffffffffffffffffffffffff339216907ff6391f5c32d
9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b5f80a4600190565b60ff7fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f033005416156116f757565b7f8dfc202b000000000000000000000000000000000000000000000000000000005f5260045ffd5b60ff7fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f03300541661174a57565b7fd93c0665000000000000000000000000000000000000000000000000000000005f5260045ffd5b61177f60808201826110cc565b15801592604001351591506117e85781806117e0575b6117d957816117d1575b506117cc577fb4fa3fb3000000000000000000000000000000000000000000000000000000005f5260045ffd5b600190565b90505f61179f565b5050600390565b508015611795565b5050600290565b60ff7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a005460401c161561181e57565b7fd7e6bcf8000000000000000000000000000000000000000000000000000000005f5260045ffdfea26469706673582212206796924c3fa73cff81856f96b0134a35adc90b65de0eff7f3363998084206a5064736f6c634300081a0033" + const UEA_MIGRATION_BYTECODE = "608060405234801561000f575f80fd5b506004361061006f575f3560e01c80639538c4b31161004d5780639538c4b3146100e7578063a5df53a01461010b578063f6829c321461012a575f80fd5b806352fa5c221461007357806367f102111461007d5780638c9ec7f5146100c7575b5f80fd5b61007b610132565b005b60025461009d9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b60015461009d9073ffffffffffffffffffffffffffffffffffffffff1681565b6100fb6100f5366004610248565b3b151590565b60405190151581526020016100be565b5f5461009d9073ffffffffffffffffffffffffffffffffffffffff1681565b61007b61018a565b610e077f868a771a75a4aa6c2be13e9a9617cb8ea240ed84a3a90c8469537393ec3e115d81815560405190919081907f310ba5f1d2ed074b51e2eccd052a47ae9ab7c6b800d1fca3db3999d6a592ca03905f90a25050565b5f5473ffffffffffffffffffffffffffffffffffffffff1630036101da576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60025473ffffffffffffffffffffffffffffffffffffffff167f868a771a75a4aa6c2be13e9a9617c
b8ea240ed84a3a90c8469537393ec3e115d81815560405190919081907f310ba5f1d2ed074b51e2eccd052a47ae9ab7c6b800d1fca3db3999d6a592ca03905f90a25050565b5f60208284031215610258575f80fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461027b575f80fd5b939250505056fea2646970667358221220b7392b390eabe1d8398ca6a63c585fe969825199b6bfe2a285e4edf564c16b6a64736f6c634300081a0033" const ABI_MIGRATION = `[ diff --git a/test/utils/constants.go b/test/utils/constants.go index 1b27cc23..b2896ae2 100644 --- a/test/utils/constants.go +++ b/test/utils/constants.go @@ -8,15 +8,17 @@ const MintModule string = "mint" type Addresses struct { // Contract addresses - FactoryAddr common.Address - UEProxyAddr common.Address - EVMImplAddr common.Address - SVMImplAddr common.Address - NewEVMImplAddr common.Address - NewSVMImplAddr common.Address - HandlerAddr common.Address - PRC20USDCAddr common.Address - MigratedUEAAddr common.Address + FactoryAddr common.Address + UEProxyAddr common.Address + EVMImplAddr common.Address + SVMImplAddr common.Address + NewEVMImplAddr common.Address + NewSVMImplAddr common.Address + MigratedUEAAddr common.Address + HandlerAddr common.Address + PRC20USDCAddr common.Address + ExternalUSDCAddr common.Address + UniversalGatewayPCAddr common.Address // Account addresses (hex format) DefaultTestAddr string @@ -31,19 +33,21 @@ type TestConfig struct { func GetDefaultAddresses() Addresses { return Addresses{ - FactoryAddr: common.HexToAddress("0x00000000000000000000000000000000000000ea"), - UEProxyAddr: common.HexToAddress("0x0000000000000000000000000000000000000e09"), - EVMImplAddr: common.HexToAddress("0x0000000000000000000000000000000000000e01"), - SVMImplAddr: common.HexToAddress("0x0000000000000000000000000000000000000e03"), - NewEVMImplAddr: common.HexToAddress("0x0000000000000000000000000000000000000e07"), - NewSVMImplAddr: common.HexToAddress("0x0000000000000000000000000000000000000e05"), - HandlerAddr: common.HexToAddress("0x00000000000000000000000000000000000000C0"), 
- PRC20USDCAddr: common.HexToAddress("0x0000000000000000000000000000000000000e06"), - MigratedUEAAddr: common.HexToAddress("0x0000000000000000000000000000000000000d08"), - DefaultTestAddr: "0x778d3206374f8ac265728e18e3fe2ae6b93e4ce4", - CosmosTestAddr: "cosmos18pjnzwr9xdnx2vnpv5mxywfnv56xxef5cludl5", - TargetAddr: "\x86i\xbe\xd1!\xfe\xfa=\x9c\xf2\x82\x12s\xf4\x89\xe7\x17̩]", - TargetAddr2: "0x527F3692F5C53CfA83F7689885995606F93b6164", + FactoryAddr: common.HexToAddress("0x00000000000000000000000000000000000000ea"), + UEProxyAddr: common.HexToAddress("0x0000000000000000000000000000000000000e09"), + EVMImplAddr: common.HexToAddress("0x0000000000000000000000000000000000000e01"), + SVMImplAddr: common.HexToAddress("0x0000000000000000000000000000000000000e03"), + NewEVMImplAddr: common.HexToAddress("0x0000000000000000000000000000000000000e08"), + NewSVMImplAddr: common.HexToAddress("0x0000000000000000000000000000000000000e05"), + HandlerAddr: common.HexToAddress("0x00000000000000000000000000000000000000C0"), + PRC20USDCAddr: common.HexToAddress("0x0000000000000000000000000000000000000e06"), + ExternalUSDCAddr: common.HexToAddress("0x0000000000000000000000000000000000000e07"), + UniversalGatewayPCAddr: common.HexToAddress("0x00000000000000000000000000000000000000B0"), + MigratedUEAAddr: common.HexToAddress("0x0000000000000000000000000000000000000d08"), + DefaultTestAddr: "0x778d3206374f8ac265728e18e3fe2ae6b93e4ce4", + CosmosTestAddr: "cosmos18pjnzwr9xdnx2vnpv5mxywfnv56xxef5cludl5", + TargetAddr: "\x86i\xbe\xd1!\xfe\xfa=\x9c\xf2\x82\x12s\xf4\x89\xe7\x17̩]", + TargetAddr2: "0x527F3692F5C53CfA83F7689885995606F93b6164", } } diff --git a/test/utils/contracts_setup.go b/test/utils/contracts_setup.go index 2cc86a97..71374279 100644 --- a/test/utils/contracts_setup.go +++ b/test/utils/contracts_setup.go @@ -52,6 +52,10 @@ func setupUESystem( err = setupPrc20Contract(t, app, ctx, prc20ABI, opts, accounts) require.NoError(t, err) + // setup UniversalGatewayPC + err = 
setupUniversalGatewayPC(t, app, ctx, prc20ABI, opts) + require.NoError(t, err) + return nil } @@ -311,3 +315,67 @@ func DeployContract( return contractAddr } + +// --------------------------------------------------------------------------------------- +// NOTE: The UniversalGatewayPC contract deployed here is a TEST-ONLY version. +// +// The withdraw() and withdrawAndExecute() functions inside this test contract: +// +// - DO NOT run validation (_validateCommon) +// - DO NOT compute gas fees via UniversalCore +// - DO NOT pull PRC20 fees into VaultPC +// - DO NOT burn PRC20 tokens +// - DO NOT interact with any external contracts +// +// Instead, both functions simply **emit UniversalTxWithdraw with hardcoded values**: +// +// chainId = "eip155:11155111" +// gasToken = fixed test address +// gasFee = 111 +// +// This behavior is intentional because Cosmos integration tests only need to verify: +// - ABI correctness +// - Event emission structure +// - Outbound pipeline handling +// - UE/UEM processing logic on the Cosmos side +func setupUniversalGatewayPC( + t *testing.T, + app *app.ChainApp, + ctx sdk.Context, + gatewayABI abi.ABI, + opts AppSetupOptions, +) error { + + gatewayAddr := opts.Addresses.UniversalGatewayPCAddr + universalCoreAddr := opts.Addresses.HandlerAddr + vaultPCAddr := opts.Addresses.EVMImplAddr + + // 1. Deploy bytecode of UniversalGatewayPC at reserved address + _ = DeployContract( + t, + app, + ctx, + gatewayAddr, + UNIVERSAL_GATEWAY_PC_BYTECODE, + ) + + // 2. 
Manually set storage because initialize() cannot be used + // + // Storage layout: + // slot 0 → UNIVERSAL_CORE (address) + // slot 1 → VAULT_PC (address) + app.EVMKeeper.SetState( + ctx, + gatewayAddr, + common.BigToHash(big.NewInt(0)), // key + common.LeftPadBytes(universalCoreAddr.Bytes(), 32), // value []byte + ) + + app.EVMKeeper.SetState( + ctx, + gatewayAddr, + common.BigToHash(big.NewInt(1)), // key + common.LeftPadBytes(vaultPCAddr.Bytes(), 32), // value []byte + ) + return nil +} diff --git a/test/utils/helpers.go b/test/utils/helpers.go index 83c45c7a..4472bfff 100644 --- a/test/utils/helpers.go +++ b/test/utils/helpers.go @@ -1,10 +1,12 @@ package utils import ( + "fmt" "testing" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/pushchain/push-chain-node/app" + "github.com/stretchr/testify/require" uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" @@ -37,6 +39,45 @@ func ExecVoteInbound( return err } +func ExecVoteOutbound( + t *testing.T, + ctx sdk.Context, + app *app.ChainApp, + universalAddr string, // universal validator (grantee) + coreValAddr string, // core validator (signer) + utxId string, // universal tx id + outbound *uexecutortypes.OutboundTx, + success bool, + errorMsg string, +) error { + t.Helper() + + // Encode the real outbound tx_id (this is what validators vote on) + txIDHex, err := uexecutortypes.EncodeOutboundTxIDHex(utxId, outbound.Id) + require.NoError(t, err) + + observed := &uexecutortypes.OutboundObservation{ + Success: success, + ErrorMsg: errorMsg, + TxHash: fmt.Sprintf("0xobserved-%s", outbound.Id), + BlockHeight: 1, + } + + msg := &uexecutortypes.MsgVoteOutbound{ + Signer: coreValAddr, + TxId: txIDHex, + ObservedTx: observed, + } + + execMsg := authz.NewMsgExec( + sdk.MustAccAddressFromBech32(universalAddr), + []sdk.Msg{msg}, + ) + + _, err = app.AuthzKeeper.Exec(ctx, &execMsg) + return err +} + // ExecVoteGasPrice executes a MsgVoteGasPrice on behalf of the core validator // through the universal 
validator using authz Exec. func ExecVoteGasPrice( diff --git a/test/utils/setup_app.go b/test/utils/setup_app.go index 2c832db5..238e89c0 100644 --- a/test/utils/setup_app.go +++ b/test/utils/setup_app.go @@ -37,6 +37,8 @@ func SetAppWithValidators(t *testing.T) (*app.ChainApp, sdk.Context, sdk.Account ctx := app.BaseApp.NewContext(true) + ctx = ctx.WithChainID("push_42101-1") + // start with block height 1 ctx = ctx.WithBlockHeight(1) @@ -64,6 +66,8 @@ func SetAppWithMultipleValidators(t *testing.T, numVals int) (*app.ChainApp, sdk ctx := app.BaseApp.NewContext(true) + ctx = ctx.WithChainID("push_42101-1") + // start with block height 1 ctx = ctx.WithBlockHeight(1) diff --git a/universalClient/api/handlers.go b/universalClient/api/handlers.go index c1245742..7a594e62 100644 --- a/universalClient/api/handlers.go +++ b/universalClient/api/handlers.go @@ -1,29 +1,9 @@ package api -import ( - "encoding/json" - "net/http" -) +import "net/http" // handleHealth handles GET /health func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Write([]byte("OK")) } - -// handleChainConfigs handles GET /api/v1/chain-configs -func (s *Server) handleChainData(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - configs := s.client.GetAllChainData() - - response := queryResponse{ - Data: configs, - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(response) -} diff --git a/universalClient/api/handlers_test.go b/universalClient/api/handlers_test.go index 1af7d642..197c0cb0 100644 --- a/universalClient/api/handlers_test.go +++ b/universalClient/api/handlers_test.go @@ -1,21 +1,17 @@ package api import ( - "encoding/json" "net/http" "net/http/httptest" "testing" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestHandleHealth(t *testing.T) 
{ logger := zerolog.New(zerolog.NewTestWriter(t)) - mockClient := NewMockUniversalClient() server := &Server{ - client: mockClient, logger: logger, } @@ -29,44 +25,3 @@ func TestHandleHealth(t *testing.T) { assert.Equal(t, "OK", w.Body.String()) }) } - -func TestHandleChainData(t *testing.T) { - logger := zerolog.New(zerolog.NewTestWriter(t)) - mockClient := NewMockUniversalClient() - server := &Server{ - client: mockClient, - logger: logger, - } - - t.Run("GET chain data success", func(t *testing.T) { - req := httptest.NewRequest(http.MethodGet, "/api/v1/chain-data", nil) - w := httptest.NewRecorder() - - server.handleChainData(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "application/json", w.Header().Get("Content-Type")) - - var response map[string]interface{} - err := json.Unmarshal(w.Body.Bytes(), &response) - require.NoError(t, err) - - // Check that we have data field - assert.Contains(t, response, "data") - - // Verify the data is an array - data, ok := response["data"].([]interface{}) - assert.True(t, ok) - assert.Len(t, data, 2) - }) - - t.Run("Non-GET method not allowed", func(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/v1/chain-data", nil) - w := httptest.NewRecorder() - - server.handleChainData(w, req) - - assert.Equal(t, http.StatusMethodNotAllowed, w.Code) - assert.Contains(t, w.Body.String(), "Method not allowed") - }) -} \ No newline at end of file diff --git a/universalClient/api/routes.go b/universalClient/api/routes.go index ac636621..75e9ffa7 100644 --- a/universalClient/api/routes.go +++ b/universalClient/api/routes.go @@ -9,8 +9,5 @@ func (s *Server) setupRoutes() *http.ServeMux { // Health check endpoint mux.HandleFunc("/health", s.handleHealth) - // API v1 endpoints - mux.HandleFunc("/api/v1/chain-configs", s.handleChainData) - return mux } diff --git a/universalClient/api/routes_test.go b/universalClient/api/routes_test.go index f644a265..63a4e2e1 100644 --- 
a/universalClient/api/routes_test.go +++ b/universalClient/api/routes_test.go @@ -11,14 +11,12 @@ import ( func TestSetupRoutes(t *testing.T) { logger := zerolog.New(zerolog.NewTestWriter(t)) - mockClient := NewMockUniversalClient() server := &Server{ - client: mockClient, logger: logger, } - + mux := server.setupRoutes() - + // Test that all routes are registered correctly testCases := []struct { name string @@ -30,26 +28,21 @@ func TestSetupRoutes(t *testing.T) { path: "/health", expectedStatus: http.StatusOK, }, - { - name: "Chain configs endpoint", - path: "/api/v1/chain-configs", - expectedStatus: http.StatusOK, - }, { name: "Non-existent endpoint", path: "/api/v1/non-existent", expectedStatus: http.StatusNotFound, }, } - + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { req := httptest.NewRequest(http.MethodGet, tc.path, nil) w := httptest.NewRecorder() - + mux.ServeHTTP(w, req) - + assert.Equal(t, tc.expectedStatus, w.Code) }) } -} \ No newline at end of file +} diff --git a/universalClient/api/server.go b/universalClient/api/server.go index f358ec52..a98a496c 100644 --- a/universalClient/api/server.go +++ b/universalClient/api/server.go @@ -9,17 +9,15 @@ import ( "github.com/rs/zerolog" ) -// Server provides HTTP endpoints for querying configuration data +// Server provides HTTP endpoints type Server struct { - client universalClientInterface logger zerolog.Logger server *http.Server } // NewServer creates a new Server instance -func NewServer(client universalClientInterface, logger zerolog.Logger, port int) *Server { +func NewServer(logger zerolog.Logger, port int) *Server { s := &Server{ - client: client, logger: logger, } diff --git a/universalClient/api/server_test.go b/universalClient/api/server_test.go index b95f5214..65c44b0b 100644 --- a/universalClient/api/server_test.go +++ b/universalClient/api/server_test.go @@ -1,8 +1,6 @@ package api import ( - "encoding/json" - "io" "net/http" "net/http/httptest" "testing" @@ -11,137 +9,21 
@@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/pushchain/push-chain-node/universalClient/cache" - uregistrytypes "github.com/pushchain/push-chain-node/x/uregistry/types" ) -// MockUniversalClient implements UniversalClientInterface for testing -type MockUniversalClient struct { - chainConfigs []*uregistrytypes.ChainConfig - tokenConfigs []*uregistrytypes.TokenConfig - lastUpdate time.Time - forceUpdateErr error - forceUpdateCalled bool -} - -func NewMockUniversalClient() *MockUniversalClient { - enabled := &uregistrytypes.ChainEnabled{ - IsInboundEnabled: true, - IsOutboundEnabled: true, - } - return &MockUniversalClient{ - chainConfigs: []*uregistrytypes.ChainConfig{ - { - Chain: "eip155:1", - VmType: uregistrytypes.VmType_EVM, - GatewayAddress: "0x123", - Enabled: enabled, - }, - { - Chain: "solana:mainnet", - VmType: uregistrytypes.VmType_SVM, - GatewayAddress: "11111111111111111111111111111111", - Enabled: enabled, - }, - }, - tokenConfigs: []*uregistrytypes.TokenConfig{ - { - Chain: "eip155:1", - Address: "0xAAA", - Symbol: "USDT", - Decimals: 6, - }, - { - Chain: "eip155:1", - Address: "0xBBB", - Symbol: "USDC", - Decimals: 6, - }, - { - Chain: "solana:mainnet", - Address: "So11111111111111111111111111111111111111112", - Symbol: "SOL", - Decimals: 9, - }, - }, - lastUpdate: time.Now(), - } -} - -func (m *MockUniversalClient) GetAllChainConfigs() []*uregistrytypes.ChainConfig { - return m.chainConfigs -} - -func (m *MockUniversalClient) GetAllTokenConfigs() []*uregistrytypes.TokenConfig { - return m.tokenConfigs -} - -func (m *MockUniversalClient) GetTokenConfigsByChain(chain string) []*uregistrytypes.TokenConfig { - configs := []*uregistrytypes.TokenConfig{} - for _, tc := range m.tokenConfigs { - if tc.Chain == chain { - configs = append(configs, tc) - } - } - return configs -} - -func (m *MockUniversalClient) GetTokenConfig(chain, address string) *uregistrytypes.TokenConfig { - 
for _, tc := range m.tokenConfigs { - if tc.Chain == chain && tc.Address == address { - return tc - } - } - return nil -} - -func (m *MockUniversalClient) GetCacheLastUpdate() time.Time { - return m.lastUpdate -} - -func (m *MockUniversalClient) GetChainConfig(chain string) *uregistrytypes.ChainConfig { - for _, cfg := range m.chainConfigs { - if cfg.Chain == chain { - return cfg - } - } - return nil -} - -func (m *MockUniversalClient) ForceConfigUpdate() error { - m.forceUpdateCalled = true - return m.forceUpdateErr -} - -// GetAllChainData implements universalClientInterface -func (m *MockUniversalClient) GetAllChainData() []*cache.ChainData { - // Convert chainConfigs to ChainData format - data := make([]*cache.ChainData, len(m.chainConfigs)) - for i, cfg := range m.chainConfigs { - data[i] = &cache.ChainData{ - Config: cfg, - UpdatedAt: m.lastUpdate, - } - } - return data -} - func TestNewServer(t *testing.T) { logger := zerolog.New(zerolog.NewTestWriter(t)) - mockClient := NewMockUniversalClient() t.Run("Create server with valid config", func(t *testing.T) { - server := NewServer(mockClient, logger, 8080) + server := NewServer(logger, 8080) assert.NotNil(t, server) - assert.Equal(t, mockClient, server.client) assert.NotNil(t, server.server) assert.Equal(t, ":8080", server.server.Addr) }) t.Run("Create server with different port", func(t *testing.T) { - server := NewServer(mockClient, logger, 9090) + server := NewServer(logger, 9090) assert.NotNil(t, server) assert.Equal(t, ":9090", server.server.Addr) @@ -150,11 +32,10 @@ func TestNewServer(t *testing.T) { func TestServerStartStop(t *testing.T) { logger := zerolog.New(zerolog.NewTestWriter(t)) - mockClient := NewMockUniversalClient() t.Run("Start and stop server", func(t *testing.T) { // Use a random port to avoid conflicts - server := NewServer(mockClient, logger, 0) + server := NewServer(logger, 0) // Start server err := server.Start() @@ -163,17 +44,13 @@ func TestServerStartStop(t *testing.T) { // Give 
server time to start time.Sleep(200 * time.Millisecond) - // Verify server is running by making a health check request - // Note: We're using port 0, so we can't easily test the actual HTTP endpoint - // In a real test, we'd get the actual port from the listener - // Stop server err = server.Stop() assert.NoError(t, err) }) + t.Run("Start with nil server", func(t *testing.T) { server := &Server{ - client: mockClient, logger: logger, server: nil, } @@ -181,9 +58,9 @@ func TestServerStartStop(t *testing.T) { assert.Error(t, err) assert.Contains(t, err.Error(), "query server is nil") }) + t.Run("Stop with nil server", func(t *testing.T) { server := &Server{ - client: mockClient, logger: logger, server: nil, } @@ -194,11 +71,10 @@ func TestServerStartStop(t *testing.T) { func TestServerIntegration(t *testing.T) { logger := zerolog.New(zerolog.NewTestWriter(t)) - mockClient := NewMockUniversalClient() t.Run("Server lifecycle with HTTP client", func(t *testing.T) { // Create server on a specific port - server := NewServer(mockClient, logger, 18080) + server := NewServer(logger, 18080) // Start server err := server.Start() @@ -220,9 +96,7 @@ func TestServerIntegration(t *testing.T) { // Test handler functions directly using httptest func TestHealthHandler(t *testing.T) { logger := zerolog.New(zerolog.NewTestWriter(t)) - mockClient := NewMockUniversalClient() server := &Server{ - client: mockClient, logger: logger, } @@ -239,111 +113,3 @@ func TestHealthHandler(t *testing.T) { assert.Equal(t, "OK", w.Body.String()) }) } - -// TestGetAllChainConfigsHandler removed - handler doesn't exist - -// TestGetAllTokenConfigsHandler removed - handler doesn't exist - -func TestInvalidMethods(t *testing.T) { - logger := zerolog.New(zerolog.NewTestWriter(t)) - mockClient := NewMockUniversalClient() - server := &Server{ - client: mockClient, - logger: logger, - } - - tests := []struct { - name string - handler http.HandlerFunc - method string - expectedMsg string - }{ - { - name: "POST 
to health check", - handler: http.HandlerFunc(server.handleHealth), - method: http.MethodPost, - expectedMsg: "method not allowed", - }, - { - name: "DELETE to chain data", - handler: http.HandlerFunc(server.handleChainData), - method: http.MethodDelete, - expectedMsg: "method not allowed", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := httptest.NewRequest(tt.method, "/test", nil) - w := httptest.NewRecorder() - - tt.handler(w, req) - - // Most handlers should return 405 for wrong methods - // but implementation may vary - body, _ := io.ReadAll(w.Body) - t.Logf("Response: %d - %s", w.Code, string(body)) - }) - } -} - -// Table-driven tests for various scenarios -func TestAPIEndpointsTable(t *testing.T) { - logger := zerolog.New(zerolog.NewTestWriter(t)) - mockClient := NewMockUniversalClient() - server := &Server{ - client: mockClient, - logger: logger, - } - - tests := []struct { - name string - handler http.HandlerFunc - method string - url string - wantStatus int - checkBody func(t *testing.T, body []byte) - }{ - { - name: "health check", - handler: http.HandlerFunc(server.handleHealth), - method: http.MethodGet, - url: "/health", - wantStatus: http.StatusOK, - checkBody: func(t *testing.T, body []byte) { - // handleHealth returns plain "OK" - assert.Equal(t, "OK", string(body)) - }, - }, - { - name: "chain data", - handler: http.HandlerFunc(server.handleChainData), - method: http.MethodGet, - url: "/api/v1/chain-data", - wantStatus: http.StatusOK, - checkBody: func(t *testing.T, body []byte) { - var resp map[string]interface{} - err := json.Unmarshal(body, &resp) - require.NoError(t, err) - assert.Contains(t, resp, "data") - data, ok := resp["data"].([]interface{}) - require.True(t, ok) - assert.Len(t, data, 2) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := httptest.NewRequest(tt.method, tt.url, nil) - w := httptest.NewRecorder() - - tt.handler(w, req) - - assert.Equal(t, 
tt.wantStatus, w.Code) - if tt.checkBody != nil { - tt.checkBody(t, w.Body.Bytes()) - } - }) - } -} diff --git a/universalClient/api/types.go b/universalClient/api/types.go deleted file mode 100644 index 86b26fa1..00000000 --- a/universalClient/api/types.go +++ /dev/null @@ -1,17 +0,0 @@ -package api - -import "github.com/pushchain/push-chain-node/universalClient/cache" - -type universalClientInterface interface { - GetAllChainData() []*cache.ChainData -} - -// QueryResponse represents the standard query response format -type queryResponse struct { - Data interface{} `json:"data"` -} - -// ErrorResponse represents an error response -type errorResponse struct { - Error string `json:"error"` -} diff --git a/universalClient/authz/tx_signer.go b/universalClient/authz/tx_signer.go deleted file mode 100644 index f3db474d..00000000 --- a/universalClient/authz/tx_signer.go +++ /dev/null @@ -1,457 +0,0 @@ -package authz - -import ( - "context" - "fmt" - "regexp" - "slices" - "strconv" - "strings" - "sync" - - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/tx" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/tx/signing" - authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" - "github.com/cosmos/cosmos-sdk/x/authz" - "github.com/pushchain/push-chain-node/universalClient/constant" - "github.com/pushchain/push-chain-node/universalClient/keys" - "github.com/rs/zerolog" -) - -// TxSigner handles AuthZ transaction signing for Universal Validator -type TxSigner struct { - keys keys.UniversalValidatorKeys - clientCtx client.Context - txConfig client.TxConfig - log zerolog.Logger - sequenceMutex sync.Mutex // Mutex to synchronize transaction signing - lastSequence uint64 // Track the last used sequence -} - -// NewTxSigner creates a new transaction signer -func NewTxSigner( - keys keys.UniversalValidatorKeys, - clientCtx client.Context, - log zerolog.Logger, -) 
*TxSigner { - return &TxSigner{ - keys: keys, - clientCtx: clientCtx, - txConfig: clientCtx.TxConfig, - log: log, - } -} - -// SignAndBroadcastAuthZTx signs and broadcasts an AuthZ transaction -func (ts *TxSigner) SignAndBroadcastAuthZTx( - ctx context.Context, - msgs []sdk.Msg, - memo string, - gasLimit uint64, - feeAmount sdk.Coins, -) (*sdk.TxResponse, error) { - // Lock to prevent concurrent sequence issues - ts.sequenceMutex.Lock() - defer ts.sequenceMutex.Unlock() - - ts.log.Info(). - Int("msg_count", len(msgs)). - Str("memo", memo). - Msg("Creating AuthZ transaction") - - // Wrap messages with AuthZ - authzMsgs, err := ts.wrapMessagesWithAuthZ(msgs) - if err != nil { - return nil, fmt.Errorf("failed to wrap messages with AuthZ: %w", err) - } - - // Try up to 3 times in case of sequence mismatch - maxAttempts := 3 - for attempt := 1; attempt <= maxAttempts; attempt++ { - // Create and sign transaction - txBuilder, err := ts.createTxBuilder(authzMsgs, memo, gasLimit, feeAmount) - if err != nil { - return nil, fmt.Errorf("failed to create tx builder: %w", err) - } - - // Sign the transaction with sequence management - if err := ts.signTxWithSequence(ctx, txBuilder); err != nil { - return nil, fmt.Errorf("failed to sign transaction: %w", err) - } - - // Encode transaction - txBytes, err := ts.clientCtx.TxConfig.TxEncoder()(txBuilder.GetTx()) - if err != nil { - return nil, fmt.Errorf("failed to encode transaction: %w", err) - } - - // Broadcast transaction - res, err := ts.broadcastTransaction(ctx, txBytes) - if err != nil { - // Check if error is due to sequence mismatch - if strings.Contains(err.Error(), "account sequence mismatch") && attempt < maxAttempts { - ts.log.Warn(). - Err(err). - Uint64("current_sequence", ts.lastSequence). - Int("attempt", attempt). 
- Msg("Sequence mismatch detected, forcing refresh and retrying") - // Force refresh sequence on next attempt - ts.lastSequence = 0 // This will force a refresh from chain - continue // Retry - } - // For other errors or final attempt, increment and return error - ts.lastSequence++ - ts.log.Debug(). - Uint64("new_sequence", ts.lastSequence). - Msg("Incremented sequence after broadcast error") - return nil, fmt.Errorf("failed to broadcast transaction: %w", err) - } - - // If chain responded with error code, handle sequence-mismatch specially - if res != nil && res.Code != 0 { - // Retry immediately for account sequence mismatch responses - if strings.Contains(strings.ToLower(res.RawLog), "account sequence mismatch") && attempt < maxAttempts { - ts.log.Warn(). - Uint64("current_sequence", ts.lastSequence). - Int("attempt", attempt). - Str("raw_log", res.RawLog). - Msg("Sequence mismatch in response, refreshing and retrying") - // Parse expected sequence from raw log. If found, prefer it over chain query. - if exp, got, ok := parseSequenceMismatch(res.RawLog); ok { - // Set lastSequence to the expected (next) sequence so that signer uses it. - ts.log.Debug(). - Uint64("expected", exp). - Uint64("got", got). - Msg("Parsed sequence mismatch values from raw log") - ts.lastSequence = exp - } else { - // Fallback: force refresh from chain on next attempt - ts.lastSequence = 0 - } - continue - } - - // Conservatively increment sequence since the sequence may have been consumed - ts.lastSequence++ - ts.log.Debug(). - Uint64("new_sequence", ts.lastSequence). - Msg("Incremented sequence after on-chain error response") - - // Log and return error - ts.log.Error(). - Str("tx_hash", res.TxHash). - Uint32("code", res.Code). - Str("raw_log", res.RawLog). - Uint64("sequence_used", ts.lastSequence-1). 
- Msg("Transaction failed on chain") - return res, fmt.Errorf("transaction failed with code %d: %s", res.Code, res.RawLog) - } - - // Success: increment sequence once and return - ts.lastSequence++ - ts.log.Debug(). - Uint64("new_sequence", ts.lastSequence). - Str("tx_hash", res.TxHash). - Msg("Incremented sequence after successful broadcast") - - ts.log.Info(). - Str("tx_hash", res.TxHash). - Int64("gas_used", res.GasUsed). - Uint64("sequence_used", ts.lastSequence-1). - Msg("Transaction broadcasted and executed successfully") - - return res, nil - } - - return nil, fmt.Errorf("failed to broadcast transaction after %d attempts", maxAttempts) -} - -// handleBroadcastSuccess processes a successful broadcast -func (ts *TxSigner) handleBroadcastSuccess(res *sdk.TxResponse) (*sdk.TxResponse, error) { - // Check if transaction failed due to sequence mismatch - // In this case, don't increment sequence as it wasn't consumed - if res.Code == 32 && strings.Contains(res.RawLog, "account sequence mismatch") { - ts.log.Warn(). - Str("tx_hash", res.TxHash). - Uint32("code", res.Code). - Str("raw_log", res.RawLog). - Uint64("current_sequence", ts.lastSequence). - Msg("Sequence mismatch error - not incrementing sequence") - // Force sequence refresh on next attempt - ts.lastSequence = 0 - return res, fmt.Errorf("transaction failed with code %d: %s", res.Code, res.RawLog) - } - - // Increment sequence once per broadcast attempt. The chain consumes the - // sequence for all other cases. - ts.lastSequence++ - ts.log.Debug(). - Uint64("new_sequence", ts.lastSequence). - Str("tx_hash", res.TxHash). - Msg("Incremented sequence after broadcast") - - // Check if transaction was successful - if res.Code != 0 { - ts.log.Error(). - Str("tx_hash", res.TxHash). - Uint32("code", res.Code). - Str("raw_log", res.RawLog). - Uint64("sequence_used", ts.lastSequence-1). 
- Msg("Transaction failed on chain but sequence was consumed") - return res, fmt.Errorf("transaction failed with code %d: %s", res.Code, res.RawLog) - } - - ts.log.Info(). - Str("tx_hash", res.TxHash). - Int64("gas_used", res.GasUsed). - Uint64("sequence_used", ts.lastSequence-1). - Msg("Transaction broadcasted and executed successfully") - - return res, nil -} - -// WrapMessagesWithAuthZ wraps messages with AuthZ MsgExec -func (ts *TxSigner) wrapMessagesWithAuthZ(msgs []sdk.Msg) ([]sdk.Msg, error) { - if len(msgs) == 0 { - return nil, fmt.Errorf("no messages to wrap") - } - - // Validate that all messages are allowed - for i, msg := range msgs { - msgType := sdk.MsgTypeURL(msg) - if !isAllowedMsgType(msgType) { - return nil, fmt.Errorf("message type %s at index %d is not allowed for AuthZ", msgType, i) - } - } - - // Get hot key address for grantee - hotKeyAddr, err := ts.keys.GetAddress() - if err != nil { - return nil, fmt.Errorf("failed to get hot key address: %w", err) - } - - ts.log.Debug(). - Str("grantee", hotKeyAddr.String()). - Int("msg_count", len(msgs)). 
- Msg("Wrapping messages with AuthZ") - - // Create MsgExec - msgExec := authz.NewMsgExec(hotKeyAddr, msgs) - - return []sdk.Msg{&msgExec}, nil -} - -// CreateTxBuilder creates a transaction builder with the given parameters -func (ts *TxSigner) createTxBuilder( - msgs []sdk.Msg, - memo string, - gasLimit uint64, - feeAmount sdk.Coins, -) (client.TxBuilder, error) { - txBuilder := ts.txConfig.NewTxBuilder() - - // Set messages - if err := txBuilder.SetMsgs(msgs...); err != nil { - return nil, fmt.Errorf("failed to set messages: %w", err) - } - - // Set memo - txBuilder.SetMemo(memo) - - // Set gas limit - txBuilder.SetGasLimit(gasLimit) - - // Set fee amount - txBuilder.SetFeeAmount(feeAmount) - - return txBuilder, nil -} - -// signTxWithSequence signs a transaction with proper sequence management -func (ts *TxSigner) signTxWithSequence(ctx context.Context, txBuilder client.TxBuilder) error { - ts.log.Debug().Msg("Starting transaction signing with sequence management") - - // Get account info to refresh sequence if needed - account, err := ts.getAccountInfo(ctx) - if err != nil { - return fmt.Errorf("failed to get account info: %w", err) - } - - // Reconcile local vs chain sequence conservatively: - // - If we have no local sequence (0), adopt chain's sequence. - // - If local < chain, adopt chain (we are behind). - // - If local > chain, keep local (likely recent tx not yet reflected in query). - chainSequence := account.GetSequence() - if ts.lastSequence == 0 { - ts.lastSequence = chainSequence - ts.log.Info(). - Uint64("adopted_chain_sequence", chainSequence). - Msg("Initialized local sequence from chain") - } else if ts.lastSequence < chainSequence { - ts.log.Info(). - Uint64("chain_sequence", chainSequence). - Uint64("cached_sequence", ts.lastSequence). - Msg("Local sequence behind chain, adopting chain's sequence") - ts.lastSequence = chainSequence - } else if ts.lastSequence > chainSequence { - ts.log.Warn(). - Uint64("chain_sequence", chainSequence). 
- Uint64("cached_sequence", ts.lastSequence). - Msg("Local sequence ahead of chain query, keeping local to avoid reuse") - } - - // Get hot key address - hotKeyAddr, err := ts.keys.GetAddress() - if err != nil { - return fmt.Errorf("failed to get hot key address: %w", err) - } - - // Get private key - password := ts.keys.GetHotkeyPassword() - privKey, err := ts.keys.GetPrivateKey(password) - if err != nil { - return fmt.Errorf("failed to get private key: %w", err) - } - - ts.log.Debug(). - Str("signer", hotKeyAddr.String()). - Uint64("account_number", account.GetAccountNumber()). - Uint64("sequence", ts.lastSequence). - Msg("Signing transaction with managed sequence") - - // Create signature data - sigData := signing.SingleSignatureData{ - SignMode: signing.SignMode_SIGN_MODE_DIRECT, - Signature: nil, - } - - sig := signing.SignatureV2{ - PubKey: privKey.PubKey(), - Data: &sigData, - Sequence: ts.lastSequence, - } - - // Set empty signature first to populate SignerInfos - if err := txBuilder.SetSignatures(sig); err != nil { - return fmt.Errorf("failed to set signatures: %w", err) - } - - // Use SDK's SignWithPrivKey helper function for proper signing - signerData := authsigning.SignerData{ - Address: hotKeyAddr.String(), - ChainID: ts.clientCtx.ChainID, - AccountNumber: account.GetAccountNumber(), - Sequence: ts.lastSequence, - PubKey: privKey.PubKey(), - } - - signV2, err := tx.SignWithPrivKey( - ctx, - signing.SignMode_SIGN_MODE_DIRECT, - signerData, - txBuilder, - privKey, - ts.clientCtx.TxConfig, - ts.lastSequence, - ) - if err != nil { - return fmt.Errorf("failed to sign with private key: %w", err) - } - - // Set the final signature - if err := txBuilder.SetSignatures(signV2); err != nil { - return fmt.Errorf("failed to set final signatures: %w", err) - } - - ts.log.Info(). - Str("signer", hotKeyAddr.String()). - Uint64("sequence", ts.lastSequence). 
- Msg("Transaction signed successfully with managed sequence") - - return nil -} - -// getAccountInfo retrieves account information for the hot key -func (ts *TxSigner) getAccountInfo(ctx context.Context) (client.Account, error) { - hotKeyAddr, err := ts.keys.GetAddress() - if err != nil { - return nil, fmt.Errorf("failed to get hot key address: %w", err) - } - - ts.log.Debug(). - Str("address", hotKeyAddr.String()). - Msg("Querying account info from chain") - - // Create auth query client - authClient := authtypes.NewQueryClient(ts.clientCtx) - - // Query account information - accountResp, err := authClient.Account(ctx, &authtypes.QueryAccountRequest{ - Address: hotKeyAddr.String(), - }) - if err != nil { - return nil, fmt.Errorf("failed to query account info: %w", err) - } - - // Unpack account - var account sdk.AccountI - if err := ts.clientCtx.InterfaceRegistry.UnpackAny(accountResp.Account, &account); err != nil { - return nil, fmt.Errorf("failed to unpack account: %w", err) - } - - ts.log.Debug(). - Str("address", account.GetAddress().String()). - Uint64("account_number", account.GetAccountNumber()). - Uint64("sequence", account.GetSequence()). - Msg("Retrieved account info") - - return account, nil -} - -// broadcastTransaction broadcasts a signed transaction to the chain -func (ts *TxSigner) broadcastTransaction(_ context.Context, txBytes []byte) (*sdk.TxResponse, error) { - // Use the client context's BroadcastTx method for proper broadcasting - res, err := ts.clientCtx.BroadcastTx(txBytes) - if err != nil { - return nil, fmt.Errorf("failed to broadcast transaction: %w", err) - } - - // Log the result - ts.log.Info(). - Str("tx_hash", res.TxHash). - Uint32("code", res.Code). - Int64("gas_used", res.GasUsed). - Int64("gas_wanted", res.GasWanted). 
- Msg("Transaction broadcast result") - - return res, nil -} - -// checks if a message type is allowed for AuthZ execution -func isAllowedMsgType(msgType string) bool { - return slices.Contains(constant.SupportedMessages, msgType) -} - -// parseSequenceMismatch attempts to parse expected and got sequence numbers from a raw log -// Example raw log: "account sequence mismatch, expected 601, got 600: incorrect account sequence" -func parseSequenceMismatch(raw string) (expected uint64, got uint64, ok bool) { - // Quick path using Sscanf if format matches exactly - var e, g uint64 - if _, err := fmt.Sscanf(raw, "account sequence mismatch, expected %d, got %d", &e, &g); err == nil { - return e, g, true - } - // Fallback to regex to be resilient to prefixes/suffixes - re := regexp.MustCompile(`expected\s+(\d+)\s*,\s*got\s+(\d+)`) - m := re.FindStringSubmatch(raw) - if len(m) == 3 { - if ev, err1 := strconv.ParseUint(m[1], 10, 64); err1 == nil { - if gv, err2 := strconv.ParseUint(m[2], 10, 64); err2 == nil { - return ev, gv, true - } - } - } - return 0, 0, false -} diff --git a/universalClient/authz/tx_signer_test.go b/universalClient/authz/tx_signer_test.go deleted file mode 100644 index 090fbb69..00000000 --- a/universalClient/authz/tx_signer_test.go +++ /dev/null @@ -1,429 +0,0 @@ -package authz - -import ( - "context" - "testing" - - txsigning "cosmossdk.io/x/tx/signing" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/tx" - signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" - "github.com/cosmos/cosmos-sdk/x/auth/signing" - "github.com/cosmos/cosmos-sdk/x/authz" - uetypes "github.com/pushchain/push-chain-node/x/uexecutor/types" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -func 
init() { - // Initialize SDK config for tests if not already sealed - sdkConfig := sdk.GetConfig() - defer func() { - // Config already sealed, that's fine - ignore panic - _ = recover() - }() - sdkConfig.SetBech32PrefixForAccount("push", "pushpub") - sdkConfig.SetBech32PrefixForValidator("pushvaloper", "pushvaloperpub") - sdkConfig.SetBech32PrefixForConsensusNode("pushvalcons", "pushvalconspub") -} - -// MockUniversalValidatorKeys is a mock implementation of the keys interface -type MockUniversalValidatorKeys struct { - mock.Mock -} - -func (m *MockUniversalValidatorKeys) GetAddress() (sdk.AccAddress, error) { - args := m.Called() - return args.Get(0).(sdk.AccAddress), args.Error(1) -} - -func (m *MockUniversalValidatorKeys) GetPrivateKey(password string) (cryptotypes.PrivKey, error) { - args := m.Called(password) - return args.Get(0).(cryptotypes.PrivKey), args.Error(1) -} - -func (m *MockUniversalValidatorKeys) GetHotkeyPassword() string { - args := m.Called() - return args.String(0) -} - -// MockTxConfig is a mock implementation of client.TxConfig -type MockTxConfig struct { - mock.Mock -} - -func (m *MockTxConfig) TxEncoder() sdk.TxEncoder { - args := m.Called() - return args.Get(0).(sdk.TxEncoder) -} - -func (m *MockTxConfig) TxDecoder() sdk.TxDecoder { - args := m.Called() - return args.Get(0).(sdk.TxDecoder) -} - -func (m *MockTxConfig) TxJSONEncoder() sdk.TxEncoder { - args := m.Called() - return args.Get(0).(sdk.TxEncoder) -} - -func (m *MockTxConfig) TxJSONDecoder() sdk.TxDecoder { - args := m.Called() - return args.Get(0).(sdk.TxDecoder) -} - -func (m *MockTxConfig) NewTxBuilder() client.TxBuilder { - args := m.Called() - return args.Get(0).(client.TxBuilder) -} - -func (m *MockTxConfig) WrapTxBuilder(newTx sdk.Tx) (client.TxBuilder, error) { - args := m.Called(newTx) - return args.Get(0).(client.TxBuilder), args.Error(1) -} - -func (m *MockTxConfig) SignModeHandler() *txsigning.HandlerMap { - return nil -} - -func (m *MockTxConfig) SigningContext() 
*txsigning.Context { - return nil -} - -// Test helper functions to reduce redundancy - -// setupTestTxSigner creates a TxSigner with common test setup -func setupTestTxSigner() (*TxSigner, *MockUniversalValidatorKeys, sdk.AccAddress) { - mockKeys := &MockUniversalValidatorKeys{} - granteeAddr := sdk.MustAccAddressFromBech32("push1w7ku9j7jezma7mqv7yterhdvxu0wxzv6c6vrlw") - - mockKeys.On("GetAddress").Return(granteeAddr, nil) - - cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) - mockTxConfig := &MockTxConfig{} - clientCtx := client.Context{}.WithTxConfig(mockTxConfig).WithCodec(cdc) - logger := zerolog.New(nil) - - txSigner := NewTxSigner(mockKeys, clientCtx, logger) - return txSigner, mockKeys, granteeAddr -} - -// setupTestTxSignerWithTxConfig creates a TxSigner with custom TxConfig -func setupTestTxSignerWithTxConfig(mockTxConfig *MockTxConfig) (*TxSigner, *MockUniversalValidatorKeys, sdk.AccAddress) { - mockKeys := &MockUniversalValidatorKeys{} - granteeAddr := sdk.MustAccAddressFromBech32("push1w7ku9j7jezma7mqv7yterhdvxu0wxzv6c6vrlw") - - mockKeys.On("GetAddress").Return(granteeAddr, nil) - - cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) - clientCtx := client.Context{}.WithTxConfig(mockTxConfig).WithCodec(cdc) - logger := zerolog.New(nil) - - txSigner := NewTxSigner(mockKeys, clientCtx, logger) - return txSigner, mockKeys, granteeAddr -} - -// setupTestTxBuilder creates a mock TxBuilder with common setup -func setupTestTxBuilder() (*MockTxBuilder, *MockTxConfig) { - mockTxBuilder := &MockTxBuilder{} - mockTxConfig := &MockTxConfig{} - mockTxConfig.On("NewTxBuilder").Return(mockTxBuilder) - - mockTxBuilder.On("SetMsgs", mock.Anything).Return(nil) - mockTxBuilder.On("SetMemo", mock.Anything) - mockTxBuilder.On("SetGasLimit", mock.Anything) - mockTxBuilder.On("SetFeeAmount", mock.Anything) - - return mockTxBuilder, mockTxConfig -} - -func (m *MockTxConfig) MarshalSignatureJSON([]signingtypes.SignatureV2) ([]byte, error) { - return 
[]byte("{}"), nil -} - -func (m *MockTxConfig) UnmarshalSignatureJSON([]byte) ([]signingtypes.SignatureV2, error) { - return []signingtypes.SignatureV2{}, nil -} - -// MockClientContext is a mock implementation of client.Context -type MockClientContext struct { - mock.Mock - TxCfg client.TxConfig -} - -func (m *MockClientContext) BroadcastTx(txBytes []byte) (*sdk.TxResponse, error) { - args := m.Called(txBytes) - return args.Get(0).(*sdk.TxResponse), args.Error(1) -} - -func (m *MockClientContext) TxConfig() client.TxConfig { - return m.TxCfg -} - -// MockTxBuilder is a mock implementation of client.TxBuilder -type MockTxBuilder struct { - mock.Mock -} - -func (m *MockTxBuilder) GetTx() signing.Tx { - args := m.Called() - return args.Get(0).(signing.Tx) -} - -func (m *MockTxBuilder) SetMsgs(msgs ...sdk.Msg) error { - args := m.Called(msgs) - return args.Error(0) -} - -func (m *MockTxBuilder) SetMemo(memo string) { - m.Called(memo) -} - -func (m *MockTxBuilder) SetFeeAmount(amount sdk.Coins) { - m.Called(amount) -} - -func (m *MockTxBuilder) SetGasLimit(limit uint64) { - m.Called(limit) -} - -func (m *MockTxBuilder) SetTimeoutHeight(height uint64) { - m.Called(height) -} - -func (m *MockTxBuilder) SetFeeGranter(feeGranter sdk.AccAddress) { - m.Called(feeGranter) -} - -func (m *MockTxBuilder) SetFeePayer(feePayer sdk.AccAddress) { - m.Called(feePayer) -} - -func (m *MockTxBuilder) SetSignatures(signatures ...signingtypes.SignatureV2) error { - args := m.Called(signatures) - return args.Error(0) -} - -func (m *MockTxBuilder) AddAuxSignerData(aux tx.AuxSignerData) error { - args := m.Called(aux) - return args.Error(0) -} - -func TestNewTxSigner(t *testing.T) { - txSigner, mockKeys, _ := setupTestTxSigner() - - assert.NotNil(t, txSigner) - assert.Equal(t, mockKeys, txSigner.keys) - assert.NotNil(t, txSigner.txConfig) -} - -func TestTxSigner_WrapMessagesWithAuthZ(t *testing.T) { - txSigner, _, granteeAddr := setupTestTxSigner() - - tests := []struct { - name string - 
msgs []sdk.Msg - expectError bool - errorMsg string - }{ - { - name: "empty messages", - msgs: []sdk.Msg{}, - expectError: true, - errorMsg: "no messages to wrap", - }, - { - name: "valid allowed message", - msgs: []sdk.Msg{ - &uetypes.MsgVoteInbound{ - Signer: granteeAddr.String(), - Inbound: &uetypes.Inbound{ - SourceChain: "solana:mainnet", - TxHash: "0x1234567890abcdef1234567890abcdef12345678", - Sender: "sender_address", - Recipient: "recipient_address", - Amount: "1000", - AssetAddr: "asset_address", - LogIndex: "0", - TxType: uetypes.InboundTxType_FUNDS_AND_PAYLOAD, - }, - }, - }, - expectError: false, - }, - { - name: "disallowed message type", - msgs: []sdk.Msg{ - &authz.MsgGrant{ - Granter: "push1fl48vsnmsdzcv85q5d2q4z5ajdha8yu34mf0eh", - Grantee: granteeAddr.String(), - }, - }, - expectError: true, - errorMsg: "is not allowed for AuthZ", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := txSigner.wrapMessagesWithAuthZ(tt.msgs) - - if tt.expectError { - assert.Error(t, err) - if tt.errorMsg != "" { - assert.Contains(t, err.Error(), tt.errorMsg) - } - assert.Nil(t, result) - } else { - assert.NoError(t, err) - assert.NotNil(t, result) - assert.Len(t, result, len(tt.msgs)) - - // Check that messages are wrapped with MsgExec - for _, msg := range result { - execMsg, ok := msg.(*authz.MsgExec) - assert.True(t, ok, "Message should be wrapped with MsgExec") - assert.Equal(t, granteeAddr.String(), execMsg.Grantee) - } - } - }) - } -} - -func TestTxSigner_CreateTxBuilder(t *testing.T) { - mockTxBuilder, mockTxConfig := setupTestTxBuilder() - txSigner, _, granteeAddr := setupTestTxSignerWithTxConfig(mockTxConfig) - - msgs := []sdk.Msg{ - &authz.MsgExec{ - Grantee: granteeAddr.String(), - Msgs: []*codectypes.Any{}, - }, - } - - memo := "test memo" - gasLimit := uint64(200000) - feeAmount := sdk.NewCoins(sdk.NewInt64Coin("push", 1000)) - - result, err := txSigner.createTxBuilder(msgs, memo, gasLimit, feeAmount) - - 
assert.NoError(t, err) - assert.NotNil(t, result) - assert.Equal(t, mockTxBuilder, result) - - // Verify all mock expectations were met - mockTxConfig.AssertExpectations(t) - mockTxBuilder.AssertExpectations(t) -} - -func TestTxSigner_ValidateMessages(t *testing.T) { - txSigner, _, granteeAddr := setupTestTxSigner() - - tests := []struct { - name string - msgs []sdk.Msg - expectError bool - errorMsg string - }{ - { - name: "valid allowed messages", - msgs: []sdk.Msg{ - &uetypes.MsgVoteInbound{ - Signer: granteeAddr.String(), - Inbound: &uetypes.Inbound{ - SourceChain: "solana:mainnet", - TxHash: "0x1234567890abcdef1234567890abcdef12345678", - Sender: "sender_address", - Recipient: "recipient_address", - Amount: "1000", - AssetAddr: "asset_address", - LogIndex: "0", - TxType: uetypes.InboundTxType_FUNDS_AND_PAYLOAD, - }, - }, - }, - expectError: false, - }, - { - name: "all invalid messages", - msgs: []sdk.Msg{ - &authz.MsgGrant{Granter: "push1z7n2ahw28fveuaqra93nnd2x8ulrw9lkwg3tpp", Grantee: granteeAddr.String()}, - }, - expectError: true, - errorMsg: "at index 0 is not allowed for AuthZ", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Test validation through WrapMessagesWithAuthZ - _, err := txSigner.wrapMessagesWithAuthZ(tt.msgs) - - if tt.expectError { - assert.Error(t, err) - if tt.errorMsg != "" { - assert.Contains(t, err.Error(), tt.errorMsg) - } - } else { - assert.NoError(t, err) - } - }) - } -} - -// TestTxSigner_SignAndBroadcastAuthZTx tests transaction signing and broadcasting -func TestTxSigner_SignAndBroadcastAuthZTx(t *testing.T) { - _, mockTxConfig := setupTestTxBuilder() - txSigner, _, granteeAddr := setupTestTxSignerWithTxConfig(mockTxConfig) - - msgs := []sdk.Msg{ - &uetypes.MsgVoteInbound{ - Signer: granteeAddr.String(), - Inbound: &uetypes.Inbound{ - SourceChain: "solana:mainnet", - TxHash: "0x1234567890abcdef1234567890abcdef12345678", - Sender: "sender_address", - Recipient: "recipient_address", - Amount: 
"1000", - AssetAddr: "asset_address", - LogIndex: "0", - TxType: uetypes.InboundTxType_FUNDS_AND_PAYLOAD, - }, - }, - } - - // This will fail due to missing gRPC connection but tests the flow - ctx := context.Background() - _, err := txSigner.SignAndBroadcastAuthZTx(ctx, msgs, "test memo", 200000, sdk.NewCoins(sdk.NewInt64Coin("push", 1000))) - - // Should fail on signing due to no real implementation - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to sign transaction") -} - -// TestTxSigner_ErrorScenarios tests various error scenarios -func TestTxSigner_ErrorScenarios(t *testing.T) { - mockTxConfig := &MockTxConfig{} - txSigner, _, _ := setupTestTxSignerWithTxConfig(mockTxConfig) - - t.Run("CreateTxBuilder with nil messages", func(t *testing.T) { - mockTxBuilder := &MockTxBuilder{} - mockTxConfig.On("NewTxBuilder").Return(mockTxBuilder).Once() - mockTxBuilder.On("SetMsgs", mock.Anything).Return(nil).Once() - mockTxBuilder.On("SetMemo", "").Once() - mockTxBuilder.On("SetGasLimit", uint64(0)).Once() - mockTxBuilder.On("SetFeeAmount", sdk.NewCoins()).Once() - - result, err := txSigner.createTxBuilder(nil, "", 0, sdk.NewCoins()) - assert.NoError(t, err) - assert.NotNil(t, result) - - mockTxConfig.AssertExpectations(t) - mockTxBuilder.AssertExpectations(t) - }) -} diff --git a/universalClient/cache/cache.go b/universalClient/cache/cache.go deleted file mode 100644 index 1f1e90c7..00000000 --- a/universalClient/cache/cache.go +++ /dev/null @@ -1,96 +0,0 @@ -package cache - -import ( - "sync" - "time" - - uregistrytypes "github.com/pushchain/push-chain-node/x/uregistry/types" - "github.com/rs/zerolog" -) - -// ChainData holds a chain config and when it was last updated. -type ChainData struct { - Config *uregistrytypes.ChainConfig - UpdatedAt time.Time -} - -// Cache is a thread-safe store for chain configs. -// Data can only be changed via UpdateChains. 
-type Cache struct { - mu sync.RWMutex - chains map[string]*ChainData - lastUpdate time.Time - logger zerolog.Logger -} - -// New creates a new Cache instance. -func New(logger zerolog.Logger) *Cache { - return &Cache{ - chains: make(map[string]*ChainData), - logger: logger.With().Str("component", "cache").Logger(), - } -} - -// LastUpdated returns the last time the cache was refreshed. -func (c *Cache) LastUpdated() time.Time { - c.mu.RLock() - defer c.mu.RUnlock() - return c.lastUpdate -} - -// UpdateChains atomically replaces the entire cache. -func (c *Cache) UpdateChains(chains []*uregistrytypes.ChainConfig) { - c.mu.Lock() - defer c.mu.Unlock() - - now := time.Now() - newMap := make(map[string]*ChainData, len(chains)) - - for _, cfg := range chains { - if cfg == nil || cfg.Chain == "" { - continue - } - newMap[cfg.Chain] = &ChainData{ - Config: cfg, - UpdatedAt: now, - } - } - - c.chains = newMap - c.lastUpdate = now - - c.logger.Info(). - Int("chains", len(newMap)). - Time("updated_at", now). - Msg("cache updated") -} - -// GetChainData returns a pointer copy of a chain's data, safe for reading. -// If not found, returns nil. -func (c *Cache) GetChainData(chainID string) *ChainData { - c.mu.RLock() - defer c.mu.RUnlock() - - if data, ok := c.chains[chainID]; ok { - return &ChainData{ - Config: data.Config, - UpdatedAt: data.UpdatedAt, - } - } - return nil -} - -// GetAllChains returns a slice copy of all chain data. 
-func (c *Cache) GetAllChains() []*ChainData { - c.mu.RLock() - defer c.mu.RUnlock() - - out := make([]*ChainData, 0, len(c.chains)) - for _, v := range c.chains { - out = append(out, &ChainData{ - Config: v.Config, - UpdatedAt: v.UpdatedAt, - }) - } - return out -} diff --git a/universalClient/chains/common/outbound.go b/universalClient/chains/common/outbound.go new file mode 100644 index 00000000..a1e86eab --- /dev/null +++ b/universalClient/chains/common/outbound.go @@ -0,0 +1,72 @@ +package common + +import ( + "context" + "math/big" + + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" +) + +// OutboundTxData is an alias for uexecutortypes.OutboundCreatedEvent. +// This represents the data needed to create an outbound transaction. +type OutboundTxData = uexecutortypes.OutboundCreatedEvent + +// OutboundTxResult represents the result of building an outbound transaction. +type OutboundTxResult struct { + // RawTx is the serialized unsigned transaction ready for signing + RawTx []byte `json:"raw_tx"` + + // SigningHash is the hash that needs to be signed by TSS + // For EVM: keccak256 hash of the transaction + // For Solana: the message hash + SigningHash []byte `json:"signing_hash"` + + // Nonce is the transaction nonce (EVM only) + Nonce uint64 `json:"nonce,omitempty"` + + // GasPrice is the gas price used (EVM only) + GasPrice *big.Int `json:"gas_price,omitempty"` + + // GasLimit is the gas limit used + GasLimit uint64 `json:"gas_limit"` + + // ChainID is the destination chain ID + ChainID string `json:"chain_id"` + + // Blockhash is the recent blockhash used (Solana only) + Blockhash []byte `json:"blockhash,omitempty"` +} + +// OutboundTxBuilder defines the interface for building outbound transactions. +// Each chain type (EVM, SVM) implements this interface. +type OutboundTxBuilder interface { + // BuildTransaction creates an unsigned transaction from outbound data. 
+ // gasPrice: the gas price from on-chain oracle (passed by coordinator) + // Fetches nonce from destination chain. + // Returns the transaction result containing the raw tx and signing hash. + BuildTransaction(ctx context.Context, data *OutboundTxData, gasPrice *big.Int) (*OutboundTxResult, error) + + // AssembleSignedTransaction combines the unsigned transaction with the TSS signature. + // Returns the fully signed transaction ready for broadcast. + AssembleSignedTransaction(unsignedTx []byte, signature []byte, recoveryID byte) ([]byte, error) + + // BroadcastTransaction sends the signed transaction to the network. + // Returns the transaction hash. + BroadcastTransaction(ctx context.Context, signedTx []byte) (string, error) + + // GetTxHash extracts the transaction hash from a signed transaction. + // This can be calculated before broadcasting, so we can always store it. + GetTxHash(signedTx []byte) (string, error) + + // GetChainID returns the chain identifier this builder is configured for. + GetChainID() string +} + +// OutboundTxBuilderFactory creates OutboundTxBuilder instances for different chains. +type OutboundTxBuilderFactory interface { + // CreateBuilder creates an OutboundTxBuilder for the specified chain. + CreateBuilder(chainID string) (OutboundTxBuilder, error) + + // SupportsChain returns true if the factory can create a builder for the chain. 
+ SupportsChain(chainID string) bool +} diff --git a/universalClient/chains/evm/outbound_tx_builder.go b/universalClient/chains/evm/outbound_tx_builder.go new file mode 100644 index 00000000..52753dcb --- /dev/null +++ b/universalClient/chains/evm/outbound_tx_builder.go @@ -0,0 +1,300 @@ +package evm + +import ( + "context" + "crypto/ecdsa" + "encoding/hex" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rlp" + "github.com/rs/zerolog" + + chaincommon "github.com/pushchain/push-chain-node/universalClient/chains/common" +) + +// OutboundTxBuilder builds outbound transactions for EVM chains. +type OutboundTxBuilder struct { + client *Client + chainID *big.Int + caipChainID string + gatewayAddr common.Address + tssPublicKey *ecdsa.PublicKey // TSS public key for deriving sender address + logger zerolog.Logger +} + +// NewOutboundTxBuilder creates a new EVM outbound transaction builder. +func NewOutboundTxBuilder( + client *Client, + gatewayAddr common.Address, + tssPublicKey *ecdsa.PublicKey, + logger zerolog.Logger, +) *OutboundTxBuilder { + return &OutboundTxBuilder{ + client: client, + chainID: big.NewInt(client.chainID), + caipChainID: client.GetConfig().Chain, + gatewayAddr: gatewayAddr, + tssPublicKey: tssPublicKey, + logger: logger.With().Str("component", "evm_outbound_builder").Logger(), + } +} + +// BuildTransaction creates an unsigned EVM transaction from outbound data. +// gasPrice is provided by the caller (from pushcore oracle). +func (b *OutboundTxBuilder) BuildTransaction(ctx context.Context, data *chaincommon.OutboundTxData, gasPrice *big.Int) (*chaincommon.OutboundTxResult, error) { + if data == nil { + return nil, fmt.Errorf("outbound data is nil") + } + if gasPrice == nil { + return nil, fmt.Errorf("gas price is nil") + } + + b.logger.Debug(). 
+ Str("tx_id", data.TxID). + Str("recipient", data.Recipient). + Str("amount", data.Amount). + Str("gas_price", gasPrice.String()). + Msg("building EVM outbound transaction") + + // Parse amount + amount, ok := new(big.Int).SetString(data.Amount, 10) + if !ok { + return nil, fmt.Errorf("invalid amount: %s", data.Amount) + } + + // Parse gas limit + gasLimit, ok := new(big.Int).SetString(data.GasLimit, 10) + if !ok { + gasLimit = big.NewInt(21000) // Default gas limit + } + + // Get nonce for TSS address + tssAddr := b.getTSSAddress() + nonce, err := b.getNonce(ctx, tssAddr) + if err != nil { + return nil, fmt.Errorf("failed to get nonce: %w", err) + } + + // Build transaction data (call to gateway contract) + txData, err := b.buildGatewayCallData(data) + if err != nil { + return nil, fmt.Errorf("failed to build gateway call data: %w", err) + } + + // Create the transaction + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + GasPrice: gasPrice, + Gas: gasLimit.Uint64(), + To: &b.gatewayAddr, + Value: amount, + Data: txData, + }) + + // Get the signer + signer := types.NewEIP155Signer(b.chainID) + + // Get the signing hash + signingHash := signer.Hash(tx) + + // Serialize the unsigned transaction + rawTx, err := rlp.EncodeToBytes(tx) + if err != nil { + return nil, fmt.Errorf("failed to encode transaction: %w", err) + } + + return &chaincommon.OutboundTxResult{ + RawTx: rawTx, + SigningHash: signingHash[:], + Nonce: nonce, + GasPrice: gasPrice, + GasLimit: gasLimit.Uint64(), + ChainID: b.caipChainID, + }, nil +} + +// AssembleSignedTransaction combines the unsigned transaction with the TSS signature. 
+func (b *OutboundTxBuilder) AssembleSignedTransaction(unsignedTx []byte, signature []byte, recoveryID byte) ([]byte, error) { + if len(signature) != 64 { + return nil, fmt.Errorf("invalid signature length: expected 64, got %d", len(signature)) + } + + // Decode the unsigned transaction + var tx types.Transaction + if err := rlp.DecodeBytes(unsignedTx, &tx); err != nil { + return nil, fmt.Errorf("failed to decode unsigned transaction: %w", err) + } + + // Create the signature with recovery ID + // EIP-155: V = chainID * 2 + 35 + recoveryID + v := new(big.Int).Mul(b.chainID, big.NewInt(2)) + v.Add(v, big.NewInt(35)) + v.Add(v, big.NewInt(int64(recoveryID))) + + r := new(big.Int).SetBytes(signature[:32]) + s := new(big.Int).SetBytes(signature[32:64]) + + // Create signed transaction + signer := types.NewEIP155Signer(b.chainID) + signedTx, err := tx.WithSignature(signer, append(append(r.Bytes(), s.Bytes()...), v.Bytes()...)) + if err != nil { + return nil, fmt.Errorf("failed to add signature to transaction: %w", err) + } + + // Serialize the signed transaction + signedTxBytes, err := rlp.EncodeToBytes(signedTx) + if err != nil { + return nil, fmt.Errorf("failed to encode signed transaction: %w", err) + } + + return signedTxBytes, nil +} + +// BroadcastTransaction sends the signed transaction to the network. 
+func (b *OutboundTxBuilder) BroadcastTransaction(ctx context.Context, signedTx []byte) (string, error) { + // Decode the signed transaction + var tx types.Transaction + if err := rlp.DecodeBytes(signedTx, &tx); err != nil { + return "", fmt.Errorf("failed to decode signed transaction: %w", err) + } + + // Send the transaction + var txHash string + err := b.client.executeWithFailover(ctx, "broadcast_tx", func(client *ethclient.Client) error { + if err := client.SendTransaction(ctx, &tx); err != nil { + return err + } + txHash = tx.Hash().Hex() + return nil + }) + if err != nil { + return "", fmt.Errorf("failed to broadcast transaction: %w", err) + } + + b.logger.Info(). + Str("tx_hash", txHash). + Msg("outbound transaction broadcasted") + + return txHash, nil +} + +// GetTxHash extracts the transaction hash from a signed transaction. +func (b *OutboundTxBuilder) GetTxHash(signedTx []byte) (string, error) { + var tx types.Transaction + if err := rlp.DecodeBytes(signedTx, &tx); err != nil { + return "", fmt.Errorf("failed to decode signed transaction: %w", err) + } + return tx.Hash().Hex(), nil +} + +// GetChainID returns the chain identifier. +func (b *OutboundTxBuilder) GetChainID() string { + return b.caipChainID +} + +// getTSSAddress derives the TSS address from the public key. +func (b *OutboundTxBuilder) getTSSAddress() common.Address { + if b.tssPublicKey == nil { + return common.Address{} + } + return crypto.PubkeyToAddress(*b.tssPublicKey) +} + +// getNonce gets the current nonce for an address. +func (b *OutboundTxBuilder) getNonce(ctx context.Context, addr common.Address) (uint64, error) { + var nonce uint64 + err := b.client.executeWithFailover(ctx, "get_nonce", func(client *ethclient.Client) error { + var innerErr error + nonce, innerErr = client.PendingNonceAt(ctx, addr) + return innerErr + }) + return nonce, err +} + +// buildGatewayCallData builds the call data for the gateway contract. 
+// This encodes the function call to execute the outbound transaction. +func (b *OutboundTxBuilder) buildGatewayCallData(data *chaincommon.OutboundTxData) ([]byte, error) { + // Parse recipient address + if !common.IsHexAddress(data.Recipient) { + return nil, fmt.Errorf("invalid recipient address: %s", data.Recipient) + } + recipient := common.HexToAddress(data.Recipient) + + // Parse amount + amount, ok := new(big.Int).SetString(data.Amount, 10) + if !ok { + return nil, fmt.Errorf("invalid amount: %s", data.Amount) + } + + // Parse asset address + var assetAddr common.Address + if data.AssetAddr != "" && data.AssetAddr != "0x" { + if !common.IsHexAddress(data.AssetAddr) { + return nil, fmt.Errorf("invalid asset address: %s", data.AssetAddr) + } + assetAddr = common.HexToAddress(data.AssetAddr) + } + + // Parse payload + var payload []byte + if data.Payload != "" && data.Payload != "0x" { + payloadHex := strings.TrimPrefix(data.Payload, "0x") + var err error + payload, err = hex.DecodeString(payloadHex) + if err != nil { + return nil, fmt.Errorf("invalid payload hex: %w", err) + } + } + + // Build the ABI-encoded call data + // Function: executeOutbound(bytes32 txId, address recipient, uint256 amount, address asset, bytes payload) + // Selector: first 4 bytes of keccak256("executeOutbound(bytes32,address,uint256,address,bytes)") + + // For now, return a simple transfer call data + // In production, this should be the actual gateway contract ABI encoding + callData := buildExecuteOutboundCallData(data.TxID, recipient, amount, assetAddr, payload) + + return callData, nil +} + +// buildExecuteOutboundCallData creates the ABI-encoded call data for executeOutbound. 
+func buildExecuteOutboundCallData(txID string, recipient common.Address, amount *big.Int, asset common.Address, payload []byte) []byte { + // Function selector for executeOutbound(bytes32,address,uint256,address,bytes) + // keccak256("executeOutbound(bytes32,address,uint256,address,bytes)")[:4] + selector := crypto.Keccak256([]byte("executeOutbound(bytes32,address,uint256,address,bytes)"))[:4] + + // Encode txID as bytes32 + txIDBytes := common.HexToHash(txID) + + // Build the call data + // This is a simplified encoding - in production use go-ethereum's abi package + data := make([]byte, 0, 4+32*5+len(payload)) + data = append(data, selector...) + data = append(data, txIDBytes[:]...) + data = append(data, common.LeftPadBytes(recipient[:], 32)...) + data = append(data, common.LeftPadBytes(amount.Bytes(), 32)...) + data = append(data, common.LeftPadBytes(asset[:], 32)...) + + // Dynamic data offset for payload (5 * 32 = 160) + offset := big.NewInt(160) + data = append(data, common.LeftPadBytes(offset.Bytes(), 32)...) + + // Payload length + payloadLen := big.NewInt(int64(len(payload))) + data = append(data, common.LeftPadBytes(payloadLen.Bytes(), 32)...) + + // Payload data (padded to 32 bytes) + if len(payload) > 0 { + paddedPayload := make([]byte, ((len(payload)+31)/32)*32) + copy(paddedPayload, payload) + data = append(data, paddedPayload...) + } + + return data +} diff --git a/universalClient/chains/push/client.go b/universalClient/chains/push/client.go new file mode 100644 index 00000000..68cd81de --- /dev/null +++ b/universalClient/chains/push/client.go @@ -0,0 +1,225 @@ +// Package push provides a client for listening to Push Chain events. +// It handles event polling, parsing, and persistence with proper error handling, +// graceful shutdown, and concurrent safety. 
package push

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/pushchain/push-chain-node/universalClient/pushcore"
	"github.com/pushchain/push-chain-node/universalClient/store"
	"github.com/rs/zerolog"
	"gorm.io/gorm"
)

// Default configuration values.
const (
	DefaultPollInterval = 5 * time.Second
	DefaultChunkSize    = 1000
	DefaultQueryLimit   = 100

	// Bounds enforced by Config.Validate.
	minPollInterval = 1 * time.Second
	maxPollInterval = 5 * time.Minute
)

// Sentinel errors for the Push listener.
// Callers are expected to compare with errors.Is.
var (
	ErrAlreadyRunning  = errors.New("push listener is already running")
	ErrNotRunning      = errors.New("push listener is not running")
	ErrNilClient       = errors.New("push client cannot be nil")
	ErrNilDatabase     = errors.New("database connection cannot be nil")
	ErrInvalidInterval = errors.New("poll interval out of valid range")
)

// Config holds configuration for the Push listener.
type Config struct {
	// PollInterval is the duration between polling cycles.
	// Must be between 1 second and 5 minutes.
	PollInterval time.Duration

	// ChunkSize is the number of blocks to process in each batch.
	// Defaults to 1000 if not specified.
	ChunkSize uint64

	// QueryLimit is the maximum number of transactions to fetch per query.
	// Defaults to 100 if not specified.
	QueryLimit uint64
}

// Validate validates the configuration and applies defaults where necessary.
// Note: a zero PollInterval fails this check; NewListener calls applyDefaults
// before Validate, so a zero-value Config is still accepted there.
func (c *Config) Validate() error {
	if c.PollInterval < minPollInterval || c.PollInterval > maxPollInterval {
		return fmt.Errorf("%w: must be between %v and %v, got %v",
			ErrInvalidInterval, minPollInterval, maxPollInterval, c.PollInterval)
	}
	return nil
}

// applyDefaults sets default values for zero-value fields.
+func (c *Config) applyDefaults() { + if c.PollInterval == 0 { + c.PollInterval = DefaultPollInterval + } + if c.ChunkSize == 0 { + c.ChunkSize = DefaultChunkSize + } + if c.QueryLimit == 0 { + c.QueryLimit = DefaultQueryLimit + } +} + +// PushClient defines the interface for interacting with the Push chain. +// This allows for easier testing and dependency injection. +type PushClient interface { + GetLatestBlock() (uint64, error) + GetTxsByEvents(query string, minHeight, maxHeight uint64, limit uint64) ([]*pushcore.TxResult, error) +} + +// Listener listens for events from the Push chain and stores them in the database. +type Listener struct { + logger zerolog.Logger + pushClient PushClient + db *gorm.DB + cfg Config + + watcher *EventWatcher + mu sync.RWMutex + running atomic.Bool + stopCh chan struct{} +} + +// NewListener creates a new Push event listener. +// Returns an error if required dependencies are nil or configuration is invalid. +func NewListener( + client PushClient, + db *gorm.DB, + logger zerolog.Logger, + cfg *Config, +) (*Listener, error) { + if client == nil { + return nil, ErrNilClient + } + if db == nil { + return nil, ErrNilDatabase + } + + // Use default config if nil + if cfg == nil { + cfg = &Config{} + } + + // Apply defaults first + cfg.applyDefaults() + + // Then validate + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("invalid configuration: %w", err) + } + + return &Listener{ + logger: logger.With().Str("component", "push_listener").Logger(), + pushClient: client, + db: db, + cfg: *cfg, + stopCh: make(chan struct{}), + }, nil +} + +// Start begins listening for events from the Push chain. +// Returns ErrAlreadyRunning if the listener is already running. 
// Start begins listening for events from the Push chain.
// Returns ErrAlreadyRunning if the listener is already running.
// The running flag is claimed atomically before initialization, so a
// concurrent second Start fails fast even while this one is still setting up.
func (l *Listener) Start(ctx context.Context) error {
	if !l.running.CompareAndSwap(false, true) {
		return ErrAlreadyRunning
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	// Load last processed block from chain_states
	startBlock, err := l.getLastProcessedBlock()
	if err != nil {
		// Roll back the running flag on every failure path so the
		// listener can be started again.
		l.running.Store(false)
		return fmt.Errorf("failed to get last processed block: %w", err)
	}

	l.logger.Info().
		Uint64("start_block", startBlock).
		Dur("poll_interval", l.cfg.PollInterval).
		Uint64("chunk_size", l.cfg.ChunkSize).
		Msg("starting Push event listener")

	// Reset stop channel for new run (the previous one was closed by Stop).
	l.stopCh = make(chan struct{})

	// Create and start event watcher
	l.watcher = NewEventWatcher(
		l.pushClient,
		l.db,
		l.logger,
		l.cfg,
		startBlock,
	)

	if err := l.watcher.Start(ctx); err != nil {
		l.running.Store(false)
		return fmt.Errorf("failed to start event watcher: %w", err)
	}

	l.logger.Info().Msg("Push event listener started successfully")
	return nil
}

// Stop gracefully stops the listener.
// Returns ErrNotRunning if the listener is not running.
// NOTE(review): stopCh is closed here but nothing in this file receives from
// it — the watcher is stopped via watcher.Stop(); confirm stopCh is still needed.
func (l *Listener) Stop() error {
	if !l.running.CompareAndSwap(true, false) {
		return ErrNotRunning
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	l.logger.Info().Msg("stopping Push event listener")

	// Signal stop
	close(l.stopCh)

	// Stop the watcher
	if l.watcher != nil {
		l.watcher.Stop()
		l.watcher = nil
	}

	l.logger.Info().Msg("Push event listener stopped successfully")
	return nil
}

// IsRunning returns whether the listener is currently running.
func (l *Listener) IsRunning() bool {
	return l.running.Load()
}

// getLastProcessedBlock reads the last processed block from chain_states.
// getLastProcessedBlock reads the last processed block from chain_states.
// Returns 0 (not an error) when no state row exists yet, so a fresh node
// starts from the beginning of the chain.
func (l *Listener) getLastProcessedBlock() (uint64, error) {
	var chainState store.ChainState
	// NOTE(review): First() without an explicit ORDER BY assumes chain_states
	// holds a single row per chain — confirm against the store schema.
	result := l.db.First(&chainState)

	if result.Error != nil {
		if errors.Is(result.Error, gorm.ErrRecordNotFound) {
			l.logger.Info().Msg("no previous state found, starting from block 0")
			return 0, nil
		}
		return 0, fmt.Errorf("failed to query chain state: %w", result.Error)
	}

	l.logger.Info().
		Uint64("block", chainState.LastBlock).
		Msg("resuming from last processed block")

	return chainState.LastBlock, nil
}
diff --git a/universalClient/chains/push/client_test.go b/universalClient/chains/push/client_test.go
new file mode 100644
index 00000000..0feda53e
--- /dev/null
+++ b/universalClient/chains/push/client_test.go
@@ -0,0 +1,246 @@
package push

import (
	"context"
	"testing"
	"time"

	"github.com/pushchain/push-chain-node/universalClient/pushcore"
	"github.com/pushchain/push-chain-node/universalClient/store"
	"github.com/rs/zerolog"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	gormlogger "gorm.io/gorm/logger"
)

// mockPushClient implements PushClient for testing.
+type mockPushClient struct { + latestBlock uint64 + txResults []*pushcore.TxResult + err error +} + +func (m *mockPushClient) GetLatestBlock() (uint64, error) { + return m.latestBlock, m.err +} + +func (m *mockPushClient) GetTxsByEvents(query string, minHeight, maxHeight uint64, limit uint64) ([]*pushcore.TxResult, error) { + return m.txResults, m.err +} + +func newTestDB(t *testing.T) *gorm.DB { + db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{ + Logger: gormlogger.Default.LogMode(gormlogger.Silent), + }) + require.NoError(t, err) + + // Use the actual store types for migration + err = db.AutoMigrate(&store.ChainState{}, &store.PCEvent{}) + require.NoError(t, err) + + return db +} + +func TestNewListener(t *testing.T) { + logger := zerolog.Nop() + db := newTestDB(t) + client := &mockPushClient{} + + t.Run("success with nil config", func(t *testing.T) { + listener, err := NewListener(client, db, logger, nil) + require.NoError(t, err) + require.NotNil(t, listener) + + // Verify defaults applied + assert.Equal(t, DefaultPollInterval, listener.cfg.PollInterval) + assert.Equal(t, uint64(DefaultChunkSize), listener.cfg.ChunkSize) + assert.Equal(t, uint64(DefaultQueryLimit), listener.cfg.QueryLimit) + }) + + t.Run("success with custom config", func(t *testing.T) { + cfg := &Config{ + PollInterval: 10 * time.Second, + ChunkSize: 500, + QueryLimit: 50, + } + listener, err := NewListener(client, db, logger, cfg) + require.NoError(t, err) + require.NotNil(t, listener) + + assert.Equal(t, 10*time.Second, listener.cfg.PollInterval) + assert.Equal(t, uint64(500), listener.cfg.ChunkSize) + assert.Equal(t, uint64(50), listener.cfg.QueryLimit) + }) + + t.Run("nil client error", func(t *testing.T) { + listener, err := NewListener(nil, db, logger, nil) + require.Error(t, err) + assert.ErrorIs(t, err, ErrNilClient) + assert.Nil(t, listener) + }) + + t.Run("nil database error", func(t *testing.T) { + listener, err := NewListener(client, nil, logger, nil) + 
require.Error(t, err) + assert.ErrorIs(t, err, ErrNilDatabase) + assert.Nil(t, listener) + }) + + t.Run("invalid poll interval - too short", func(t *testing.T) { + cfg := &Config{ + PollInterval: 100 * time.Millisecond, // Less than minPollInterval + } + listener, err := NewListener(client, db, logger, cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "poll interval") + assert.Nil(t, listener) + }) + + t.Run("invalid poll interval - too long", func(t *testing.T) { + cfg := &Config{ + PollInterval: 10 * time.Minute, // More than maxPollInterval + } + listener, err := NewListener(client, db, logger, cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "poll interval") + assert.Nil(t, listener) + }) +} + +func TestListener_StartStop(t *testing.T) { + logger := zerolog.Nop() + db := newTestDB(t) + client := &mockPushClient{latestBlock: 100} + + listener, err := NewListener(client, db, logger, nil) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t.Run("start successfully", func(t *testing.T) { + err := listener.Start(ctx) + require.NoError(t, err) + assert.True(t, listener.IsRunning()) + }) + + t.Run("start when already running returns error", func(t *testing.T) { + err := listener.Start(ctx) + require.Error(t, err) + assert.ErrorIs(t, err, ErrAlreadyRunning) + }) + + t.Run("stop successfully", func(t *testing.T) { + err := listener.Stop() + require.NoError(t, err) + assert.False(t, listener.IsRunning()) + }) + + t.Run("stop when not running returns error", func(t *testing.T) { + err := listener.Stop() + require.Error(t, err) + assert.ErrorIs(t, err, ErrNotRunning) + }) +} + +func TestListener_IsRunning(t *testing.T) { + logger := zerolog.Nop() + db := newTestDB(t) + client := &mockPushClient{latestBlock: 100} + + listener, err := NewListener(client, db, logger, nil) + require.NoError(t, err) + + assert.False(t, listener.IsRunning()) + + ctx := context.Background() + err = 
listener.Start(ctx) + require.NoError(t, err) + assert.True(t, listener.IsRunning()) + + err = listener.Stop() + require.NoError(t, err) + assert.False(t, listener.IsRunning()) +} + +func TestConfig_Validate(t *testing.T) { + tests := []struct { + name string + cfg Config + wantErr bool + }{ + { + name: "valid config", + cfg: Config{PollInterval: 5 * time.Second}, + wantErr: false, + }, + { + name: "min poll interval", + cfg: Config{PollInterval: 1 * time.Second}, + wantErr: false, + }, + { + name: "max poll interval", + cfg: Config{PollInterval: 5 * time.Minute}, + wantErr: false, + }, + { + name: "too short poll interval", + cfg: Config{PollInterval: 500 * time.Millisecond}, + wantErr: true, + }, + { + name: "too long poll interval", + cfg: Config{PollInterval: 10 * time.Minute}, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.cfg.Validate() + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestConfig_ApplyDefaults(t *testing.T) { + t.Run("all zero values", func(t *testing.T) { + cfg := &Config{} + cfg.applyDefaults() + + assert.Equal(t, DefaultPollInterval, cfg.PollInterval) + assert.Equal(t, uint64(DefaultChunkSize), cfg.ChunkSize) + assert.Equal(t, uint64(DefaultQueryLimit), cfg.QueryLimit) + }) + + t.Run("partial values", func(t *testing.T) { + cfg := &Config{ + PollInterval: 10 * time.Second, + } + cfg.applyDefaults() + + assert.Equal(t, 10*time.Second, cfg.PollInterval) + assert.Equal(t, uint64(DefaultChunkSize), cfg.ChunkSize) + assert.Equal(t, uint64(DefaultQueryLimit), cfg.QueryLimit) + }) + + t.Run("all values set", func(t *testing.T) { + cfg := &Config{ + PollInterval: 10 * time.Second, + ChunkSize: 500, + QueryLimit: 50, + } + cfg.applyDefaults() + + assert.Equal(t, 10*time.Second, cfg.PollInterval) + assert.Equal(t, uint64(500), cfg.ChunkSize) + assert.Equal(t, uint64(50), cfg.QueryLimit) + }) +} diff --git 
a/universalClient/chains/push/event_parser.go b/universalClient/chains/push/event_parser.go index 7e222572..5ca88955 100644 --- a/universalClient/chains/push/event_parser.go +++ b/universalClient/chains/push/event_parser.go @@ -2,113 +2,246 @@ package push import ( "encoding/json" + "errors" "fmt" "strconv" abci "github.com/cometbft/cometbft/abci/types" "github.com/pushchain/push-chain-node/universalClient/store" - "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" + utsstypes "github.com/pushchain/push-chain-node/x/utss/types" ) -// ParseTSSProcessInitiatedEvent parses a tss_process_initiated event from ABCI events. -// Returns nil if the event is not a tss_process_initiated event. -func ParseTSSProcessInitiatedEvent(events []abci.Event, blockHeight uint64, txHash string) (*TSSProcessEvent, error) { - for _, event := range events { - if event.Type != EventTypeTssProcessInitiated { - continue - } +// Event type constants +const ( + EventTypeTSSProcessInitiated = utsstypes.EventTypeTssProcessInitiated + EventTypeOutboundCreated = uexecutortypes.EventTypeOutboundCreated +) - parsed := &TSSProcessEvent{ - BlockHeight: blockHeight, - TxHash: txHash, - } +// TSS event attribute keys. +const ( + AttrKeyProcessID = "process_id" + AttrKeyProcessType = "process_type" + AttrKeyParticipants = "participants" + AttrKeyExpiryHeight = "expiry_height" +) - // Track which required fields were found (process_id=0 is valid!) 
- foundProcessID := false - - for _, attr := range event.Attributes { - switch attr.Key { - case AttrKeyProcessID: - id, err := strconv.ParseUint(attr.Value, 10, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse process_id: %w", err) - } - parsed.ProcessID = id - foundProcessID = true - - case AttrKeyProcessType: - parsed.ProcessType = convertProcessType(attr.Value) - - case AttrKeyParticipants: - var participants []string - if err := json.Unmarshal([]byte(attr.Value), &participants); err != nil { - return nil, fmt.Errorf("failed to parse participants: %w", err) - } - parsed.Participants = participants - - case AttrKeyExpiryHeight: - height, err := strconv.ParseUint(attr.Value, 10, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse expiry_height: %w", err) - } - parsed.ExpiryHeight = height - } - } +// TSS process type values as defined in the Push chain. +const ( + ChainProcessTypeKeygen = "TSS_PROCESS_KEYGEN" + ChainProcessTypeRefresh = "TSS_PROCESS_REFRESH" + ChainProcessTypeQuorumChange = "TSS_PROCESS_QUORUM_CHANGE" +) + +// Outbound event attribute keys. +const ( + AttrKeyTxID = "tx_id" + AttrKeyUniversalTxID = "utx_id" + AttrKeyOutboundID = "outbound_id" + AttrKeyDestinationChain = "destination_chain" + AttrKeyRecipient = "recipient" + AttrKeyAmount = "amount" + AttrKeyAssetAddr = "asset_addr" + AttrKeySender = "sender" + AttrKeyPayload = "payload" + AttrKeyGasLimit = "gas_limit" + AttrKeyTxType = "tx_type" + AttrKeyPcTxHash = "pc_tx_hash" + AttrKeyLogIndex = "log_index" + AttrKeyRevertMsg = "revert_msg" + AttrKeyData = "data" +) + +// Protocol type values for internal event classification. +const ( + ProtocolTypeKeygen = "KEYGEN" + ProtocolTypeKeyrefresh = "KEYREFRESH" + ProtocolTypeQuorumChange = "QUORUM_CHANGE" + ProtocolTypeSign = "SIGN" +) + +// Event status values. +const ( + StatusPending = "PENDING" +) + +// OutboundExpiryOffset is the number of blocks after event detection +// before an outbound event expires. 
const OutboundExpiryOffset = 400

// Parser errors.
// Wrapped with context by the parse functions; match with errors.Is.
var (
	ErrMissingProcessID    = errors.New("missing required attribute: process_id")
	ErrMissingProcessType  = errors.New("missing required attribute: process_type")
	ErrMissingTxID         = errors.New("missing required attribute: tx_id")
	ErrInvalidProcessID    = errors.New("invalid process_id format")
	ErrInvalidExpiryHeight = errors.New("invalid expiry_height format")
	ErrInvalidParticipants = errors.New("invalid participants format")
)

// ParseEvent parses a Push chain event from an ABCI event.
// Returns nil if the event type is not recognized.
// Sets BlockHeight and Status on successfully parsed events.
func ParseEvent(event abci.Event, blockHeight uint64) (*store.PCEvent, error) {
	var parsed *store.PCEvent
	var err error

	switch event.Type {
	case EventTypeTSSProcessInitiated:
		parsed, err = parseTSSEvent(event)
	case EventTypeOutboundCreated:
		parsed, err = parseOutboundEvent(event)
	default:
		// Unknown event type - not an error, just skip
		return nil, nil
	}

	if err != nil {
		return nil, fmt.Errorf("failed to parse %s event: %w", event.Type, err)
	}

	// Defensive: the parse functions currently never return (nil, nil),
	// but guard anyway so a future change cannot cause a nil dereference below.
	if parsed == nil {
		return nil, nil
	}

	// Set common fields
	parsed.BlockHeight = blockHeight
	parsed.Status = StatusPending

	// Set expiry for outbound events (block seen + 400).
	// TSS events carry their own expiry_height attribute instead.
	if event.Type == EventTypeOutboundCreated {
		parsed.ExpiryBlockHeight = blockHeight + OutboundExpiryOffset
	}

	return parsed, nil
}

// parseTSSEvent parses a tss_process_initiated event.
+func parseTSSEvent(event abci.Event) (*store.PCEvent, error) { + attrs := extractAttributes(event) + + // Parse required fields + processIDStr, ok := attrs[AttrKeyProcessID] + if !ok { + return nil, ErrMissingProcessID + } + + processID, err := strconv.ParseUint(processIDStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrInvalidProcessID, err) + } - // Validate required fields - if !foundProcessID { - return nil, fmt.Errorf("missing process_id in event") + processTypeStr, ok := attrs[AttrKeyProcessType] + if !ok { + return nil, ErrMissingProcessType + } + + protocolType := convertProcessType(processTypeStr) + + // Parse optional fields + var expiryHeight uint64 + if expiryStr, ok := attrs[AttrKeyExpiryHeight]; ok { + expiryHeight, err = strconv.ParseUint(expiryStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrInvalidExpiryHeight, err) } - if parsed.ProcessType == "" { - return nil, fmt.Errorf("missing process_type in event") + } + + var participants []string + if participantsStr, ok := attrs[AttrKeyParticipants]; ok { + if err := json.Unmarshal([]byte(participantsStr), &participants); err != nil { + return nil, fmt.Errorf("%w: %v", ErrInvalidParticipants, err) } + } - return parsed, nil + // Build event data + eventData, err := buildTSSEventData(processID, participants) + if err != nil { + return nil, fmt.Errorf("failed to build event data: %w", err) } - return nil, nil // No tss_process_initiated event found + return &store.PCEvent{ + EventID: fmt.Sprintf("%d", processID), + ExpiryBlockHeight: expiryHeight, + Type: protocolType, + EventData: eventData, + }, nil } -// convertProcessType converts chain process type to internal protocol type. 
-func convertProcessType(chainType string) string { - switch chainType { - case ProcessTypeKeygen: - return ProtocolTypeKeygen - case ProcessTypeRefresh: - return ProtocolTypeKeyrefresh - case ProcessTypeQuorumChange: - return ProtocolTypeQuorumChange - default: - return chainType // Return as-is if unknown +// parseOutboundEvent parses an outbound_created event. +func parseOutboundEvent(event abci.Event) (*store.PCEvent, error) { + attrs := extractAttributes(event) + + // Parse required field + txID, ok := attrs[AttrKeyTxID] + if !ok { + return nil, ErrMissingTxID + } + + // Build structured event data + outboundData := uexecutortypes.OutboundCreatedEvent{ + UniversalTxId: attrs[AttrKeyUniversalTxID], + OutboundId: attrs[AttrKeyOutboundID], + TxID: txID, + DestinationChain: attrs[AttrKeyDestinationChain], + Recipient: attrs[AttrKeyRecipient], + Amount: attrs[AttrKeyAmount], + AssetAddr: attrs[AttrKeyAssetAddr], + Sender: attrs[AttrKeySender], + Payload: attrs[AttrKeyPayload], + GasLimit: attrs[AttrKeyGasLimit], + TxType: attrs[AttrKeyTxType], + PcTxHash: attrs[AttrKeyPcTxHash], + LogIndex: attrs[AttrKeyLogIndex], + RevertMsg: attrs[AttrKeyRevertMsg], } + + eventData, err := json.Marshal(outboundData) + if err != nil { + return nil, fmt.Errorf("failed to marshal outbound event data: %w", err) + } + + return &store.PCEvent{ + EventID: txID, + Type: ProtocolTypeSign, + EventData: eventData, + }, nil } -// ToTSSEventRecord converts the parsed event to a TSSEvent database record. -func (e *TSSProcessEvent) ToTSSEventRecord() *store.TSSEvent { - // Serialize participants as event data - var eventData []byte - if len(e.Participants) > 0 { - data := map[string]interface{}{ - "process_id": e.ProcessID, - // TODO: Maybe while tss process participants can be read from this rather than chain - "participants": e.Participants, - "tx_hash": e.TxHash, - } - eventData, _ = json.Marshal(data) +// extractAttributes extracts all attributes from an ABCI event into a map. 
+func extractAttributes(event abci.Event) map[string]string { + attrs := make(map[string]string, len(event.Attributes)) + for _, attr := range event.Attributes { + attrs[attr.Key] = attr.Value + } + return attrs +} + +// buildTSSEventData constructs the JSON event data for TSS events. +func buildTSSEventData(processID uint64, participants []string) ([]byte, error) { + if len(participants) == 0 { + return nil, nil } - return &store.TSSEvent{ - EventID: e.EventID(), - BlockNumber: e.BlockHeight, - ProtocolType: e.ProcessType, - Status: eventstore.StatusPending, - ExpiryHeight: e.ExpiryHeight, - EventData: eventData, + data := map[string]interface{}{ + "process_id": processID, + "participants": participants, } + + return json.Marshal(data) } -// EventID returns the unique event ID for this process. -// Format: "{process_id}" -func (e *TSSProcessEvent) EventID() string { - return fmt.Sprintf("%d", e.ProcessID) +// convertProcessType converts a chain process type to an internal protocol type. 
+func convertProcessType(chainType string) string { + switch chainType { + case ChainProcessTypeKeygen: + return ProtocolTypeKeygen + case ChainProcessTypeRefresh: + return ProtocolTypeKeyrefresh + case ChainProcessTypeQuorumChange: + return ProtocolTypeQuorumChange + default: + // Return as-is for unknown types to maintain forward compatibility + return chainType + } } diff --git a/universalClient/chains/push/event_parser_test.go b/universalClient/chains/push/event_parser_test.go new file mode 100644 index 00000000..9ab91f25 --- /dev/null +++ b/universalClient/chains/push/event_parser_test.go @@ -0,0 +1,392 @@ +package push + +import ( + "encoding/json" + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" +) + +func TestParseEvent_TSSEvent(t *testing.T) { + tests := []struct { + name string + event abci.Event + blockHeight uint64 + wantEventID string + wantType string + wantExpiry uint64 + wantErr bool + errContains string + }{ + { + name: "valid keygen event", + event: abci.Event{ + Type: EventTypeTSSProcessInitiated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyProcessID, Value: "123"}, + {Key: AttrKeyProcessType, Value: ChainProcessTypeKeygen}, + {Key: AttrKeyExpiryHeight, Value: "1000"}, + {Key: AttrKeyParticipants, Value: `["val1","val2","val3"]`}, + }, + }, + blockHeight: 500, + wantEventID: "123", + wantType: ProtocolTypeKeygen, + wantExpiry: 1000, + wantErr: false, + }, + { + name: "valid refresh event", + event: abci.Event{ + Type: EventTypeTSSProcessInitiated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyProcessID, Value: "456"}, + {Key: AttrKeyProcessType, Value: ChainProcessTypeRefresh}, + }, + }, + blockHeight: 600, + wantEventID: "456", + wantType: ProtocolTypeKeyrefresh, + wantExpiry: 0, + wantErr: false, + }, + { + name: "valid quorum change event", + event: abci.Event{ + 
Type: EventTypeTSSProcessInitiated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyProcessID, Value: "789"}, + {Key: AttrKeyProcessType, Value: ChainProcessTypeQuorumChange}, + {Key: AttrKeyExpiryHeight, Value: "2000"}, + }, + }, + blockHeight: 700, + wantEventID: "789", + wantType: ProtocolTypeQuorumChange, + wantExpiry: 2000, + wantErr: false, + }, + { + name: "missing process_id", + event: abci.Event{ + Type: EventTypeTSSProcessInitiated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyProcessType, Value: ChainProcessTypeKeygen}, + }, + }, + blockHeight: 500, + wantErr: true, + errContains: "process_id", + }, + { + name: "missing process_type", + event: abci.Event{ + Type: EventTypeTSSProcessInitiated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyProcessID, Value: "123"}, + }, + }, + blockHeight: 500, + wantErr: true, + errContains: "process_type", + }, + { + name: "invalid process_id", + event: abci.Event{ + Type: EventTypeTSSProcessInitiated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyProcessID, Value: "not-a-number"}, + {Key: AttrKeyProcessType, Value: ChainProcessTypeKeygen}, + }, + }, + blockHeight: 500, + wantErr: true, + errContains: "process_id", + }, + { + name: "invalid expiry_height", + event: abci.Event{ + Type: EventTypeTSSProcessInitiated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyProcessID, Value: "123"}, + {Key: AttrKeyProcessType, Value: ChainProcessTypeKeygen}, + {Key: AttrKeyExpiryHeight, Value: "invalid"}, + }, + }, + blockHeight: 500, + wantErr: true, + errContains: "expiry_height", + }, + { + name: "invalid participants json", + event: abci.Event{ + Type: EventTypeTSSProcessInitiated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyProcessID, Value: "123"}, + {Key: AttrKeyProcessType, Value: ChainProcessTypeKeygen}, + {Key: AttrKeyParticipants, Value: "not-valid-json"}, + }, + }, + blockHeight: 500, + wantErr: true, + errContains: "participants", + }, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + result, err := ParseEvent(tt.event, tt.blockHeight) + + if tt.wantErr { + require.Error(t, err) + if tt.errContains != "" { + assert.Contains(t, err.Error(), tt.errContains) + } + return + } + + require.NoError(t, err) + require.NotNil(t, result) + + assert.Equal(t, tt.wantEventID, result.EventID) + assert.Equal(t, tt.wantType, result.Type) + assert.Equal(t, tt.wantExpiry, result.ExpiryBlockHeight) + assert.Equal(t, tt.blockHeight, result.BlockHeight) + assert.Equal(t, StatusPending, result.Status) + }) + } +} + +func TestParseEvent_OutboundEvent(t *testing.T) { + tests := []struct { + name string + event abci.Event + blockHeight uint64 + wantEventID string + wantExpiry uint64 + wantErr bool + errContains string + }{ + { + name: "valid outbound event", + event: abci.Event{ + Type: EventTypeOutboundCreated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyTxID, Value: "0x123abc"}, + {Key: AttrKeyUniversalTxID, Value: "utx-001"}, + {Key: AttrKeyOutboundID, Value: "out-001"}, + {Key: AttrKeyDestinationChain, Value: "ethereum"}, + {Key: AttrKeyRecipient, Value: "0xrecipient"}, + {Key: AttrKeyAmount, Value: "1000000"}, + {Key: AttrKeyAssetAddr, Value: "0xtoken"}, + {Key: AttrKeySender, Value: "0xsender"}, + {Key: AttrKeyPayload, Value: "0x"}, + {Key: AttrKeyGasLimit, Value: "21000"}, + {Key: AttrKeyTxType, Value: "TRANSFER"}, + {Key: AttrKeyPcTxHash, Value: "0xpctxhash"}, + {Key: AttrKeyLogIndex, Value: "0"}, + {Key: AttrKeyRevertMsg, Value: ""}, + }, + }, + blockHeight: 1000, + wantEventID: "0x123abc", + wantExpiry: 1000 + OutboundExpiryOffset, // blockHeight + 400 + wantErr: false, + }, + { + name: "minimal outbound event (only tx_id)", + event: abci.Event{ + Type: EventTypeOutboundCreated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyTxID, Value: "0xminimal"}, + }, + }, + blockHeight: 500, + wantEventID: "0xminimal", + wantExpiry: 500 + OutboundExpiryOffset, + wantErr: false, + }, + { + name: "missing tx_id", + event: abci.Event{ + 
Type: EventTypeOutboundCreated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyUniversalTxID, Value: "utx-001"}, + }, + }, + blockHeight: 500, + wantErr: true, + errContains: "tx_id", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := ParseEvent(tt.event, tt.blockHeight) + + if tt.wantErr { + require.Error(t, err) + if tt.errContains != "" { + assert.Contains(t, err.Error(), tt.errContains) + } + return + } + + require.NoError(t, err) + require.NotNil(t, result) + + assert.Equal(t, tt.wantEventID, result.EventID) + assert.Equal(t, ProtocolTypeSign, result.Type) + assert.Equal(t, tt.wantExpiry, result.ExpiryBlockHeight) + assert.Equal(t, tt.blockHeight, result.BlockHeight) + assert.Equal(t, StatusPending, result.Status) + }) + } +} + +func TestParseEvent_OutboundEventData(t *testing.T) { + event := abci.Event{ + Type: EventTypeOutboundCreated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyTxID, Value: "0x123abc"}, + {Key: AttrKeyUniversalTxID, Value: "utx-001"}, + {Key: AttrKeyOutboundID, Value: "out-001"}, + {Key: AttrKeyDestinationChain, Value: "ethereum"}, + {Key: AttrKeyRecipient, Value: "0xrecipient"}, + {Key: AttrKeyAmount, Value: "1000000"}, + {Key: AttrKeyAssetAddr, Value: "0xtoken"}, + {Key: AttrKeySender, Value: "0xsender"}, + {Key: AttrKeyPayload, Value: "0xpayload"}, + {Key: AttrKeyGasLimit, Value: "21000"}, + {Key: AttrKeyTxType, Value: "TRANSFER"}, + {Key: AttrKeyPcTxHash, Value: "0xpctxhash"}, + {Key: AttrKeyLogIndex, Value: "5"}, + {Key: AttrKeyRevertMsg, Value: "revert reason"}, + }, + } + + result, err := ParseEvent(event, 1000) + require.NoError(t, err) + require.NotNil(t, result) + + // Unmarshal event data and verify all fields + var data uexecutortypes.OutboundCreatedEvent + err = json.Unmarshal(result.EventData, &data) + require.NoError(t, err) + + assert.Equal(t, "0x123abc", data.TxID) + assert.Equal(t, "utx-001", data.UniversalTxId) + assert.Equal(t, "out-001", data.OutboundId) + 
assert.Equal(t, "ethereum", data.DestinationChain) + assert.Equal(t, "0xrecipient", data.Recipient) + assert.Equal(t, "1000000", data.Amount) + assert.Equal(t, "0xtoken", data.AssetAddr) + assert.Equal(t, "0xsender", data.Sender) + assert.Equal(t, "0xpayload", data.Payload) + assert.Equal(t, "21000", data.GasLimit) + assert.Equal(t, "TRANSFER", data.TxType) + assert.Equal(t, "0xpctxhash", data.PcTxHash) + assert.Equal(t, "5", data.LogIndex) + assert.Equal(t, "revert reason", data.RevertMsg) +} + +func TestParseEvent_UnknownEventType(t *testing.T) { + event := abci.Event{ + Type: "unknown_event_type", + Attributes: []abci.EventAttribute{ + {Key: "some_key", Value: "some_value"}, + }, + } + + result, err := ParseEvent(event, 1000) + require.NoError(t, err) + assert.Nil(t, result, "unknown event types should return nil without error") +} + +func TestConvertProcessType(t *testing.T) { + tests := []struct { + input string + expected string + }{ + {ChainProcessTypeKeygen, ProtocolTypeKeygen}, + {ChainProcessTypeRefresh, ProtocolTypeKeyrefresh}, + {ChainProcessTypeQuorumChange, ProtocolTypeQuorumChange}, + {"UNKNOWN_TYPE", "UNKNOWN_TYPE"}, // Unknown types returned as-is + {"", ""}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + result := convertProcessType(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestExtractAttributes(t *testing.T) { + event := abci.Event{ + Type: "test", + Attributes: []abci.EventAttribute{ + {Key: "key1", Value: "value1"}, + {Key: "key2", Value: "value2"}, + {Key: "key3", Value: ""}, + }, + } + + attrs := extractAttributes(event) + + assert.Len(t, attrs, 3) + assert.Equal(t, "value1", attrs["key1"]) + assert.Equal(t, "value2", attrs["key2"]) + assert.Equal(t, "", attrs["key3"]) +} + +func TestBuildTSSEventData(t *testing.T) { + t.Run("with participants", func(t *testing.T) { + data, err := buildTSSEventData(123, []string{"val1", "val2"}) + require.NoError(t, err) + require.NotNil(t, data) + + var 
result map[string]interface{} + err = json.Unmarshal(data, &result) + require.NoError(t, err) + + assert.Equal(t, float64(123), result["process_id"]) + participants := result["participants"].([]interface{}) + assert.Len(t, participants, 2) + assert.Equal(t, "val1", participants[0]) + assert.Equal(t, "val2", participants[1]) + }) + + t.Run("without participants", func(t *testing.T) { + data, err := buildTSSEventData(123, nil) + require.NoError(t, err) + assert.Nil(t, data) + + data, err = buildTSSEventData(123, []string{}) + require.NoError(t, err) + assert.Nil(t, data) + }) +} + +func TestOutboundExpiryOffset(t *testing.T) { + // Verify the constant is set correctly + assert.Equal(t, uint64(400), uint64(OutboundExpiryOffset)) + + // Verify expiry calculation + blockHeight := uint64(1000) + event := abci.Event{ + Type: EventTypeOutboundCreated, + Attributes: []abci.EventAttribute{ + {Key: AttrKeyTxID, Value: "0xtest"}, + }, + } + + result, err := ParseEvent(event, blockHeight) + require.NoError(t, err) + assert.Equal(t, blockHeight+OutboundExpiryOffset, result.ExpiryBlockHeight) +} diff --git a/universalClient/chains/push/event_watcher.go b/universalClient/chains/push/event_watcher.go index 68100b93..4a5b9a4a 100644 --- a/universalClient/chains/push/event_watcher.go +++ b/universalClient/chains/push/event_watcher.go @@ -2,162 +2,250 @@ package push import ( "context" + "errors" + "fmt" + "sync" + "sync/atomic" "time" abci "github.com/cometbft/cometbft/abci/types" "github.com/pushchain/push-chain-node/universalClient/pushcore" - "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" + "github.com/pushchain/push-chain-node/universalClient/store" "github.com/rs/zerolog" + "gorm.io/gorm" ) -// EventWatcher polls the Push chain for TSS events and stores them in the database. +// Event queries for fetching specific event types from the chain. 
+const ( + TSSEventQuery = EventTypeTSSProcessInitiated + ".process_id>=0" + OutboundEventQuery = EventTypeOutboundCreated + ".tx_id EXISTS" +) + +// EventWatcher polls the Push chain for events and stores them in the database. +// It handles graceful shutdown, concurrent safety, and persistent state tracking. type EventWatcher struct { - logger zerolog.Logger - pushClient *pushcore.Client - eventStore *eventstore.Store - pollInterval time.Duration - lastBlock uint64 - - ctx context.Context - cancel context.CancelFunc + logger zerolog.Logger + pushClient PushClient + db *gorm.DB + cfg Config + + lastBlock atomic.Uint64 + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + stopped atomic.Bool } // NewEventWatcher creates a new event watcher. func NewEventWatcher( - client *pushcore.Client, - store *eventstore.Store, + client PushClient, + db *gorm.DB, logger zerolog.Logger, + cfg Config, + startBlock uint64, ) *EventWatcher { - return &EventWatcher{ - logger: logger.With().Str("component", "push_event_watcher").Logger(), - pushClient: client, - eventStore: store, - pollInterval: DefaultPollInterval, - lastBlock: 0, + w := &EventWatcher{ + logger: logger.With().Str("component", "event_watcher").Logger(), + pushClient: client, + db: db, + cfg: cfg, } -} - -// SetPollInterval sets the polling interval. -func (w *EventWatcher) SetPollInterval(interval time.Duration) { - w.pollInterval = interval -} - -// SetLastBlock sets the starting block for polling. -func (w *EventWatcher) SetLastBlock(block uint64) { - w.lastBlock = block + w.lastBlock.Store(startBlock) + return w } // Start begins the event watching loop. -func (w *EventWatcher) Start(ctx context.Context) { +// The watcher will continue until Stop is called or the context is cancelled. 
+func (w *EventWatcher) Start(ctx context.Context) error { w.ctx, w.cancel = context.WithCancel(ctx) + w.stopped.Store(false) + + w.wg.Add(1) go w.watchLoop() + + return nil } -// Stop stops the event watching loop. +// Stop gracefully stops the event watcher and waits for the watch loop to exit. func (w *EventWatcher) Stop() { + if w.stopped.Swap(true) { + return // Already stopped + } + if w.cancel != nil { w.cancel() } + + // Wait for the watch loop to complete + w.wg.Wait() +} + +// LastProcessedBlock returns the last block number that was successfully processed. +func (w *EventWatcher) LastProcessedBlock() uint64 { + return w.lastBlock.Load() } -// watchLoop is the main polling loop that queries for TSS events. +// watchLoop is the main polling loop that queries for Push chain events. func (w *EventWatcher) watchLoop() { - ticker := time.NewTicker(w.pollInterval) + defer w.wg.Done() + + ticker := time.NewTicker(w.cfg.PollInterval) defer ticker.Stop() - // Initial poll - w.pollForEvents() + // Perform initial poll on startup + if err := w.pollForEvents(); err != nil { + w.logger.Error().Err(err).Msg("initial poll failed") + } for { select { case <-w.ctx.Done(): - w.logger.Info().Msg("event watcher stopped") + w.logger.Info().Msg("event watcher shutting down") return case <-ticker.C: - w.pollForEvents() + if err := w.pollForEvents(); err != nil { + w.logger.Error().Err(err).Msg("poll cycle failed") + } } } } -// pollForEvents queries the chain for new TSS events. -func (w *EventWatcher) pollForEvents() { - // Get the latest block number - latestBlock, err := w.pushClient.GetLatestBlockNum() +// pollForEvents queries the chain for new events and stores them. +// Processes blocks in configurable chunks to avoid overwhelming the chain. 
+func (w *EventWatcher) pollForEvents() error { + latestBlock, err := w.pushClient.GetLatestBlock() if err != nil { - w.logger.Error().Err(err).Msg("failed to get latest block number") - return + return fmt.Errorf("failed to get latest block: %w", err) } - // Skip if we're already caught up - if w.lastBlock >= latestBlock { - return + currentBlock := w.lastBlock.Load() + + // Already caught up + if currentBlock >= latestBlock { + return nil } - // Query for TSS events since the last processed block - minHeight := w.lastBlock + 1 - if w.lastBlock == 0 { - // First run - only get events from recent blocks to avoid scanning entire chain - if latestBlock > 1000 { - minHeight = latestBlock - 1000 - } else { - minHeight = 1 + return w.processBlockRange(currentBlock, latestBlock) +} + +// processBlockRange processes all blocks from start to end in chunks. +func (w *EventWatcher) processBlockRange(start, end uint64) error { + processedBlock := start + + for processedBlock < end { + // Check for cancellation between chunks + select { + case <-w.ctx.Done(): + return w.ctx.Err() + default: + } + + // Calculate chunk boundaries + minHeight := processedBlock + 1 + maxHeight := min(processedBlock+w.cfg.ChunkSize, end) + + // Process this chunk + newEvents, err := w.processChunk(minHeight, maxHeight) + if err != nil { + return fmt.Errorf("failed to process blocks %d-%d: %w", minHeight, maxHeight, err) + } + + if newEvents > 0 { + w.logger.Info(). + Int("new_events", newEvents). + Uint64("from_block", minHeight). + Uint64("to_block", maxHeight). + Msg("processed events") + } + + // Update state + processedBlock = maxHeight + w.lastBlock.Store(processedBlock) + + // Persist progress to database + if err := w.persistBlockProgress(processedBlock); err != nil { + w.logger.Error(). + Err(err). + Uint64("block", processedBlock). 
+ Msg("failed to persist block progress") + // Continue processing - state will be recovered on restart } } + return nil +} + +// processChunk processes a single chunk of blocks and returns the number of new events stored. +func (w *EventWatcher) processChunk(minHeight, maxHeight uint64) (int, error) { w.logger.Debug(). Uint64("min_height", minHeight). - Uint64("max_height", latestBlock). - Msg("polling for TSS events") + Uint64("max_height", maxHeight). + Msg("querying events") + + newEventsCount := 0 + + // Query TSS events + tssCount, err := w.queryAndStoreEvents(TSSEventQuery, minHeight, maxHeight, "TSS") + if err != nil { + return 0, fmt.Errorf("TSS query failed: %w", err) + } + newEventsCount += tssCount + + // Query outbound events + outboundCount, err := w.queryAndStoreEvents(OutboundEventQuery, minHeight, maxHeight, "outbound") + if err != nil { + return 0, fmt.Errorf("outbound query failed: %w", err) + } + newEventsCount += outboundCount + + return newEventsCount, nil +} - // Query transactions with tss_process_initiated events +// queryAndStoreEvents queries for events matching the query and stores them. +func (w *EventWatcher) queryAndStoreEvents(query string, minHeight, maxHeight uint64, eventType string) (int, error) { txResults, err := w.pushClient.GetTxsByEvents( - DefaultEventQuery, + query, minHeight, - latestBlock, - 100, // limit + maxHeight, + w.cfg.QueryLimit, ) if err != nil { - w.logger.Error().Err(err).Msg("failed to query TSS events") - return + return 0, err } - // Process each transaction newEventsCount := 0 for _, txResult := range txResults { events := w.extractEventsFromTx(txResult) for _, event := range events { - if w.storeEvent(event) { + if stored, err := w.storeEvent(event); err != nil { + w.logger.Error(). + Err(err). + Str("event_id", event.EventID). + Str("event_type", eventType). + Msg("failed to store event") + } else if stored { newEventsCount++ } } } - if newEventsCount > 0 { - w.logger.Info(). 
- Int("new_events", newEventsCount). - Uint64("from_block", minHeight). - Uint64("to_block", latestBlock). - Msg("processed TSS events") - } - - // Update the last processed block - w.lastBlock = latestBlock + return newEventsCount, nil } -// extractEventsFromTx extracts TSS events from a transaction result. -func (w *EventWatcher) extractEventsFromTx(txResult *pushcore.TxResult) []*TSSProcessEvent { +// extractEventsFromTx extracts Push chain events from a transaction result. +func (w *EventWatcher) extractEventsFromTx(txResult *pushcore.TxResult) []*store.PCEvent { if txResult == nil || txResult.TxResponse == nil || txResult.TxResponse.TxResponse == nil { return nil } - var events []*TSSProcessEvent - - // Get events from the transaction response txResp := txResult.TxResponse.TxResponse + blockHeight := uint64(txResult.Height) + txHash := txResult.TxHash + + events := make([]*store.PCEvent, 0, len(txResp.Events)) - // Convert SDK events to ABCI events for parsing - abciEvents := make([]abci.Event, 0, len(txResp.Events)) for _, evt := range txResp.Events { + // Convert SDK event attributes to ABCI format attrs := make([]abci.EventAttribute, 0, len(evt.Attributes)) for _, attr := range evt.Attributes { attrs = append(attrs, abci.EventAttribute{ @@ -165,50 +253,93 @@ func (w *EventWatcher) extractEventsFromTx(txResult *pushcore.TxResult) []*TSSPr Value: attr.Value, }) } - abciEvents = append(abciEvents, abci.Event{ + + abciEvent := abci.Event{ Type: evt.Type, Attributes: attrs, - }) + } + + parsed, err := ParseEvent(abciEvent, blockHeight) + if err != nil { + w.logger.Warn(). + Err(err). + Str("tx_hash", txHash). + Str("event_type", evt.Type). + Msg("failed to parse event") + continue + } + + if parsed != nil { + parsed.TxHash = txHash + events = append(events, parsed) + } } - // Parse TSS events - parsed, err := ParseTSSProcessInitiatedEvent(abciEvents, uint64(txResult.Height), txResult.TxHash) - if err != nil { - w.logger.Warn(). - Err(err). 
- Str("tx_hash", txResult.TxHash). - Msg("failed to parse TSS event") - return nil + return events +} + +// storeEvent stores a Push chain event in the database if it doesn't already exist. +// Returns (true, nil) if a new event was stored, (false, nil) if it already existed, +// or (false, error) if storage failed. +func (w *EventWatcher) storeEvent(event *store.PCEvent) (bool, error) { + // Check for existing event + var existing store.PCEvent + err := w.db.Where("event_id = ?", event.EventID).First(&existing).Error + if err == nil { + // Event already exists + return false, nil + } + if !errors.Is(err, gorm.ErrRecordNotFound) { + return false, fmt.Errorf("failed to check existing event: %w", err) } - if parsed != nil { - events = append(events, parsed) + // Store new event + if err := w.db.Create(event).Error; err != nil { + return false, fmt.Errorf("failed to create event: %w", err) } - return events + w.logger.Debug(). + Str("event_id", event.EventID). + Str("type", event.Type). + Uint64("block_height", event.BlockHeight). + Msg("stored new event") + + return true, nil } -// storeEvent stores a TSS event in the database if it doesn't already exist. -// Returns true if a new event was stored, false if it already existed. -func (w *EventWatcher) storeEvent(event *TSSProcessEvent) bool { - eventID := event.EventID() +// persistBlockProgress updates the last processed block in chain_states. 
+func (w *EventWatcher) persistBlockProgress(blockNumber uint64) error { + var chainState store.ChainState + + err := w.db.First(&chainState).Error + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + return fmt.Errorf("failed to query chain state: %w", err) + } - // Check if event already exists - existing, err := w.eventStore.GetEvent(eventID) - if err == nil && existing != nil { - return false + if errors.Is(err, gorm.ErrRecordNotFound) { + // Create new record + chainState = store.ChainState{LastBlock: blockNumber} + if err := w.db.Create(&chainState).Error; err != nil { + return fmt.Errorf("failed to create chain state: %w", err) + } + return nil } - // Use eventstore to create - record := event.ToTSSEventRecord() - if err := w.eventStore.CreateEvent(record); err != nil { - w.logger.Error().Err(err).Str("event_id", eventID).Msg("failed to store TSS event") - return false + // Update existing record if we've progressed + if blockNumber > chainState.LastBlock { + chainState.LastBlock = blockNumber + if err := w.db.Save(&chainState).Error; err != nil { + return fmt.Errorf("failed to update chain state: %w", err) + } } - return true + + return nil } -// GetLastBlock returns the last processed block height. -func (w *EventWatcher) GetLastBlock() uint64 { - return w.lastBlock +// min returns the smaller of two uint64 values. 
+func min(a, b uint64) uint64 { + if a < b { + return a + } + return b } diff --git a/universalClient/chains/push/event_watcher_test.go b/universalClient/chains/push/event_watcher_test.go new file mode 100644 index 00000000..c082c2ec --- /dev/null +++ b/universalClient/chains/push/event_watcher_test.go @@ -0,0 +1,349 @@ +package push + +import ( + "context" + "testing" + "time" + + "github.com/pushchain/push-chain-node/universalClient/pushcore" + "github.com/pushchain/push-chain-node/universalClient/store" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gorm.io/driver/sqlite" + "gorm.io/gorm" + gormlogger "gorm.io/gorm/logger" +) + +func newTestDBForWatcher(t *testing.T) *gorm.DB { + db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{ + Logger: gormlogger.Default.LogMode(gormlogger.Silent), + }) + require.NoError(t, err) + + err = db.AutoMigrate(&store.ChainState{}, &store.PCEvent{}) + require.NoError(t, err) + + return db +} + +type mockPushClientForWatcher struct { + latestBlock uint64 + txResults map[string][]*pushcore.TxResult // query -> results + getBlockErr error + getTxsErr error + queriesMade []string +} + +func (m *mockPushClientForWatcher) GetLatestBlock() (uint64, error) { + return m.latestBlock, m.getBlockErr +} + +func (m *mockPushClientForWatcher) GetTxsByEvents(query string, minHeight, maxHeight uint64, limit uint64) ([]*pushcore.TxResult, error) { + m.queriesMade = append(m.queriesMade, query) + if m.getTxsErr != nil { + return nil, m.getTxsErr + } + if results, ok := m.txResults[query]; ok { + return results, nil + } + return nil, nil +} + +func TestNewEventWatcher(t *testing.T) { + logger := zerolog.Nop() + db := newTestDBForWatcher(t) + client := &mockPushClientForWatcher{} + cfg := Config{ + PollInterval: 5 * time.Second, + ChunkSize: 1000, + QueryLimit: 100, + } + + watcher := NewEventWatcher(client, db, logger, cfg, 100) + + require.NotNil(t, watcher) + assert.Equal(t, 
uint64(100), watcher.LastProcessedBlock()) +} + +func TestEventWatcher_StartStop(t *testing.T) { + logger := zerolog.Nop() + db := newTestDBForWatcher(t) + client := &mockPushClientForWatcher{latestBlock: 100} + cfg := Config{ + PollInterval: 100 * time.Millisecond, + ChunkSize: 1000, + QueryLimit: 100, + } + + watcher := NewEventWatcher(client, db, logger, cfg, 100) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := watcher.Start(ctx) + require.NoError(t, err) + + // Give it time to start + time.Sleep(50 * time.Millisecond) + + watcher.Stop() + + // Verify it can be stopped multiple times without issue + watcher.Stop() +} + +func TestEventWatcher_LastProcessedBlock(t *testing.T) { + logger := zerolog.Nop() + db := newTestDBForWatcher(t) + client := &mockPushClientForWatcher{latestBlock: 100} + cfg := Config{ + PollInterval: 5 * time.Second, + ChunkSize: 1000, + QueryLimit: 100, + } + + watcher := NewEventWatcher(client, db, logger, cfg, 50) + assert.Equal(t, uint64(50), watcher.LastProcessedBlock()) +} + +func TestEventWatcher_StoreEvent(t *testing.T) { + logger := zerolog.Nop() + db := newTestDBForWatcher(t) + client := &mockPushClientForWatcher{latestBlock: 100} + cfg := Config{ + PollInterval: 5 * time.Second, + ChunkSize: 1000, + QueryLimit: 100, + } + + watcher := NewEventWatcher(client, db, logger, cfg, 0) + + t.Run("store new event", func(t *testing.T) { + event := &store.PCEvent{ + EventID: "test-event-1", + TxHash: "0xhash1", + BlockHeight: 100, + Type: ProtocolTypeSign, + Status: StatusPending, + EventData: []byte(`{"test": "data"}`), + } + + stored, err := watcher.storeEvent(event) + require.NoError(t, err) + assert.True(t, stored) + + // Verify it's in the database + var found store.PCEvent + err = db.Where("event_id = ?", "test-event-1").First(&found).Error + require.NoError(t, err) + assert.Equal(t, "test-event-1", found.EventID) + }) + + t.Run("skip duplicate event", func(t *testing.T) { + event := 
&store.PCEvent{ + EventID: "test-event-1", // Same as above + TxHash: "0xhash2", + BlockHeight: 101, + Type: ProtocolTypeSign, + Status: StatusPending, + } + + stored, err := watcher.storeEvent(event) + require.NoError(t, err) + assert.False(t, stored, "duplicate event should not be stored") + }) + + t.Run("store different event", func(t *testing.T) { + event := &store.PCEvent{ + EventID: "test-event-2", + TxHash: "0xhash3", + BlockHeight: 102, + Type: ProtocolTypeKeygen, + Status: StatusPending, + } + + stored, err := watcher.storeEvent(event) + require.NoError(t, err) + assert.True(t, stored) + }) +} + +func TestEventWatcher_PersistBlockProgress(t *testing.T) { + logger := zerolog.Nop() + db := newTestDBForWatcher(t) + client := &mockPushClientForWatcher{latestBlock: 100} + cfg := Config{ + PollInterval: 5 * time.Second, + ChunkSize: 1000, + QueryLimit: 100, + } + + watcher := NewEventWatcher(client, db, logger, cfg, 0) + + t.Run("create new chain state", func(t *testing.T) { + err := watcher.persistBlockProgress(100) + require.NoError(t, err) + + var state store.ChainState + err = db.First(&state).Error + require.NoError(t, err) + assert.Equal(t, uint64(100), state.LastBlock) + }) + + t.Run("update existing chain state", func(t *testing.T) { + err := watcher.persistBlockProgress(200) + require.NoError(t, err) + + var state store.ChainState + err = db.First(&state).Error + require.NoError(t, err) + assert.Equal(t, uint64(200), state.LastBlock) + }) + + t.Run("skip update if not progressed", func(t *testing.T) { + err := watcher.persistBlockProgress(150) // Less than 200 + require.NoError(t, err) + + var state store.ChainState + err = db.First(&state).Error + require.NoError(t, err) + assert.Equal(t, uint64(200), state.LastBlock) // Should still be 200 + }) +} + +func TestEventWatcher_QueryAndStoreEvents(t *testing.T) { + logger := zerolog.Nop() + db := newTestDBForWatcher(t) + client := &mockPushClientForWatcher{ + latestBlock: 100, + txResults: 
make(map[string][]*pushcore.TxResult), + } + cfg := Config{ + PollInterval: 5 * time.Second, + ChunkSize: 1000, + QueryLimit: 100, + } + + watcher := NewEventWatcher(client, db, logger, cfg, 0) + + t.Run("no results", func(t *testing.T) { + count, err := watcher.queryAndStoreEvents(TSSEventQuery, 1, 100, "TSS") + require.NoError(t, err) + assert.Equal(t, 0, count) + }) +} + +func TestEventWatcher_ProcessChunk(t *testing.T) { + logger := zerolog.Nop() + db := newTestDBForWatcher(t) + client := &mockPushClientForWatcher{ + latestBlock: 100, + txResults: make(map[string][]*pushcore.TxResult), + } + cfg := Config{ + PollInterval: 5 * time.Second, + ChunkSize: 1000, + QueryLimit: 100, + } + + watcher := NewEventWatcher(client, db, logger, cfg, 0) + + t.Run("processes both TSS and outbound queries", func(t *testing.T) { + client.queriesMade = nil // Reset + + _, err := watcher.processChunk(1, 100) + require.NoError(t, err) + + // Should have made two queries - one for TSS, one for outbound + assert.Len(t, client.queriesMade, 2) + assert.Contains(t, client.queriesMade, TSSEventQuery) + assert.Contains(t, client.queriesMade, OutboundEventQuery) + }) +} + +func TestEventWatcher_PollForEvents_CaughtUp(t *testing.T) { + logger := zerolog.Nop() + db := newTestDBForWatcher(t) + client := &mockPushClientForWatcher{ + latestBlock: 100, + txResults: make(map[string][]*pushcore.TxResult), + } + cfg := Config{ + PollInterval: 5 * time.Second, + ChunkSize: 1000, + QueryLimit: 100, + } + + // Start at block 100, latest is 100 - should be caught up + watcher := NewEventWatcher(client, db, logger, cfg, 100) + + err := watcher.pollForEvents() + require.NoError(t, err) + + // No queries should have been made since we're caught up + assert.Len(t, client.queriesMade, 0) +} + +func TestEventWatcher_PollForEvents_ProcessesNewBlocks(t *testing.T) { + logger := zerolog.Nop() + db := newTestDBForWatcher(t) + client := &mockPushClientForWatcher{ + latestBlock: 200, + txResults: 
make(map[string][]*pushcore.TxResult), + } + cfg := Config{ + PollInterval: 5 * time.Second, + ChunkSize: 1000, + QueryLimit: 100, + } + + // Start at block 100, latest is 200 - should process new blocks + watcher := NewEventWatcher(client, db, logger, cfg, 100) + + // Need to start the watcher to initialize context (then stop it to run manual poll) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := watcher.Start(ctx) + require.NoError(t, err) + watcher.Stop() + + // Re-create watcher for clean test + client.queriesMade = nil + watcher = NewEventWatcher(client, db, logger, cfg, 100) + watcher.ctx, watcher.cancel = context.WithCancel(context.Background()) + defer watcher.cancel() + + err = watcher.pollForEvents() + require.NoError(t, err) + + // Should have processed blocks and updated last block + assert.Equal(t, uint64(200), watcher.LastProcessedBlock()) + + // Should have made queries (2 per chunk: TSS + outbound) + assert.GreaterOrEqual(t, len(client.queriesMade), 2) +} + +func TestMin(t *testing.T) { + tests := []struct { + a, b uint64 + expected uint64 + }{ + {1, 2, 1}, + {2, 1, 1}, + {0, 0, 0}, + {100, 100, 100}, + {0, 100, 0}, + } + + for _, tt := range tests { + result := min(tt.a, tt.b) + assert.Equal(t, tt.expected, result) + } +} + +func TestEventQueries(t *testing.T) { + // Verify query constants are correctly formed + assert.Equal(t, "tss_process_initiated.process_id>=0", TSSEventQuery) + assert.Equal(t, "outbound_created.tx_id EXISTS", OutboundEventQuery) +} diff --git a/universalClient/chains/push/listener.go b/universalClient/chains/push/listener.go deleted file mode 100644 index f0b37121..00000000 --- a/universalClient/chains/push/listener.go +++ /dev/null @@ -1,128 +0,0 @@ -package push - -import ( - "context" - "sync" - "time" - - "github.com/pushchain/push-chain-node/universalClient/pushcore" - "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" - "github.com/rs/zerolog" -) - -// 
PushTSSEventListener listens for TSS events from the Push chain -// and stores them in the database for processing. -type PushTSSEventListener struct { - logger zerolog.Logger - watcher *EventWatcher - - mu sync.RWMutex - running bool - healthy bool -} - -// NewPushTSSEventListener creates a new Push TSS event listener. -func NewPushTSSEventListener( - client *pushcore.Client, - store *eventstore.Store, - logger zerolog.Logger, -) *PushTSSEventListener { - return &PushTSSEventListener{ - logger: logger.With().Str("component", "push_tss_listener").Logger(), - watcher: NewEventWatcher(client, store, logger), - running: false, - healthy: false, - } -} - -// Config holds configuration for the listener. -type Config struct { - PollInterval time.Duration - StartBlock uint64 -} - -// DefaultConfig returns the default listener configuration. -func DefaultConfig() Config { - return Config{ - PollInterval: DefaultPollInterval, - StartBlock: 0, // Start from the beginning (or recent blocks) - } -} - -// WithConfig applies configuration to the listener. -func (l *PushTSSEventListener) WithConfig(cfg Config) *PushTSSEventListener { - if cfg.PollInterval > 0 { - l.watcher.SetPollInterval(cfg.PollInterval) - } - if cfg.StartBlock > 0 { - l.watcher.SetLastBlock(cfg.StartBlock) - } - return l -} - -// Start begins listening for TSS events from the Push chain. -func (l *PushTSSEventListener) Start(ctx context.Context) error { - l.mu.Lock() - defer l.mu.Unlock() - - if l.running { - return nil // Already running - } - - l.logger.Info().Msg("starting Push TSS event listener") - - // Start the event watcher - l.watcher.Start(ctx) - - l.running = true - l.healthy = true - - l.logger.Info().Msg("Push TSS event listener started") - return nil -} - -// Stop stops the listener. 
-func (l *PushTSSEventListener) Stop() error { - l.mu.Lock() - defer l.mu.Unlock() - - if !l.running { - return nil // Already stopped - } - - l.logger.Info().Msg("stopping Push TSS event listener") - - l.watcher.Stop() - - l.running = false - l.healthy = false - - l.logger.Info().Msg("Push TSS event listener stopped") - return nil -} - -// IsHealthy returns whether the listener is operating normally. -func (l *PushTSSEventListener) IsHealthy() bool { - l.mu.RLock() - defer l.mu.RUnlock() - return l.healthy -} - -// IsRunning returns whether the listener is currently running. -func (l *PushTSSEventListener) IsRunning() bool { - l.mu.RLock() - defer l.mu.RUnlock() - return l.running -} - -// GetLastProcessedBlock returns the last block height that was processed. -func (l *PushTSSEventListener) GetLastProcessedBlock() uint64 { - return l.watcher.GetLastBlock() -} - -// SetHealthy sets the health status (useful for testing or external health checks). -func (l *PushTSSEventListener) SetHealthy(healthy bool) { - l.mu.Lock() - defer l.mu.Unlock() - l.healthy = healthy -} diff --git a/universalClient/chains/push/types.go b/universalClient/chains/push/types.go deleted file mode 100644 index 15bf4b63..00000000 --- a/universalClient/chains/push/types.go +++ /dev/null @@ -1,43 +0,0 @@ -package push - -import "time" - -// Event type constants from the utss module. -const ( - // EventTypeTssProcessInitiated is emitted when a TSS key process is initiated on-chain. - EventTypeTssProcessInitiated = "tss_process_initiated" - - // Event attribute keys - AttrKeyProcessID = "process_id" - AttrKeyProcessType = "process_type" - AttrKeyParticipants = "participants" - AttrKeyExpiryHeight = "expiry_height" - - // Process type values from the chain - ProcessTypeKeygen = "TSS_PROCESS_KEYGEN" - ProcessTypeRefresh = "TSS_PROCESS_REFRESH" - ProcessTypeQuorumChange = "TSS_PROCESS_QUORUM_CHANGE" -) - -// Protocol type values for TSSEvent.ProtocolType field. 
-const ( - ProtocolTypeKeygen = "keygen" - ProtocolTypeKeyrefresh = "keyrefresh" - ProtocolTypeQuorumChange = "quorumchange" -) - -// Default configuration values. -const ( - DefaultPollInterval = 5 * time.Second - DefaultEventQuery = EventTypeTssProcessInitiated + ".process_id>=0" -) - -// TSSProcessEvent represents a parsed tss_process_initiated event from the chain. -type TSSProcessEvent struct { - ProcessID uint64 // Process ID from the event - ProcessType string // "keygen" or "keyrefresh" - Participants []string // List of validator addresses - ExpiryHeight uint64 // Block height when this process expires - BlockHeight uint64 // Block height when the event occurred - TxHash string // Transaction hash containing this event -} diff --git a/universalClient/chains/svm/outbound_tx_builder.go b/universalClient/chains/svm/outbound_tx_builder.go new file mode 100644 index 00000000..223441cf --- /dev/null +++ b/universalClient/chains/svm/outbound_tx_builder.go @@ -0,0 +1,306 @@ +package svm + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "math/big" + "strings" + + bin "github.com/gagliardetto/binary" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/rs/zerolog" + + chaincommon "github.com/pushchain/push-chain-node/universalClient/chains/common" +) + +// OutboundTxBuilder builds outbound transactions for Solana chains. +type OutboundTxBuilder struct { + client *Client + caipChainID string + gatewayProgram solana.PublicKey + tssPublicKey solana.PublicKey // TSS public key (ed25519) + logger zerolog.Logger +} + +// NewOutboundTxBuilder creates a new Solana outbound transaction builder. 
+func NewOutboundTxBuilder( + client *Client, + gatewayProgram solana.PublicKey, + tssPublicKey solana.PublicKey, + logger zerolog.Logger, +) *OutboundTxBuilder { + return &OutboundTxBuilder{ + client: client, + caipChainID: client.GetConfig().Chain, + gatewayProgram: gatewayProgram, + tssPublicKey: tssPublicKey, + logger: logger.With().Str("component", "svm_outbound_builder").Logger(), + } +} + +// BuildTransaction creates an unsigned Solana transaction from outbound data. +// gasPrice is accepted for interface compatibility but not used for Solana (uses compute units instead). +func (b *OutboundTxBuilder) BuildTransaction(ctx context.Context, data *chaincommon.OutboundTxData, gasPrice *big.Int) (*chaincommon.OutboundTxResult, error) { + if data == nil { + return nil, fmt.Errorf("outbound data is nil") + } + // Note: gasPrice is not used for Solana transactions (they use compute units) + + b.logger.Debug(). + Str("tx_id", data.TxID). + Str("recipient", data.Recipient). + Str("amount", data.Amount). 
+ Msg("building Solana outbound transaction") + + // Get recent blockhash + recentBlockhash, err := b.getRecentBlockhash(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get recent blockhash: %w", err) + } + + // Build the instruction for the gateway program + instruction, err := b.buildGatewayInstruction(data) + if err != nil { + return nil, fmt.Errorf("failed to build gateway instruction: %w", err) + } + + // Create the transaction + tx, err := solana.NewTransaction( + []solana.Instruction{instruction}, + recentBlockhash, + solana.TransactionPayer(b.tssPublicKey), + ) + if err != nil { + return nil, fmt.Errorf("failed to create transaction: %w", err) + } + + // Get the message to sign + messageBytes, err := tx.Message.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("failed to marshal message: %w", err) + } + + // The signing hash is the message itself for Solana (ed25519 signs the message directly) + signingHash := messageBytes + + // Serialize the unsigned transaction + rawTx, err := tx.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("failed to marshal transaction: %w", err) + } + + return &chaincommon.OutboundTxResult{ + RawTx: rawTx, + SigningHash: signingHash, + ChainID: b.caipChainID, + Blockhash: recentBlockhash[:], + }, nil +} + +// AssembleSignedTransaction combines the unsigned transaction with the TSS signature. 
+func (b *OutboundTxBuilder) AssembleSignedTransaction(unsignedTx []byte, signature []byte, recoveryID byte) ([]byte, error) { + if len(signature) != 64 { + return nil, fmt.Errorf("invalid signature length: expected 64, got %d", len(signature)) + } + + // Decode the unsigned transaction + tx, err := solana.TransactionFromDecoder(bin.NewBinDecoder(unsignedTx)) + if err != nil { + return nil, fmt.Errorf("failed to decode unsigned transaction: %w", err) + } + + // Create Solana signature from the 64-byte signature + var sig solana.Signature + copy(sig[:], signature) + + // Add the signature to the transaction + tx.Signatures = []solana.Signature{sig} + + // Serialize the signed transaction + signedTx, err := tx.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("failed to marshal signed transaction: %w", err) + } + + return signedTx, nil +} + +// BroadcastTransaction sends the signed transaction to the network. +func (b *OutboundTxBuilder) BroadcastTransaction(ctx context.Context, signedTx []byte) (string, error) { + // Decode the signed transaction + tx, err := solana.TransactionFromDecoder(bin.NewBinDecoder(signedTx)) + if err != nil { + return "", fmt.Errorf("failed to decode signed transaction: %w", err) + } + + // Get RPC client + rpcClient, err := b.client.getRPCClient() + if err != nil { + return "", fmt.Errorf("failed to get RPC client: %w", err) + } + + // Send the transaction + sig, err := rpcClient.SendTransaction(ctx, tx) + if err != nil { + return "", fmt.Errorf("failed to send transaction: %w", err) + } + + txHash := sig.String() + b.logger.Info(). + Str("tx_hash", txHash). + Msg("outbound transaction broadcasted") + + return txHash, nil +} + +// GetTxHash extracts the transaction hash from a signed transaction. +// For Solana, the txHash is the signature of the transaction. 
+func (b *OutboundTxBuilder) GetTxHash(signedTx []byte) (string, error) { + tx, err := solana.TransactionFromDecoder(bin.NewBinDecoder(signedTx)) + if err != nil { + return "", fmt.Errorf("failed to decode signed transaction: %w", err) + } + if len(tx.Signatures) == 0 { + return "", fmt.Errorf("transaction has no signatures") + } + return tx.Signatures[0].String(), nil +} + +// GetChainID returns the chain identifier. +func (b *OutboundTxBuilder) GetChainID() string { + return b.caipChainID +} + +// getRecentBlockhash gets a recent blockhash for the transaction. +func (b *OutboundTxBuilder) getRecentBlockhash(ctx context.Context) (solana.Hash, error) { + rpcClient, err := b.client.getRPCClient() + if err != nil { + return solana.Hash{}, fmt.Errorf("failed to get RPC client: %w", err) + } + + resp, err := rpcClient.GetLatestBlockhash(ctx, rpc.CommitmentFinalized) + if err != nil { + return solana.Hash{}, fmt.Errorf("failed to get latest blockhash: %w", err) + } + + return resp.Value.Blockhash, nil +} + +// buildGatewayInstruction builds the instruction for the gateway program. 
+func (b *OutboundTxBuilder) buildGatewayInstruction(data *chaincommon.OutboundTxData) (solana.Instruction, error) { + // Parse recipient as Solana public key + recipient, err := solana.PublicKeyFromBase58(data.Recipient) + if err != nil { + return nil, fmt.Errorf("invalid recipient address: %w", err) + } + + // Parse amount + amount, ok := new(big.Int).SetString(data.Amount, 10) + if !ok { + return nil, fmt.Errorf("invalid amount: %s", data.Amount) + } + + // Parse asset address (if provided) + var assetMint solana.PublicKey + if data.AssetAddr != "" && data.AssetAddr != "0x" { + assetMint, err = solana.PublicKeyFromBase58(data.AssetAddr) + if err != nil { + return nil, fmt.Errorf("invalid asset address: %w", err) + } + } + + // Parse payload + var payload []byte + if data.Payload != "" && data.Payload != "0x" { + payloadHex := strings.TrimPrefix(data.Payload, "0x") + payload, err = hex.DecodeString(payloadHex) + if err != nil { + return nil, fmt.Errorf("invalid payload hex: %w", err) + } + } + + // Build the instruction data + instructionData := buildExecuteOutboundInstructionData(data.TxID, amount, payload) + + // Build account metas + accounts := []*solana.AccountMeta{ + {PublicKey: b.tssPublicKey, IsSigner: true, IsWritable: true}, // Payer/Signer + {PublicKey: recipient, IsSigner: false, IsWritable: true}, // Recipient + {PublicKey: b.gatewayProgram, IsSigner: false, IsWritable: false}, // Gateway program + } + + // Add asset mint if token transfer + if assetMint != (solana.PublicKey{}) { + accounts = append(accounts, &solana.AccountMeta{ + PublicKey: assetMint, + IsSigner: false, + IsWritable: false, + }) + } + + return &gatewayInstruction{ + programID: b.gatewayProgram, + accounts: accounts, + data: instructionData, + }, nil +} + +// gatewayInstruction implements solana.Instruction for the gateway program. 
+type gatewayInstruction struct { + programID solana.PublicKey + accounts []*solana.AccountMeta + data []byte +} + +func (i *gatewayInstruction) ProgramID() solana.PublicKey { + return i.programID +} + +func (i *gatewayInstruction) Accounts() []*solana.AccountMeta { + return i.accounts +} + +func (i *gatewayInstruction) Data() ([]byte, error) { + return i.data, nil +} + +// buildExecuteOutboundInstructionData creates the instruction data for executeOutbound. +func buildExecuteOutboundInstructionData(txID string, amount *big.Int, payload []byte) []byte { + // Instruction discriminator for "execute_outbound" + // This is typically the first 8 bytes of sha256("global:execute_outbound") + discriminator := sha256.Sum256([]byte("global:execute_outbound")) + + // Build instruction data + data := make([]byte, 0, 8+32+8+4+len(payload)) + + // Discriminator (8 bytes) + data = append(data, discriminator[:8]...) + + // TxID as bytes32 (32 bytes) + txIDBytes := make([]byte, 32) + txIDHex := strings.TrimPrefix(txID, "0x") + if decoded, err := hex.DecodeString(txIDHex); err == nil { + copy(txIDBytes, decoded) + } + data = append(data, txIDBytes...) + + // Amount as u64 (8 bytes, little-endian) + amountBytes := make([]byte, 8) + amountU64 := amount.Uint64() + for i := 0; i < 8; i++ { + amountBytes[i] = byte(amountU64 >> (8 * i)) + } + data = append(data, amountBytes...) + + // Payload length (4 bytes, little-endian) + payloadLen := uint32(len(payload)) + data = append(data, byte(payloadLen), byte(payloadLen>>8), byte(payloadLen>>16), byte(payloadLen>>24)) + + // Payload data + data = append(data, payload...) 
+ + return data +} diff --git a/universalClient/config/config.go b/universalClient/config/config.go index 8138ab57..009eab14 100644 --- a/universalClient/config/config.go +++ b/universalClient/config/config.go @@ -6,11 +6,8 @@ import ( "fmt" "os" "path/filepath" -) -const ( - configSubdir = "config" - configFileName = "pushuv_config.json" + "github.com/pushchain/push-chain-node/universalClient/constant" ) //go:embed default_config.json @@ -49,17 +46,6 @@ func validateConfig(cfg *Config, defaultCfg *Config) error { if cfg.MaxRetries == 0 && defaultCfg != nil { cfg.MaxRetries = defaultCfg.MaxRetries } - if cfg.RetryBackoffSeconds == 0 && defaultCfg != nil { - cfg.RetryBackoffSeconds = defaultCfg.RetryBackoffSeconds - } - - // Set defaults for startup config from default config - if cfg.InitialFetchRetries == 0 && defaultCfg != nil { - cfg.InitialFetchRetries = defaultCfg.InitialFetchRetries - } - if cfg.InitialFetchTimeoutSeconds == 0 && defaultCfg != nil { - cfg.InitialFetchTimeoutSeconds = defaultCfg.InitialFetchTimeoutSeconds - } // Set defaults for registry config from default config if len(cfg.PushChainGRPCURLs) == 0 && defaultCfg != nil { @@ -89,51 +75,14 @@ func validateConfig(cfg *Config, defaultCfg *Config) error { cfg.KeyringBackend = defaultCfg.KeyringBackend } - // Set defaults for event monitoring from default config - if cfg.EventPollingIntervalSeconds == 0 && defaultCfg != nil { - cfg.EventPollingIntervalSeconds = defaultCfg.EventPollingIntervalSeconds - } - - // Set defaults for transaction cleanup from default config - if cfg.TransactionCleanupIntervalSeconds == 0 && defaultCfg != nil { - cfg.TransactionCleanupIntervalSeconds = defaultCfg.TransactionCleanupIntervalSeconds - } - if cfg.TransactionRetentionPeriodSeconds == 0 && defaultCfg != nil { - cfg.TransactionRetentionPeriodSeconds = defaultCfg.TransactionRetentionPeriodSeconds - } - - // Initialize ChainConfigs if nil or empty - if (cfg.ChainConfigs == nil || len(cfg.ChainConfigs) == 0) && 
defaultCfg != nil { + // Initialize ChainConfigs if empty + if len(cfg.ChainConfigs) == 0 && defaultCfg != nil { cfg.ChainConfigs = defaultCfg.ChainConfigs } - // Set defaults for RPC pool config from default config - if defaultCfg != nil { - if cfg.RPCPoolConfig.HealthCheckIntervalSeconds == 0 { - cfg.RPCPoolConfig.HealthCheckIntervalSeconds = defaultCfg.RPCPoolConfig.HealthCheckIntervalSeconds - } - if cfg.RPCPoolConfig.UnhealthyThreshold == 0 { - cfg.RPCPoolConfig.UnhealthyThreshold = defaultCfg.RPCPoolConfig.UnhealthyThreshold - } - if cfg.RPCPoolConfig.RecoveryIntervalSeconds == 0 { - cfg.RPCPoolConfig.RecoveryIntervalSeconds = defaultCfg.RPCPoolConfig.RecoveryIntervalSeconds - } - if cfg.RPCPoolConfig.MinHealthyEndpoints == 0 { - cfg.RPCPoolConfig.MinHealthyEndpoints = defaultCfg.RPCPoolConfig.MinHealthyEndpoints - } - if cfg.RPCPoolConfig.RequestTimeoutSeconds == 0 { - cfg.RPCPoolConfig.RequestTimeoutSeconds = defaultCfg.RPCPoolConfig.RequestTimeoutSeconds - } - if cfg.RPCPoolConfig.LoadBalancingStrategy == "" { - cfg.RPCPoolConfig.LoadBalancingStrategy = defaultCfg.RPCPoolConfig.LoadBalancingStrategy - } - } - - // Validate load balancing strategy - if cfg.RPCPoolConfig.LoadBalancingStrategy != "" && - cfg.RPCPoolConfig.LoadBalancingStrategy != "round-robin" && - cfg.RPCPoolConfig.LoadBalancingStrategy != "weighted" { - return fmt.Errorf("load balancing strategy must be 'round-robin' or 'weighted'") + // Set NodeHome default + if cfg.NodeHome == "" { + cfg.NodeHome = constant.DefaultNodeHome } // Set TSS defaults @@ -172,12 +121,12 @@ func Save(cfg *Config, basePath string) error { return fmt.Errorf("invalid config: %w", err) } - configDir := filepath.Join(basePath, configSubdir) + configDir := filepath.Join(basePath, constant.ConfigSubdir) if err := os.MkdirAll(configDir, 0o750); err != nil { return fmt.Errorf("failed to create config directory: %w", err) } - configFile := filepath.Join(configDir, configFileName) + configFile := filepath.Join(configDir, 
constant.ConfigFileName) data, err := json.MarshalIndent(cfg, "", " ") if err != nil { return fmt.Errorf("failed to marshal config: %w", err) @@ -191,7 +140,7 @@ func Save(cfg *Config, basePath string) error { // Load reads and returns the config from /config/pushuv_config.json. func Load(basePath string) (Config, error) { - configFile := filepath.Join(basePath, configSubdir, configFileName) + configFile := filepath.Join(basePath, constant.ConfigSubdir, constant.ConfigFileName) data, err := os.ReadFile(filepath.Clean(configFile)) if err != nil { return Config{}, fmt.Errorf("failed to read config file: %w", err) diff --git a/universalClient/config/config_test.go b/universalClient/config/config_test.go index f444e0bb..f111b7e1 100644 --- a/universalClient/config/config_test.go +++ b/universalClient/config/config_test.go @@ -6,6 +6,7 @@ import ( "path/filepath" "testing" + "github.com/pushchain/push-chain-node/universalClient/constant" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -35,9 +36,6 @@ func TestConfigValidation(t *testing.T) { // Check that defaults were set assert.NotZero(t, cfg.ConfigRefreshIntervalSeconds) assert.NotZero(t, cfg.MaxRetries) - assert.NotZero(t, cfg.RetryBackoffSeconds) - assert.NotZero(t, cfg.InitialFetchRetries) - assert.NotZero(t, cfg.InitialFetchTimeoutSeconds) assert.NotZero(t, cfg.QueryServerPort) assert.Equal(t, KeyringBackendTest, cfg.KeyringBackend) // Defaults to test when empty assert.NotEmpty(t, cfg.PushChainGRPCURLs) @@ -56,9 +54,6 @@ func TestValidConfigScenarios(t *testing.T) { LogFormat: "json", ConfigRefreshIntervalSeconds: 30, MaxRetries: 5, - RetryBackoffSeconds: 2, - InitialFetchRetries: 3, - InitialFetchTimeoutSeconds: 20, PushChainGRPCURLs: []string{"localhost:9090"}, QueryServerPort: 8080, TSSP2PPrivateKeyHex: testTSSPrivateKeyHex, @@ -92,11 +87,8 @@ func TestValidConfigScenarios(t *testing.T) { }, validate: func(t *testing.T, cfg *Config) { // These should match the default config 
values - assert.Equal(t, 60, cfg.ConfigRefreshIntervalSeconds) // Default is 10 + assert.Equal(t, 60, cfg.ConfigRefreshIntervalSeconds) // Default is 60 assert.Equal(t, 3, cfg.MaxRetries) - assert.Equal(t, 1, cfg.RetryBackoffSeconds) - assert.Equal(t, 5, cfg.InitialFetchRetries) - assert.Equal(t, 30, cfg.InitialFetchTimeoutSeconds) assert.Equal(t, []string{"localhost:9090"}, cfg.PushChainGRPCURLs) assert.Equal(t, 8080, cfg.QueryServerPort) }, @@ -224,9 +216,6 @@ func TestSaveAndLoad(t *testing.T) { LogFormat: "json", ConfigRefreshIntervalSeconds: 20, MaxRetries: 5, - RetryBackoffSeconds: 2, - InitialFetchRetries: 10, - InitialFetchTimeoutSeconds: 60, PushChainGRPCURLs: []string{"localhost:9090", "localhost:9091"}, QueryServerPort: 8888, TSSP2PPrivateKeyHex: testTSSPrivateKeyHex, @@ -238,7 +227,7 @@ func TestSaveAndLoad(t *testing.T) { require.NoError(t, err) // Verify file exists - configPath := filepath.Join(tempDir, configSubdir, configFileName) + configPath := filepath.Join(tempDir, constant.ConfigSubdir, constant.ConfigFileName) _, err = os.Stat(configPath) assert.NoError(t, err) @@ -251,9 +240,6 @@ func TestSaveAndLoad(t *testing.T) { assert.Equal(t, cfg.LogFormat, loadedCfg.LogFormat) assert.Equal(t, cfg.ConfigRefreshIntervalSeconds, loadedCfg.ConfigRefreshIntervalSeconds) assert.Equal(t, cfg.MaxRetries, loadedCfg.MaxRetries) - assert.Equal(t, cfg.RetryBackoffSeconds, loadedCfg.RetryBackoffSeconds) - assert.Equal(t, cfg.InitialFetchRetries, loadedCfg.InitialFetchRetries) - assert.Equal(t, cfg.InitialFetchTimeoutSeconds, loadedCfg.InitialFetchTimeoutSeconds) assert.Equal(t, cfg.PushChainGRPCURLs, loadedCfg.PushChainGRPCURLs) assert.Equal(t, cfg.QueryServerPort, loadedCfg.QueryServerPort) }) @@ -278,12 +264,12 @@ func TestSaveAndLoad(t *testing.T) { t.Run("Load invalid JSON", func(t *testing.T) { // Create config directory - configDir := filepath.Join(tempDir, "invalid", configSubdir) + configDir := filepath.Join(tempDir, "invalid", constant.ConfigSubdir) err 
:= os.MkdirAll(configDir, 0o750) require.NoError(t, err) // Write invalid JSON - configPath := filepath.Join(configDir, configFileName) + configPath := filepath.Join(configDir, constant.ConfigFileName) err = os.WriteFile(configPath, []byte("{invalid json}"), 0o600) require.NoError(t, err) @@ -305,7 +291,7 @@ func TestSaveAndLoad(t *testing.T) { require.NoError(t, err) // Verify directory was created - configDir := filepath.Join(newDir, configSubdir) + configDir := filepath.Join(newDir, constant.ConfigSubdir) _, err = os.Stat(configDir) assert.NoError(t, err) }) @@ -318,9 +304,6 @@ func TestConfigJSONMarshaling(t *testing.T) { LogFormat: "console", ConfigRefreshIntervalSeconds: 15, MaxRetries: 3, - RetryBackoffSeconds: 1, - InitialFetchRetries: 5, - InitialFetchTimeoutSeconds: 30, PushChainGRPCURLs: []string{"host1:9090", "host2:9090"}, QueryServerPort: 8080, } @@ -339,9 +322,6 @@ func TestConfigJSONMarshaling(t *testing.T) { assert.Equal(t, cfg.LogFormat, unmarshaledCfg.LogFormat) assert.Equal(t, cfg.ConfigRefreshIntervalSeconds, unmarshaledCfg.ConfigRefreshIntervalSeconds) assert.Equal(t, cfg.MaxRetries, unmarshaledCfg.MaxRetries) - assert.Equal(t, cfg.RetryBackoffSeconds, unmarshaledCfg.RetryBackoffSeconds) - assert.Equal(t, cfg.InitialFetchRetries, unmarshaledCfg.InitialFetchRetries) - assert.Equal(t, cfg.InitialFetchTimeoutSeconds, unmarshaledCfg.InitialFetchTimeoutSeconds) assert.Equal(t, cfg.PushChainGRPCURLs, unmarshaledCfg.PushChainGRPCURLs) assert.Equal(t, cfg.QueryServerPort, unmarshaledCfg.QueryServerPort) }) diff --git a/universalClient/config/default_config.json b/universalClient/config/default_config.json index 4c9e4368..9f4542a3 100644 --- a/universalClient/config/default_config.json +++ b/universalClient/config/default_config.json @@ -6,29 +6,15 @@ "push_chain_grpc_urls": [ "localhost:9090" ], + "push_valoper_address": "pushvaloper1vzuw2x3k2ccme70zcgswv8d88kyc07grdpvw3e", "tss_p2p_private_key_hex": 
"0101010101010101010101010101010101010101010101010101010101010101", "tss_password": "defaultpassword", "tss_p2p_listen": "/ip4/0.0.0.0/tcp/39000", "config_refresh_interval_seconds": 60, "max_retries": 3, - "retry_backoff_seconds": 1, - "initial_fetch_retries": 5, - "initial_fetch_timeout_seconds": 30, "query_server_port": 8080, "keyring_backend": "test", "key_check_interval": 30, - "event_polling_interval_seconds": 5, - "database_base_dir": "", - "transaction_cleanup_interval_seconds": 3600, - "transaction_retention_period_seconds": 86400, - "rpc_pool_config": { - "health_check_interval_seconds": 30, - "unhealthy_threshold": 3, - "recovery_interval_seconds": 300, - "min_healthy_endpoints": 1, - "request_timeout_seconds": 10, - "load_balancing_strategy": "round-robin" - }, "chain_configs": { "eip155:11155111": { "rpc_urls": [ @@ -37,6 +23,7 @@ ], "cleanup_interval_seconds": 1800, "retention_period_seconds": 43200, + "event_polling_interval_seconds": 5, "gas_price_interval_seconds": 60, "event_start_from": 9084430 }, @@ -46,6 +33,7 @@ ], "cleanup_interval_seconds": 1800, "retention_period_seconds": 43200, + "event_polling_interval_seconds": 5, "gas_price_interval_seconds": 60, "event_start_from": 203887665 }, @@ -55,6 +43,7 @@ ], "cleanup_interval_seconds": 1800, "retention_period_seconds": 43200, + "event_polling_interval_seconds": 5, "gas_price_interval_seconds": 60, "event_start_from": 32257378 }, @@ -64,6 +53,7 @@ ], "cleanup_interval_seconds": 1800, "retention_period_seconds": 43200, + "event_polling_interval_seconds": 5, "gas_price_interval_seconds": 60, "event_start_from": 68905309 }, @@ -73,8 +63,21 @@ ], "cleanup_interval_seconds": 7200, "retention_period_seconds": 172800, + "event_polling_interval_seconds": 5, "gas_price_interval_seconds": 30, "event_start_from": 403697270 + }, + "push_42101-1": { + "cleanup_interval_seconds": 1800, + "retention_period_seconds": 43200, + "event_polling_interval_seconds": 5, + "event_start_from": 100000 + }, + 
"localchain_9000-1": { + "cleanup_interval_seconds": 1800, + "retention_period_seconds": 43200, + "event_polling_interval_seconds": 5, + "event_start_from": 100000 } } } \ No newline at end of file diff --git a/universalClient/config/types.go b/universalClient/config/types.go index 8b7d28a6..92c094de 100644 --- a/universalClient/config/types.go +++ b/universalClient/config/types.go @@ -1,5 +1,7 @@ package config +import "fmt" + // KeyringBackend represents the type of keyring backend to use type KeyringBackend string @@ -17,97 +19,77 @@ type Config struct { LogFormat string `json:"log_format"` // "json" or "console" LogSampler bool `json:"log_sampler"` // if true, samples logs (e.g., 1 in 5) + // Node Config + NodeHome string `json:"node_home"` // Node home directory (default: ~/.puniversal) + // Push Chain configuration PushChainID string `json:"push_chain_id"` // Push Chain chain ID (default: localchain_9000-1) PushChainGRPCURLs []string `json:"push_chain_grpc_urls"` // Push Chain gRPC endpoints (default: ["localhost:9090"]) + PushValoperAddress string `json:"push_valoper_address"` // Push Chain validator operator address (pushvaloper1...) 
ConfigRefreshIntervalSeconds int `json:"config_refresh_interval_seconds"` // How often to refresh configs in seconds (default: 60) MaxRetries int `json:"max_retries"` // Max retry attempts for registry queries (default: 3) - RetryBackoffSeconds int `json:"retry_backoff_seconds"` // Initial retry backoff duration in seconds (default: 1) - - // Startup configuration - InitialFetchRetries int `json:"initial_fetch_retries"` // Number of retries for initial config fetch (default: 5) - InitialFetchTimeoutSeconds int `json:"initial_fetch_timeout_seconds"` // Timeout per initial fetch attempt in seconds (default: 30) + // Query Server Config QueryServerPort int `json:"query_server_port"` // Port for HTTP query server (default: 8080) // Keyring configuration - KeyringBackend KeyringBackend `json:"keyring_backend"` // Keyring backend type (file/test) - - // Event monitoring configuration - EventPollingIntervalSeconds int `json:"event_polling_interval_seconds"` // How often to poll for new events in seconds (default: 5) - - // Database configuration - DatabaseBaseDir string `json:"database_base_dir"` // Base directory for chain databases (default: ~/.puniversal/databases) - - // Transaction cleanup configuration (global defaults) - TransactionCleanupIntervalSeconds int `json:"transaction_cleanup_interval_seconds"` // Global default: How often to run cleanup in seconds (default: 3600) - TransactionRetentionPeriodSeconds int `json:"transaction_retention_period_seconds"` // Global default: How long to keep confirmed transactions in seconds (default: 86400) - - // RPC Pool configuration - RPCPoolConfig RPCPoolConfig `json:"rpc_pool_config"` // RPC pool configuration + KeyringBackend KeyringBackend `json:"keyring_backend"` // Keyring backend type (file/test) + KeyringPassword string `json:"keyring_password"` // Password for file backend keyring encryption // Unified per-chain configuration ChainConfigs map[string]ChainSpecificConfig `json:"chain_configs"` // Map of chain ID to all 
chain-specific settings // TSS Node configuration TSSP2PPrivateKeyHex string `json:"tss_p2p_private_key_hex"` // Ed25519 private key in hex for libp2p identity - TSSP2PListen string `json:"tss_p2p_listen"` // libp2p listen address (default: /ip4/0.0.0.0/tcp/39000) - TSSPassword string `json:"tss_password"` // Encryption password for keyshares - TSSHomeDir string `json:"tss_home_dir"` // Keyshare storage directory (default: ~/.puniversal/tss) + TSSP2PListen string `json:"tss_p2p_listen"` // libp2p listen address (default: /ip4/0.0.0.0/tcp/39000) + TSSPassword string `json:"tss_password"` // Encryption password for keyshares + TSSHomeDir string `json:"tss_home_dir"` // Keyshare storage directory (default: ~/.puniversal/tss) } // ChainSpecificConfig holds all chain-specific configuration in one place type ChainSpecificConfig struct { - // RPC Configuration - RPCURLs []string `json:"rpc_urls,omitempty"` // RPC endpoints for this chain + // RPC Configuration + RPCURLs []string `json:"rpc_urls,omitempty"` // RPC endpoints for this chain // Transaction Cleanup Configuration - CleanupIntervalSeconds *int `json:"cleanup_interval_seconds,omitempty"` // How often to run cleanup for this chain (optional, uses global default if not set) - RetentionPeriodSeconds *int `json:"retention_period_seconds,omitempty"` // How long to keep confirmed transactions for this chain (optional, uses global default if not set) + CleanupIntervalSeconds *int `json:"cleanup_interval_seconds,omitempty"` // How often to run cleanup for this chain (required) + RetentionPeriodSeconds *int `json:"retention_period_seconds,omitempty"` // How long to keep confirmed transactions for this chain (required) - // Event Monitoring Configuration - EventPollingIntervalSeconds *int `json:"event_polling_interval_seconds,omitempty"` // How often to poll for new events for this chain (optional, uses global default if not set) + // Event Monitoring Configuration + EventPollingIntervalSeconds *int 
`json:"event_polling_interval_seconds,omitempty"` // How often to poll for new events for this chain (required) - // Event Start Cursor - // If set to a non-negative value, gateway event watchers start from this - // block/slot for this chain. If set to -1 or not present, start from the - // latest block/slot (or from DB resume point when available). - EventStartFrom *int64 `json:"event_start_from,omitempty"` + // Event Start Cursor + // If set to a non-negative value, gateway event watchers start from this + // block/slot for this chain. If set to -1 or not present, start from the + // latest block/slot (or from DB resume point when available). + EventStartFrom *int64 `json:"event_start_from,omitempty"` - // Future chain-specific settings can be added here -} + // Gas Oracle Configuration + GasPriceIntervalSeconds *int `json:"gas_price_interval_seconds,omitempty"` // How often to fetch and vote on gas price (default: 30 seconds) -// RPCPoolConfig holds configuration for RPC endpoint pooling -type RPCPoolConfig struct { - HealthCheckIntervalSeconds int `json:"health_check_interval_seconds"` // How often to check endpoint health in seconds (default: 30) - UnhealthyThreshold int `json:"unhealthy_threshold"` // Consecutive failures before marking unhealthy (default: 3) - RecoveryIntervalSeconds int `json:"recovery_interval_seconds"` // How long to wait before retesting excluded endpoint in seconds (default: 300) - MinHealthyEndpoints int `json:"min_healthy_endpoints"` // Minimum healthy endpoints required (default: 1) - RequestTimeoutSeconds int `json:"request_timeout_seconds"` // Timeout for individual RPC requests in seconds (default: 10) - LoadBalancingStrategy string `json:"load_balancing_strategy"` // "round-robin" or "weighted" (default: "round-robin") + // Future chain-specific settings can be added here } // GetChainCleanupSettings returns cleanup settings for a specific chain -// Falls back to global defaults if no chain-specific settings exist -func (c 
*Config) GetChainCleanupSettings(chainID string) (cleanupInterval, retentionPeriod int) { - // Start with global defaults - cleanupInterval = c.TransactionCleanupIntervalSeconds - retentionPeriod = c.TransactionRetentionPeriodSeconds +// Returns chain-specific settings (required per chain) +func (c *Config) GetChainCleanupSettings(chainID string) (cleanupInterval, retentionPeriod int, err error) { + if c.ChainConfigs == nil { + return 0, 0, fmt.Errorf("no chain configs found") + } - // Check for chain-specific overrides in unified config - if c.ChainConfigs != nil { - if config, ok := c.ChainConfigs[chainID]; ok { - // Override with chain-specific values if provided - if config.CleanupIntervalSeconds != nil { - cleanupInterval = *config.CleanupIntervalSeconds - } - if config.RetentionPeriodSeconds != nil { - retentionPeriod = *config.RetentionPeriodSeconds - } - } + config, ok := c.ChainConfigs[chainID] + if !ok { + return 0, 0, fmt.Errorf("no config found for chain %s", chainID) + } + + if config.CleanupIntervalSeconds == nil { + return 0, 0, fmt.Errorf("cleanup_interval_seconds is required for chain %s", chainID) + } + if config.RetentionPeriodSeconds == nil { + return 0, 0, fmt.Errorf("retention_period_seconds is required for chain %s", chainID) + } - return cleanupInterval, retentionPeriod + return *config.CleanupIntervalSeconds, *config.RetentionPeriodSeconds, nil } // GetChainConfig returns the complete configuration for a specific chain diff --git a/universalClient/constant/constant.go b/universalClient/constant/constant.go index 3b3b1950..f364ca43 100644 --- a/universalClient/constant/constant.go +++ b/universalClient/constant/constant.go @@ -2,19 +2,30 @@ package constant import "os" -// Node configuration constants +// <node-home>/ (e.g., /home/universal/.puniversal) +// ├── config/ +// │   └── pushuv_config.json +// └── databases/ +//     ├── eip155_1.db +//     └── eip155_97.db + const ( NodeDir = ".puniversal" -) -var ( - DefaultNodeHome = os.ExpandEnv("$HOME/") + NodeDir + 
ConfigSubdir = "config" + ConfigFileName = "pushuv_config.json" + + DatabasesSubdir = "databases" ) -// SupportedMessages contains all the supported message type URLs -// that the Universal Validator should process. -var SupportedMessages = []string{ +var DefaultNodeHome = os.ExpandEnv("$HOME/") + NodeDir + +// RequiredMsgGrants contains all the required message type URLs +// that must be granted via AuthZ for the Universal Validator to function. +// These messages are executed on behalf of the core validator by the grantee (hotkey of the Universal Validator). +var RequiredMsgGrants = []string{ "/uexecutor.v1.MsgVoteInbound", "/uexecutor.v1.MsgVoteGasPrice", + // "/uexecutor.v1.MsgVoteOutbound", // TODO: Uncomment this after auth perm "/utss.v1.MsgVoteTssKeyProcess", } diff --git a/universalClient/core/client.go b/universalClient/core/client.go index 61376b4a..b3a8af98 100644 --- a/universalClient/core/client.go +++ b/universalClient/core/client.go @@ -17,7 +17,6 @@ import ( "github.com/pushchain/push-chain-node/universalClient/db" "github.com/pushchain/push-chain-node/universalClient/pushcore" "github.com/pushchain/push-chain-node/universalClient/tss" - "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" "github.com/pushchain/push-chain-node/universalClient/tss/vote" "github.com/rs/zerolog" ) @@ -45,8 +44,8 @@ type UniversalClient struct { chainCacheJob *cron.ChainCacheJob chainRegistryJob *cron.ChainRegistryJob - // Push TSS event listener - pushTSSListener *push.PushTSSEventListener + // Push event listener + pushListener *push.Listener // TSS Node (optional, enabled via config) tssNode *tss.Node @@ -124,13 +123,15 @@ func NewUniversalClient(ctx context.Context, log zerolog.Logger, dbManager *db.C // Register as observer for chain addition events chainRegistry.SetObserver(uc) - // Create TSS event listener for Push chain events - tssDB, err := dbManager.GetChainDB("universal-validator") + // Create Push chain event listener + pushDB, err := 
dbManager.GetChainDB("push") if err != nil { - return nil, fmt.Errorf("failed to get TSS database: %w", err) + return nil, fmt.Errorf("failed to get Push database: %w", err) + } + uc.pushListener, err = push.NewListener(pushCore, pushDB.Client(), log, nil) + if err != nil { + return nil, fmt.Errorf("failed to create Push listener: %w", err) } - tssEventStore := eventstore.NewStore(tssDB.Client(), log) - uc.pushTSSListener = push.NewPushTSSEventListener(pushCore, tssEventStore, log) // Perform mandatory startup validation log.Info().Msg("🔐 Validating hotkey and AuthZ permissions...") @@ -139,7 +140,7 @@ func NewUniversalClient(ctx context.Context, log zerolog.Logger, dbManager *db.C ctx, log, cfg, - cfg.PushChainGRPCURLs[0], + pushCore, ) validationResult, err := startupValidator.ValidateStartupRequirements() @@ -183,7 +184,7 @@ func NewUniversalClient(ctx context.Context, log zerolog.Logger, dbManager *db.C LibP2PListen: cfg.TSSP2PListen, HomeDir: constant.DefaultNodeHome, Password: cfg.TSSPassword, - Database: tssDB, + Database: pushDB, PushCore: pushCore, Logger: log, VoteHandler: tssVoteHandler, @@ -208,7 +209,7 @@ func NewUniversalClient(ctx context.Context, log zerolog.Logger, dbManager *db.C // Create query server log.Info().Int("port", cfg.QueryServerPort).Msg("Creating query server") - uc.queryServer = api.NewServer(uc, log, cfg.QueryServerPort) + uc.queryServer = api.NewServer(log, cfg.QueryServerPort) return uc, nil } @@ -250,12 +251,12 @@ func (uc *UniversalClient) Start() error { } } - // Start the Push TSS event listener - if uc.pushTSSListener != nil { - if err := uc.pushTSSListener.Start(uc.ctx); err != nil { - uc.log.Error().Err(err).Msg("failed to start Push TSS listener") + // Start the Push event listener + if uc.pushListener != nil { + if err := uc.pushListener.Start(uc.ctx); err != nil { + uc.log.Error().Err(err).Msg("failed to start Push listener") } else { - uc.log.Info().Msg("✅ Push TSS event listener started") + uc.log.Info().Msg("✅ Push 
event listener started") } } @@ -303,9 +304,13 @@ func (uc *UniversalClient) Start() error { uc.gasPriceFetcher.Stop() } - // Stop Push TSS event listener - if uc.pushTSSListener != nil { - uc.pushTSSListener.Stop() + // Stop Push event listener + if uc.pushListener != nil { + if err := uc.pushListener.Stop(); err != nil { + uc.log.Error().Err(err).Msg("error stopping Push listener") + } else { + uc.log.Info().Msg("✅ Push listener stopped") + } } // Stop TSS node diff --git a/universalClient/core/startup_validator.go b/universalClient/core/startup_validator.go index 0f3ebe31..80c050e3 100644 --- a/universalClient/core/startup_validator.go +++ b/universalClient/core/startup_validator.go @@ -3,8 +3,10 @@ package core import ( "context" "fmt" + "time" "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/x/authz" "github.com/pushchain/push-chain-node/universalClient/config" @@ -24,12 +26,19 @@ type StartupValidationResult struct { Messages []string // List of authorized message types } +// GrantInfo represents information about a single AuthZ grant. 
+type GrantInfo struct { + Granter string // Address of the granter + MessageType string // Authorized message type + Expiration *time.Time // Grant expiration time (nil if no expiration) +} + // StartupValidator validates startup requirements type StartupValidator struct { - log zerolog.Logger - config *config.Config - grpcURL string - cdc *codec.ProtoCodec // Cached codec + log zerolog.Logger + config *config.Config + pushCore *pushcore.Client + cdc *codec.ProtoCodec // Cached codec } // NewStartupValidator creates a new startup validator @@ -37,7 +46,7 @@ func NewStartupValidator( ctx context.Context, log zerolog.Logger, config *config.Config, - grpcURL string, + pushCore *pushcore.Client, ) *StartupValidator { // Create codec once and cache it interfaceRegistry := keys.CreateInterfaceRegistryWithEVMSupport() @@ -45,10 +54,10 @@ func NewStartupValidator( uetypes.RegisterInterfaces(interfaceRegistry) return &StartupValidator{ - log: log.With().Str("component", "startup_validator").Logger(), - config: config, - grpcURL: grpcURL, - cdc: codec.NewProtoCodec(interfaceRegistry), + log: log.With().Str("component", "startup_validator").Logger(), + config: config, + pushCore: pushCore, + cdc: codec.NewProtoCodec(interfaceRegistry), } } @@ -84,8 +93,21 @@ func (sv *StartupValidator) ValidateStartupRequirements() (*StartupValidationRes Str("key_address", keyAddr.String()). Msg("Using hotkey from keyring") - // Validate AuthZ permissions using the pushcore package - granter, authorizedMsgs, err := pushcore.QueryGrantsWithRetry(sv.grpcURL, keyAddr.String(), sv.cdc, sv.log) + // Query AuthZ grants using pushcore (returns raw response) + grantResp, err := sv.pushCore.GetGranteeGrants(keyAddr.String()) + if err != nil { + return nil, fmt.Errorf("failed to query AuthZ grants: %w", err) + } + + // Extract grant information from the raw response + grants := extractGrantInfo(grantResp, sv.cdc) + + if len(grants) == 0 { + return nil, fmt.Errorf("no AuthZ grants found. 
Please grant permissions:\npuniversald tx authz grant %s generic --msg-type=/uexecutor.v1.MsgVoteInbound --from ", keyAddr.String()) + } + + // Validate that all required message types are authorized + granter, authorizedMsgs, err := sv.validateGrants(grants, keyAddr.String()) if err != nil { return nil, fmt.Errorf("AuthZ validation failed: %w", err) } @@ -104,3 +126,91 @@ func (sv *StartupValidator) ValidateStartupRequirements() (*StartupValidationRes }, nil } +// validateGrants validates that all required message types are present in the grants. +// It filters out expired grants and checks that all required messages are authorized. +func (sv *StartupValidator) validateGrants(grants []GrantInfo, granteeAddr string) (string, []string, error) { + now := time.Now() + authorizedMessages := make(map[string]string) // msgType -> granter + var granter string + + // Process grants and filter out expired ones + for _, grant := range grants { + // Skip expired grants + if grant.Expiration != nil && grant.Expiration.Before(now) { + continue + } + + // Check if this is a required message + for _, requiredMsg := range constant.RequiredMsgGrants { + if grant.MessageType == requiredMsg { + authorizedMessages[grant.MessageType] = grant.Granter + if granter == "" { + granter = grant.Granter + } + break + } + } + } + + // Check if all required messages are authorized + var missingMessages []string + for _, requiredMsg := range constant.RequiredMsgGrants { + if _, ok := authorizedMessages[requiredMsg]; !ok { + missingMessages = append(missingMessages, requiredMsg) + } + } + + if len(missingMessages) > 0 { + return "", nil, fmt.Errorf("missing AuthZ grants for: %v\nGrant permissions using:\npuniversald tx authz grant %s generic --msg-type= --from ", missingMessages, granteeAddr) + } + + // Return authorized messages + authorizedList := make([]string, 0, len(authorizedMessages)) + for msgType := range authorizedMessages { + authorizedList = append(authorizedList, msgType) + } + + 
return granter, authorizedList, nil +} + +// extractGrantInfo extracts grant information from the AuthZ grant response. +// It only processes GenericAuthorization grants and returns their message types. +func extractGrantInfo(grantResp *authz.QueryGranteeGrantsResponse, cdc *codec.ProtoCodec) []GrantInfo { + var grants []GrantInfo + + for _, grant := range grantResp.Grants { + if grant.Authorization == nil { + continue + } + + // Only process GenericAuthorization + if grant.Authorization.TypeUrl != "/cosmos.authz.v1beta1.GenericAuthorization" { + continue + } + + msgType, err := extractMessageType(grant.Authorization, cdc) + if err != nil { + continue // Skip if we can't extract the message type + } + + // grant.Expiration is already *time.Time, so we can use it directly + expiration := grant.Expiration + + grants = append(grants, GrantInfo{ + Granter: grant.Granter, + MessageType: msgType, + Expiration: expiration, + }) + } + + return grants +} + +// extractMessageType extracts the message type from a GenericAuthorization protobuf Any. 
+func extractMessageType(authzAny *codectypes.Any, cdc *codec.ProtoCodec) (string, error) { + var genericAuth authz.GenericAuthorization + if err := cdc.Unmarshal(authzAny.Value, &genericAuth); err != nil { + return "", err + } + return genericAuth.Msg, nil +} diff --git a/universalClient/core/vote_handler.go b/universalClient/core/vote_handler.go index 3875ef75..fa7a48a9 100644 --- a/universalClient/core/vote_handler.go +++ b/universalClient/core/vote_handler.go @@ -180,19 +180,19 @@ func (vh *VoteHandler) constructInbound(tx *store.ChainTransaction) (*uetypes.In // Map txType from eventData to proper enum value // Event data uses: 0=GAS, 1=GAS_AND_PAYLOAD, 2=FUNDS, 3=FUNDS_AND_PAYLOAD // Enum values are: 0=UNSPECIFIED_TX, 1=GAS, 2=FUNDS, 3=FUNDS_AND_PAYLOAD, 4=GAS_AND_PAYLOAD - txType := uetypes.InboundTxType_UNSPECIFIED_TX + txType := uetypes.TxType_UNSPECIFIED_TX switch eventData.TxType { case 0: - txType = uetypes.InboundTxType_GAS + txType = uetypes.TxType_GAS case 1: - txType = uetypes.InboundTxType_GAS_AND_PAYLOAD + txType = uetypes.TxType_GAS_AND_PAYLOAD case 2: - txType = uetypes.InboundTxType_FUNDS + txType = uetypes.TxType_FUNDS case 3: - txType = uetypes.InboundTxType_FUNDS_AND_PAYLOAD + txType = uetypes.TxType_FUNDS_AND_PAYLOAD default: // For any unknown value, default to GAS - txType = uetypes.InboundTxType_UNSPECIFIED_TX + txType = uetypes.TxType_UNSPECIFIED_TX } // Convert tx.TxHash to hex format if it's in base58 @@ -215,12 +215,12 @@ func (vh *VoteHandler) constructInbound(tx *store.ChainTransaction) (*uetypes.In TxType: txType, } - if txType == uetypes.InboundTxType_FUNDS_AND_PAYLOAD || txType == uetypes.InboundTxType_GAS_AND_PAYLOAD { + if txType == uetypes.TxType_FUNDS_AND_PAYLOAD || txType == uetypes.TxType_GAS_AND_PAYLOAD { inboundMsg.UniversalPayload = &eventData.Payload } // Set recipient for transactions that involve funds - if txType == uetypes.InboundTxType_FUNDS || txType == uetypes.InboundTxType_GAS { + if txType == 
uetypes.TxType_FUNDS || txType == uetypes.TxType_GAS { inboundMsg.Recipient = eventData.Recipient } diff --git a/universalClient/core/vote_handler_test.go b/universalClient/core/vote_handler_test.go index e33fc6e8..896618f8 100644 --- a/universalClient/core/vote_handler_test.go +++ b/universalClient/core/vote_handler_test.go @@ -127,12 +127,12 @@ func TestVoteHandler_VoteAndConfirm(t *testing.T) { { name: "vote transaction fails with non-zero code", tx: &store.ChainTransaction{ - TxHash: "0x456", - BlockNumber: 200, - EventIdentifier: "add_funds", - Status: "pending", - Confirmations: 15, - Data: json.RawMessage(`{}`), + TxHash: "0x456", + BlockNumber: 200, + EventIdentifier: "add_funds", + Status: "pending", + Confirmations: 15, + Data: json.RawMessage(`{}`), }, setupMock: func(m *MockTxSigner) { m.On("SignAndBroadcastAuthZTx", @@ -152,12 +152,12 @@ func TestVoteHandler_VoteAndConfirm(t *testing.T) { { name: "broadcast error", tx: &store.ChainTransaction{ - TxHash: "0x789", - BlockNumber: 300, - EventIdentifier: "addFunds", - Status: "pending", - Confirmations: 20, - Data: json.RawMessage(`{}`), + TxHash: "0x789", + BlockNumber: 300, + EventIdentifier: "addFunds", + Status: "pending", + Confirmations: 20, + Data: json.RawMessage(`{}`), }, setupMock: func(m *MockTxSigner) { m.On("SignAndBroadcastAuthZTx", @@ -228,7 +228,7 @@ func TestVoteHandler_constructInbound(t *testing.T) { { name: "complete data for EVM transaction", tx: &store.ChainTransaction{ - TxHash: "0xabc123", + TxHash: "0xabc123", EventIdentifier: "addFunds", Data: json.RawMessage(`{ "sourceChain": "eip155:1", @@ -248,15 +248,15 @@ func TestVoteHandler_constructInbound(t *testing.T) { Amount: "1000000", AssetAddr: "0x333", LogIndex: "5", - TxType: uetypes.InboundTxType_FUNDS_AND_PAYLOAD, + TxType: uetypes.TxType_FUNDS_AND_PAYLOAD, }, }, { name: "minimal data with defaults", tx: &store.ChainTransaction{ - TxHash: "0xdef456", + TxHash: "0xdef456", EventIdentifier: "add_funds", - Data: 
json.RawMessage(`{}`), + Data: json.RawMessage(`{}`), }, expected: &uetypes.Inbound{ SourceChain: "", @@ -266,15 +266,15 @@ func TestVoteHandler_constructInbound(t *testing.T) { Amount: "", AssetAddr: "", LogIndex: "0", - TxType: uetypes.InboundTxType_GAS, + TxType: uetypes.TxType_GAS, }, }, { name: "nil data returns error", tx: &store.ChainTransaction{ - TxHash: "0x789", + TxHash: "0x789", EventIdentifier: "addFunds", - Data: nil, + Data: nil, }, wantError: true, }, @@ -565,7 +565,7 @@ func TestVoteHandler_VoteTxHashNotOverwritten(t *testing.T) { // Create transaction that already has a vote tx hash existingVoteTxHash := "existing_vote_tx_123" tx := &store.ChainTransaction{ - TxHash: "0xalready_voted", + TxHash: "0xalready_voted", BlockNumber: 2000, EventIdentifier: "0xf9bfe8a7", Status: "confirmed", diff --git a/universalClient/cron/chain_cache_job.go b/universalClient/cron/chain_cache_job.go deleted file mode 100644 index d43b9c1a..00000000 --- a/universalClient/cron/chain_cache_job.go +++ /dev/null @@ -1,144 +0,0 @@ -// cron/chain_cache_job.go -package cron - -import ( - "context" - "errors" - "sync" - "time" - - "github.com/rs/zerolog" - - "github.com/pushchain/push-chain-node/universalClient/cache" - "github.com/pushchain/push-chain-node/universalClient/pushcore" -) - -type ChainCacheJob struct { - cache *cache.Cache - client *pushcore.Client - interval time.Duration - perSyncTimeout time.Duration - logger zerolog.Logger - - mu sync.Mutex - running bool - stopCh chan struct{} - wg sync.WaitGroup -} - -func NewChainCacheJob(ca *cache.Cache, cl *pushcore.Client, interval, perSyncTimeout time.Duration, logger zerolog.Logger) *ChainCacheJob { - if interval <= 0 { - interval = time.Minute - } - if perSyncTimeout <= 0 { - perSyncTimeout = 8 * time.Second - } - return &ChainCacheJob{ - cache: ca, - client: cl, - interval: interval, - perSyncTimeout: perSyncTimeout, - logger: logger.With().Str("component", "chain_cache_cron").Logger(), - } -} - -// Start launches the 
background loop and returns immediately (non-blocking). -// Safe to call multiple times; subsequent calls are no-ops. -func (j *ChainCacheJob) Start(ctx context.Context) error { - j.mu.Lock() - defer j.mu.Unlock() - if j.running { - return nil - } - if j.cache == nil || j.client == nil { - return errors.New("cron: cache and client must be non-nil") - } - - j.stopCh = make(chan struct{}) - j.running = true - j.wg.Add(1) - - go j.run(ctx) - return nil -} - -// Stop signals the loop to exit and waits for it to finish. -// Safe to call multiple times. -func (j *ChainCacheJob) Stop() { - j.mu.Lock() - if !j.running { - j.mu.Unlock() - return - } - close(j.stopCh) - j.running = false - j.mu.Unlock() - j.wg.Wait() -} - -func (j *ChainCacheJob) run(parent context.Context) { - defer j.wg.Done() - - // Initial sync with 3 retries (1s, 2s, 4s) - if err := j.initialSync(parent); err != nil { - j.logger.Warn().Err(err).Msg("initial chain config sync failed; continuing with empty/stale cache") - } - - t := time.NewTicker(j.interval) - defer t.Stop() - - for { - select { - case <-parent.Done(): - j.logger.Info().Msg("chain cache cron: context canceled; stopping") - return - case <-j.stopCh: - j.logger.Info().Msg("chain cache cron: stop requested; stopping") - return - case <-t.C: - if err := j.syncOnce(parent); err != nil { - j.logger.Warn().Err(err).Msg("periodic chain config refresh failed; keeping previous cache") - } - } - } -} - -func (j *ChainCacheJob) initialSync(ctx context.Context) error { - backoff := time.Second - var lastErr error - for attempt := 1; attempt <= 3; attempt++ { - if err := j.syncOnce(ctx); err != nil { - lastErr = err - j.logger.Warn().Int("attempt", attempt).Err(err).Msg("initial chain config sync attempt failed") - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(backoff): - backoff *= 2 - } - continue - } - j.logger.Info().Int("attempt", attempt).Msg("initial chain config sync successful") - return nil - } - return lastErr -} - 
-func (j *ChainCacheJob) syncOnce(parent context.Context) error { - timeout := j.perSyncTimeout - if dl, ok := parent.Deadline(); ok { - if remain := time.Until(dl); remain > 0 && remain < timeout { - timeout = remain - } - } - ctx, cancel := context.WithTimeout(parent, timeout) - defer cancel() - - cfgs, err := j.client.GetAllChainConfigs(ctx) - if err != nil { - return err - } - - j.cache.UpdateChains(cfgs) - return nil -} diff --git a/universalClient/cron/chain_registry_job.go b/universalClient/cron/chain_registry_job.go deleted file mode 100644 index c2dc676e..00000000 --- a/universalClient/cron/chain_registry_job.go +++ /dev/null @@ -1,187 +0,0 @@ -// cron/chain_cache_job.go -package cron - -import ( - "context" - "errors" - "sync" - "time" - - "github.com/rs/zerolog" - - "github.com/pushchain/push-chain-node/universalClient/cache" - "github.com/pushchain/push-chain-node/universalClient/chains" -) - -type ChainRegistryJob struct { - cache *cache.Cache - chainRegistry *chains.ChainRegistry - interval time.Duration - perSyncTimeout time.Duration - logger zerolog.Logger - - mu sync.Mutex - running bool - stopCh chan struct{} - forceCh chan struct{} - wg sync.WaitGroup -} - -func NewChainRegistryJob(ca *cache.Cache, cr *chains.ChainRegistry, interval, perSyncTimeout time.Duration, logger zerolog.Logger) *ChainRegistryJob { - if interval <= 0 { - interval = time.Minute - } - if perSyncTimeout <= 0 { - perSyncTimeout = 8 * time.Second - } - return &ChainRegistryJob{ - cache: ca, - chainRegistry: cr, - interval: interval, - perSyncTimeout: perSyncTimeout, - logger: logger.With().Str("component", "chain_registry_cron").Logger(), - } -} - -// Start launches the background loop and returns immediately (non-blocking). -// Safe to call multiple times; subsequent calls are no-ops. 
-func (j *ChainRegistryJob) Start(ctx context.Context) error { - j.mu.Lock() - defer j.mu.Unlock() - if j.running { - return nil - } - if j.cache == nil || j.chainRegistry == nil { - return errors.New("cron: cache and chainRegistry must be non-nil") - } - - j.stopCh = make(chan struct{}) - j.forceCh = make(chan struct{}, 1) // buffered so ForceSync won't block - j.running = true - j.wg.Add(1) - - go j.run(ctx) - return nil -} - -// Stop signals the loop to exit and waits for it to finish. -// Safe to call multiple times. -func (j *ChainRegistryJob) Stop() { - j.mu.Lock() - if !j.running { - j.mu.Unlock() - return - } - close(j.stopCh) - j.running = false - j.mu.Unlock() - j.wg.Wait() -} - -func (j *ChainRegistryJob) run(parent context.Context) { - defer j.wg.Done() - - // Initial sync with 3 retries (1s, 2s, 4s) to populate registry immediately at startup - if err := j.initialSync(parent); err != nil { - j.logger.Warn().Err(err).Msg("initial chain registry sync failed; continuing with empty/stale registry") - } - - t := time.NewTicker(j.interval) - defer t.Stop() - - for { - select { - case <-parent.Done(): - j.logger.Info().Msg("chain registry cron: context canceled; stopping") - return - case <-j.stopCh: - j.logger.Info().Msg("chain registry cron: stop requested; stopping") - return - case <-t.C: - if err := j.syncOnce(parent); err != nil { - j.logger.Warn().Err(err).Msg("periodic chain registry refresh failed; keeping previous registry") - } - case <-j.forceCh: - if err := j.syncOnce(parent); err != nil { - j.logger.Warn().Err(err).Msg("forced chain registry refresh failed; keeping previous registry") - } - } - } -} - -// initialSync performs the initial synchronization with retries -func (j *ChainRegistryJob) initialSync(ctx context.Context) error { - backoff := time.Second - var lastErr error - for attempt := 1; attempt <= 3; attempt++ { - if err := j.syncOnce(ctx); err != nil { - lastErr = err - j.logger.Warn().Int("attempt", attempt).Err(err).Msg("initial 
chain registry sync attempt failed") - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(backoff): - backoff *= 2 - } - continue - } - j.logger.Info().Int("attempt", attempt).Msg("initial chain registry sync successful") - return nil - } - return lastErr -} - -func (j *ChainRegistryJob) syncOnce(parent context.Context) error { - timeout := j.perSyncTimeout - if dl, ok := parent.Deadline(); ok { - if remain := time.Until(dl); remain > 0 && remain < timeout { - timeout = remain - } - } - ctx, cancel := context.WithTimeout(parent, timeout) - defer cancel() - - chainConfigs := j.cache.GetAllChains() - - // Track which chains we've seen - seenChains := make(map[string]bool) - - for _, chain := range chainConfigs { - config := chain.Config - if config == nil || config.Chain == "" { - continue - } - - seenChains[config.Chain] = true - - if config.Enabled == nil || (!config.Enabled.IsInboundEnabled && !config.Enabled.IsOutboundEnabled) { - j.logger.Debug(). - Str("chain", config.Chain). - Msg("chain is disabled, removing if exists") - j.chainRegistry.RemoveChain(config.Chain) - continue - } - - // Add or update the chain - if err := j.chainRegistry.AddOrUpdateChain(ctx, config); err != nil { - j.logger.Error(). - Err(err). - Str("chain", config.Chain). - Msg("failed to add/update chain") - // Continue with other chains - } - } - - // Remove chains that no longer exist in the config - allChains := j.chainRegistry.GetAllChains() - for chainID := range allChains { - if !seenChains[chainID] { - j.logger.Info(). - Str("chain", chainID). 
- Msg("removing chain no longer in config") - j.chainRegistry.RemoveChain(chainID) - } - } - - return nil -} diff --git a/universalClient/db/chain_db_manager.go b/universalClient/db/chain_db_manager.go deleted file mode 100644 index 5300135a..00000000 --- a/universalClient/db/chain_db_manager.go +++ /dev/null @@ -1,187 +0,0 @@ -// Package db provides a lightweight GORM-based SQLite wrapper for persisting -// state required by the Push Universal Validator (UV), with per-chain database isolation. -package db - -import ( - "path/filepath" - "sync" - - "github.com/pkg/errors" - "github.com/pushchain/push-chain-node/universalClient/config" - "github.com/rs/zerolog" -) - -// ChainDBManager manages per-chain database instances for traffic isolation -type ChainDBManager struct { - baseDir string - databases map[string]*DB // chainID -> DB instance - mu sync.RWMutex - logger zerolog.Logger - inMemory bool // For testing with in-memory databases - appConfig *config.Config -} - -// NewChainDBManager creates a new manager for per-chain databases -func NewChainDBManager(baseDir string, logger zerolog.Logger, cfg *config.Config) *ChainDBManager { - return &ChainDBManager{ - baseDir: baseDir, - databases: make(map[string]*DB), - logger: logger.With().Str("component", "chain_db_manager").Logger(), - appConfig: cfg, - } -} - -// NewInMemoryChainDBManager creates a manager with in-memory databases (for testing) -func NewInMemoryChainDBManager(logger zerolog.Logger, cfg *config.Config) *ChainDBManager { - return &ChainDBManager{ - databases: make(map[string]*DB), - logger: logger.With().Str("component", "chain_db_manager").Logger(), - inMemory: true, - appConfig: cfg, - } -} - -// GetChainDB returns a database instance for a specific chain -// Creates the database lazily if it doesn't exist -func (m *ChainDBManager) GetChainDB(chainID string) (*DB, error) { - // Check if database already exists - m.mu.RLock() - if db, exists := m.databases[chainID]; exists { - m.mu.RUnlock() - return 
db, nil - } - m.mu.RUnlock() - - // Need to create new database - m.mu.Lock() - defer m.mu.Unlock() - - // Double-check after acquiring write lock - if db, exists := m.databases[chainID]; exists { - return db, nil - } - - // Create new database for this chain - var db *DB - var err error - - if m.inMemory { - // For testing - create in-memory database - db, err = OpenInMemoryDB(true) - if err != nil { - return nil, errors.Wrapf(err, "failed to create in-memory database for chain %s", chainID) - } - m.logger.Debug(). - Str("chain_id", chainID). - Msg("created in-memory database for chain") - } else { - // Create chain-specific directory and database file - chainDir := filepath.Join(m.baseDir, "chains", sanitizeChainID(chainID)) - dbFilename := "chain_data.db" - - db, err = OpenFileDB(chainDir, dbFilename, true) - if err != nil { - return nil, errors.Wrapf(err, "failed to create database for chain %s", chainID) - } - - m.logger.Info(). - Str("chain_id", chainID). - Str("db_path", filepath.Join(chainDir, dbFilename)). - Msg("created file database for chain") - } - - // Store in map - m.databases[chainID] = db - - return db, nil -} - -// GetAllDatabases returns all active database instances -func (m *ChainDBManager) GetAllDatabases() map[string]*DB { - m.mu.RLock() - defer m.mu.RUnlock() - - // Return a copy to prevent external modification - result := make(map[string]*DB) - for k, v := range m.databases { - result[k] = v - } - return result -} - -// CloseChainDB closes and removes a specific chain's database from the manager -func (m *ChainDBManager) CloseChainDB(chainID string) error { - m.mu.Lock() - defer m.mu.Unlock() - - db, exists := m.databases[chainID] - if !exists { - return nil // Already closed or never opened - } - - if err := db.Close(); err != nil { - return errors.Wrapf(err, "failed to close database for chain %s", chainID) - } - - delete(m.databases, chainID) - m.logger.Info(). - Str("chain_id", chainID). 
- Msg("closed database for chain") - - return nil -} - -// CloseAll closes all database connections -func (m *ChainDBManager) CloseAll() error { - m.mu.Lock() - defer m.mu.Unlock() - - var errs []error - for chainID, db := range m.databases { - if err := db.Close(); err != nil { - errs = append(errs, errors.Wrapf(err, "failed to close database for chain %s", chainID)) - } - } - - // Clear the map - m.databases = make(map[string]*DB) - - if len(errs) > 0 { - return errors.Errorf("failed to close %d databases", len(errs)) - } - - return nil -} - -// GetDatabaseStats returns statistics about managed databases -func (m *ChainDBManager) GetDatabaseStats() map[string]interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - - chains := make([]string, 0, len(m.databases)) - for chainID := range m.databases { - chains = append(chains, chainID) - } - - return map[string]interface{}{ - "total_databases": len(m.databases), - "chains": chains, - "in_memory": m.inMemory, - "base_directory": m.baseDir, - } -} - -// sanitizeChainID converts chain ID to filesystem-safe format -// e.g., "eip155:1" -> "eip155_1" -func sanitizeChainID(chainID string) string { - // Replace colons and other special characters with underscores - result := "" - for _, r := range chainID { - if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '-' || r == '_' { - result += string(r) - } else { - result += "_" - } - } - return result -} \ No newline at end of file diff --git a/universalClient/db/chain_db_manager_test.go b/universalClient/db/chain_db_manager_test.go deleted file mode 100644 index 282df389..00000000 --- a/universalClient/db/chain_db_manager_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package db - -import ( - "path/filepath" - "testing" - - "github.com/pushchain/push-chain-node/universalClient/config" - "github.com/pushchain/push-chain-node/universalClient/logger" - "github.com/pushchain/push-chain-node/universalClient/store" - "github.com/stretchr/testify/require" 
-) - -func TestChainDBManager(t *testing.T) { - // Setup test config and logger - cfg := &config.Config{ - LogLevel: 0, // Debug level - LogFormat: "console", - } - log := logger.Init(*cfg) - - t.Run("InMemoryManager", func(t *testing.T) { - manager := NewInMemoryChainDBManager(log, cfg) - defer manager.CloseAll() - - // Test getting database for chain - chainID := "eip155:1" - db1, err := manager.GetChainDB(chainID) - require.NoError(t, err) - require.NotNil(t, db1) - - // Test getting same database again - db2, err := manager.GetChainDB(chainID) - require.NoError(t, err) - require.Equal(t, db1, db2) // Should return same instance - - // Test different chain - chainID2 := "eip155:137" - db3, err := manager.GetChainDB(chainID2) - require.NoError(t, err) - require.NotNil(t, db3) - require.NotEqual(t, db1, db3) // Should be different instance - - // Test stats - stats := manager.GetDatabaseStats() - require.Equal(t, 2, stats["total_databases"]) - require.Equal(t, true, stats["in_memory"]) - - chains, ok := stats["chains"].([]string) - require.True(t, ok) - require.Len(t, chains, 2) - require.Contains(t, chains, chainID) - require.Contains(t, chains, chainID2) - }) - - t.Run("FileManagerWithTempDir", func(t *testing.T) { - tempDir := t.TempDir() - manager := NewChainDBManager(tempDir, log, cfg) - defer manager.CloseAll() - - // Test getting database for chain - chainID := "eip155:1" - db1, err := manager.GetChainDB(chainID) - require.NoError(t, err) - require.NotNil(t, db1) - - // Test that database file was created - expectedPath := filepath.Join(tempDir, "chains", "eip155_1", "chain_data.db") - require.FileExists(t, expectedPath) - - // Test special characters in chain ID - chainID2 := "solana:mainnet-beta" - db2, err := manager.GetChainDB(chainID2) - require.NoError(t, err) - require.NotNil(t, db2) - - // Test stats - stats := manager.GetDatabaseStats() - require.Equal(t, 2, stats["total_databases"]) - require.Equal(t, false, stats["in_memory"]) - require.Equal(t, 
tempDir, stats["base_directory"]) - }) - - t.Run("CloseSpecificDatabase", func(t *testing.T) { - manager := NewInMemoryChainDBManager(log, cfg) - defer manager.CloseAll() - - chainID := "eip155:1" - db, err := manager.GetChainDB(chainID) - require.NoError(t, err) - require.NotNil(t, db) - - // Close specific database - err = manager.CloseChainDB(chainID) - require.NoError(t, err) - - // Stats should show no databases - stats := manager.GetDatabaseStats() - require.Equal(t, 0, stats["total_databases"]) - }) - - t.Run("DatabaseOperations", func(t *testing.T) { - manager := NewInMemoryChainDBManager(log, cfg) - defer manager.CloseAll() - - chainID := "eip155:1" - db, err := manager.GetChainDB(chainID) - require.NoError(t, err) - - // Test basic database operations - tx := &store.ChainTransaction{ - TxHash: "0x123", - BlockNumber: 1000, - EventIdentifier: "event1", - Status: "pending", - Confirmations: 0, - } - - // Create transaction - err = db.Client().Create(tx).Error - require.NoError(t, err) - - // Query transaction - var retrieved store.ChainTransaction - err = db.Client().Where("tx_hash = ?", "0x123").First(&retrieved).Error - require.NoError(t, err) - require.Equal(t, "0x123", retrieved.TxHash) - }) - - t.Run("ChainIDSanitization", func(t *testing.T) { - testCases := []struct { - input string - expected string - }{ - {"eip155:1", "eip155_1"}, - {"solana:mainnet-beta", "solana_mainnet-beta"}, - {"cosmos:cosmoshub-4", "cosmos_cosmoshub-4"}, - {"special:chars@#$", "special_chars___"}, - } - - for _, tc := range testCases { - result := sanitizeChainID(tc.input) - require.Equal(t, tc.expected, result, "Input: %s", tc.input) - } - }) -} - -func TestChainDBManagerConcurrency(t *testing.T) { - cfg := &config.Config{ - LogLevel: 0, - LogFormat: "console", - } - log := logger.Init(*cfg) - - manager := NewInMemoryChainDBManager(log, cfg) - defer manager.CloseAll() - - // Test concurrent access to same chain - chainID := "eip155:1" - - // Pre-initialize the database to 
ensure schema is migrated before concurrent access - initDB, err := manager.GetChainDB(chainID) - require.NoError(t, err) - require.NotNil(t, initDB) - - done := make(chan bool, 10) - - for i := 0; i < 10; i++ { - go func(id int) { - defer func() { done <- true }() - - db, err := manager.GetChainDB(chainID) - require.NoError(t, err) - require.NotNil(t, db) - - // Perform some database operation - tx := &store.ChainTransaction{ - TxHash: string(rune('a'+id)) + "123", - BlockNumber: uint64(1000 + id), - EventIdentifier: "event1", - Status: "pending", - Confirmations: 0, - } - - err = db.Client().Create(tx).Error - require.NoError(t, err) - }(i) - } - - // Wait for all goroutines - for i := 0; i < 10; i++ { - <-done - } - - // Verify all transactions were created - db, err := manager.GetChainDB(chainID) - require.NoError(t, err) - - var count int64 - err = db.Client().Model(&store.ChainTransaction{}).Count(&count).Error - require.NoError(t, err) - require.Equal(t, int64(10), count) - - // Stats should show only one database - stats := manager.GetDatabaseStats() - require.Equal(t, 1, stats["total_databases"]) -} \ No newline at end of file diff --git a/universalClient/db/db.go b/universalClient/db/db.go index 6f77aefa..aa598fee 100644 --- a/universalClient/db/db.go +++ b/universalClient/db/db.go @@ -7,7 +7,6 @@ import ( "fmt" "os" "strings" - "time" "github.com/pkg/errors" "github.com/pushchain/push-chain-node/universalClient/store" @@ -32,11 +31,8 @@ var ( // schemaModels lists the structs to be auto-migrated into the database. schemaModels = []any{ - &store.ChainState{}, - &store.ChainTransaction{}, - &store.GasVoteTransaction{}, - &store.TSSEvent{}, - &store.ChainTSSTransaction{}, + &store.State{}, + &store.Event{}, // Add additional models here as needed. 
} ) @@ -179,19 +175,3 @@ func prepareFilePath(dir, filename string) (string, error) { return fmt.Sprintf("%s/%s", dir, filename), nil } - -// DeleteOldConfirmedTransactions removes confirmed gateway transactions older than the specified retention period. -// Only transactions with status "confirmed" that were last updated before the cutoff time are deleted. -// Returns the number of deleted records and any error encountered. -func (d *DB) DeleteOldConfirmedTransactions(retentionPeriod time.Duration) (int64, error) { - cutoffTime := time.Now().Add(-retentionPeriod) - - result := d.client.Where("status = ? AND updated_at < ?", "confirmed", cutoffTime). - Delete(&store.ChainTransaction{}) - - if result.Error != nil { - return 0, errors.Wrap(result.Error, "failed to delete old confirmed transactions") - } - - return result.RowsAffected, nil -} diff --git a/universalClient/db/db_test.go b/universalClient/db/db_test.go index 477c436b..3e1f46ba 100644 --- a/universalClient/db/db_test.go +++ b/universalClient/db/db_test.go @@ -3,7 +3,6 @@ package db import ( "path/filepath" "testing" - "time" "github.com/pushchain/push-chain-node/universalClient/store" "github.com/stretchr/testify/assert" @@ -57,8 +56,8 @@ func TestDB_OpenModes(t *testing.T) { func runSampleInsertSelectTest(t *testing.T, db *DB) { // Given a sample row - entry := store.ChainState{ - LastBlock: 10101, + entry := store.State{ + BlockHeight: 10101, } // ACT: Insert @@ -66,234 +65,8 @@ func runSampleInsertSelectTest(t *testing.T, db *DB) { require.NoError(t, err) // ACT: Select - var result store.ChainState + var result store.State err = db.Client().First(&result).Error require.NoError(t, err) - assert.Equal(t, uint64(10101), result.LastBlock) -} - -func TestDeleteOldConfirmedTransactions(t *testing.T) { - // Setup in-memory test database - database, err := OpenInMemoryDB(true) - require.NoError(t, err) - defer database.Close() - - now := time.Now() - retentionPeriod := 24 * time.Hour - - // Create test 
transactions - testCases := []struct { - name string - txHash string - status string - age time.Duration - shouldDelete bool - }{ - { - name: "old confirmed", - txHash: "0x1111", - status: "confirmed", - age: 25 * time.Hour, // Older than retention period - shouldDelete: true, - }, - { - name: "recent confirmed", - txHash: "0x2222", - status: "confirmed", - age: 23 * time.Hour, // Within retention period - shouldDelete: false, - }, - { - name: "old pending", - txHash: "0x3333", - status: "pending", - age: 25 * time.Hour, // Older than retention period but pending - shouldDelete: false, - }, - { - name: "old fast_confirmed", - txHash: "0x4444", - status: "fast_confirmed", - age: 25 * time.Hour, // Older than retention period but not "confirmed" - shouldDelete: false, - }, - { - name: "old failed", - txHash: "0x5555", - status: "failed", - age: 25 * time.Hour, // Older than retention period but failed - shouldDelete: false, - }, - { - name: "old reorged", - txHash: "0x6666", - status: "reorged", - age: 25 * time.Hour, // Older than retention period but reorged - shouldDelete: false, - }, - } - - // Insert test transactions - var insertedTransactions []*store.ChainTransaction - for i, tc := range testCases { - tx := &store.ChainTransaction{ - - TxHash: tc.txHash, - BlockNumber: uint64(100 + i), - EventIdentifier: "event_" + tc.txHash, - Status: tc.status, - Confirmations: 10, - } - - require.NoError(t, database.Client().Create(tx).Error) - - // Set the UpdatedAt timestamp manually to simulate age - targetTime := now.Add(-tc.age) - require.NoError(t, database.Client().Model(tx).Update("updated_at", targetTime).Error) - - insertedTransactions = append(insertedTransactions, tx) - } - - // Verify initial count - var initialCount int64 - require.NoError(t, database.Client().Model(&store.ChainTransaction{}).Count(&initialCount).Error) - require.Equal(t, int64(len(testCases)), initialCount) - - t.Run("DeleteOldConfirmedTransactions", func(t *testing.T) { - // Perform 
deletion - deletedCount, err := database.DeleteOldConfirmedTransactions(retentionPeriod) - require.NoError(t, err) - - // Count expected deletions - expectedDeleted := 0 - for _, tc := range testCases { - if tc.shouldDelete { - expectedDeleted++ - } - } - require.Equal(t, int64(expectedDeleted), deletedCount) - - // Verify remaining transactions - var remaining []store.ChainTransaction - require.NoError(t, database.Client().Find(&remaining).Error) - - expectedRemaining := len(testCases) - expectedDeleted - require.Len(t, remaining, expectedRemaining) - - // Verify correct transactions remain - remainingHashes := make(map[string]bool) - for _, tx := range remaining { - remainingHashes[tx.TxHash] = true - } - - for _, tc := range testCases { - if tc.shouldDelete { - require.False(t, remainingHashes[tc.txHash], "Transaction %s should have been deleted", tc.name) - } else { - require.True(t, remainingHashes[tc.txHash], "Transaction %s should not have been deleted", tc.name) - } - } - }) -} - -func TestDeleteOldConfirmedTransactionsEdgeCases(t *testing.T) { - database, err := OpenInMemoryDB(true) - require.NoError(t, err) - defer database.Close() - - t.Run("EmptyDatabase", func(t *testing.T) { - deletedCount, err := database.DeleteOldConfirmedTransactions(24 * time.Hour) - require.NoError(t, err) - require.Equal(t, int64(0), deletedCount) - }) - - t.Run("NoMatchingTransactions", func(t *testing.T) { - // Insert only recent or non-confirmed transactions - recentConfirmed := &store.ChainTransaction{ - - TxHash: "0x7777", - BlockNumber: 500, - EventIdentifier: "recent", - Status: "confirmed", - Confirmations: 12, - } - require.NoError(t, database.Client().Create(recentConfirmed).Error) - - oldPending := &store.ChainTransaction{ - - TxHash: "0x8888", - BlockNumber: 501, - EventIdentifier: "old_pending", - Status: "pending", - Confirmations: 5, - } - require.NoError(t, database.Client().Create(oldPending).Error) - - // Set old timestamp for pending transaction - oldTime := 
time.Now().Add(-25 * time.Hour) - require.NoError(t, database.Client().Model(oldPending).Update("updated_at", oldTime).Error) - - // Should delete nothing - deletedCount, err := database.DeleteOldConfirmedTransactions(time.Hour) - require.NoError(t, err) - require.Equal(t, int64(0), deletedCount) - - // Verify both transactions still exist - var count int64 - require.NoError(t, database.Client().Model(&store.ChainTransaction{}).Count(&count).Error) - require.Equal(t, int64(2), count) - }) - - t.Run("ZeroRetentionPeriod", func(t *testing.T) { - // Clean up - database.Client().Exec("DELETE FROM chain_transactions") - - // Create a confirmed transaction - confirmedTx := &store.ChainTransaction{ - - TxHash: "0x9999", - BlockNumber: 600, - EventIdentifier: "zero_retention", - Status: "confirmed", - Confirmations: 15, - } - require.NoError(t, database.Client().Create(confirmedTx).Error) - - // With zero retention period, even recent confirmed transactions should be deleted - deletedCount, err := database.DeleteOldConfirmedTransactions(0) - require.NoError(t, err) - require.Equal(t, int64(1), deletedCount) - - // Verify database is empty - var count int64 - require.NoError(t, database.Client().Model(&store.ChainTransaction{}).Count(&count).Error) - require.Equal(t, int64(0), count) - }) - - t.Run("VeryLongRetentionPeriod", func(t *testing.T) { - // Create an old confirmed transaction - oldConfirmed := &store.ChainTransaction{ - - TxHash: "0xAAAA", - BlockNumber: 700, - EventIdentifier: "very_old", - Status: "confirmed", - Confirmations: 20, - } - require.NoError(t, database.Client().Create(oldConfirmed).Error) - - // Set to 1 year ago - oldTime := time.Now().Add(-365 * 24 * time.Hour) - require.NoError(t, database.Client().Model(oldConfirmed).Update("updated_at", oldTime).Error) - - // With very long retention period, nothing should be deleted - deletedCount, err := database.DeleteOldConfirmedTransactions(400 * 24 * time.Hour) // 400 days - require.NoError(t, err) - 
require.Equal(t, int64(0), deletedCount) - - // Verify transaction still exists - var count int64 - require.NoError(t, database.Client().Model(&store.ChainTransaction{}).Count(&count).Error) - require.Equal(t, int64(1), count) - }) + assert.Equal(t, uint64(10101), result.BlockHeight) } diff --git a/universalClient/db/per_chain_transaction_cleaner.go b/universalClient/db/per_chain_transaction_cleaner.go deleted file mode 100644 index 9ddf8610..00000000 --- a/universalClient/db/per_chain_transaction_cleaner.go +++ /dev/null @@ -1,338 +0,0 @@ -package db - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/rs/zerolog" - - "github.com/pushchain/push-chain-node/universalClient/config" -) - -// chainCleaner handles cleanup for a single chain -type chainCleaner struct { - chainID string - database *DB - ticker *time.Ticker - stopCh chan struct{} - cleanupInterval time.Duration - retentionPeriod time.Duration - logger zerolog.Logger -} - -// PerChainTransactionCleaner handles periodic cleanup of old confirmed transactions with per-chain configuration -type PerChainTransactionCleaner struct { - dbManager *ChainDBManager - config *config.Config - chainCleaners map[string]*chainCleaner - mu sync.RWMutex - logger zerolog.Logger - ctx context.Context - cancel context.CancelFunc -} - -// NewPerChainTransactionCleaner creates a new per-chain transaction cleaner -func NewPerChainTransactionCleaner( - dbManager *ChainDBManager, - cfg *config.Config, - logger zerolog.Logger, -) *PerChainTransactionCleaner { - ctx, cancel := context.WithCancel(context.Background()) - - return &PerChainTransactionCleaner{ - dbManager: dbManager, - config: cfg, - chainCleaners: make(map[string]*chainCleaner), - logger: logger.With().Str("component", "per_chain_transaction_cleaner").Logger(), - ctx: ctx, - cancel: cancel, - } -} - -// Start begins the per-chain cleanup process -func (tc *PerChainTransactionCleaner) Start(ctx context.Context) error { - tc.logger.Info().Msg("starting 
per-chain transaction cleaner") - - // Get all active databases and start cleaners for each - databases := tc.dbManager.GetAllDatabases() - - for chainID, chainDB := range databases { - if err := tc.startChainCleaner(chainID, chainDB); err != nil { - tc.logger.Error(). - Err(err). - Str("chain_id", chainID). - Msg("failed to start cleaner for chain") - // Continue with other chains even if one fails - } - } - - // Monitor for new chains being added - go tc.monitorChainUpdates(ctx) - - return nil -} - -// startChainCleaner starts a cleaner for a specific chain -func (tc *PerChainTransactionCleaner) startChainCleaner(chainID string, database *DB) error { - tc.mu.Lock() - defer tc.mu.Unlock() - - // Check if cleaner already exists for this chain - if _, exists := tc.chainCleaners[chainID]; exists { - tc.logger.Debug(). - Str("chain_id", chainID). - Msg("cleaner already exists for chain") - return nil - } - - // Get chain-specific settings (with fallback to global defaults) - cleanupInterval, retentionPeriod := tc.config.GetChainCleanupSettings(chainID) - - cleaner := &chainCleaner{ - chainID: chainID, - database: database, - cleanupInterval: time.Duration(cleanupInterval) * time.Second, - retentionPeriod: time.Duration(retentionPeriod) * time.Second, - stopCh: make(chan struct{}), - logger: tc.logger.With(). - Str("chain_id", chainID). - Logger(), - } - - tc.logger.Info(). - Str("chain_id", chainID). - Str("cleanup_interval", cleaner.cleanupInterval.String()). - Str("retention_period", cleaner.retentionPeriod.String()). 
- Msg("starting cleaner for chain") - - // Perform initial cleanup - if err := tc.performChainCleanup(cleaner); err != nil { - cleaner.logger.Error().Err(err).Msg("failed to perform initial cleanup") - // Don't fail startup on cleanup error, just log it - } - - // Start periodic cleanup - cleaner.ticker = time.NewTicker(cleaner.cleanupInterval) - - go func() { - defer cleaner.ticker.Stop() - for { - select { - case <-tc.ctx.Done(): - cleaner.logger.Info().Msg("context cancelled, stopping chain cleaner") - return - case <-cleaner.stopCh: - cleaner.logger.Info().Msg("stop signal received, stopping chain cleaner") - return - case <-cleaner.ticker.C: - if err := tc.performChainCleanup(cleaner); err != nil { - cleaner.logger.Error().Err(err).Msg("failed to perform scheduled cleanup") - } - } - } - }() - - tc.chainCleaners[chainID] = cleaner - return nil -} - -// performChainCleanup executes cleanup for a specific chain -func (tc *PerChainTransactionCleaner) performChainCleanup(cleaner *chainCleaner) error { - start := time.Now() - - cleaner.logger.Debug(). - Str("retention_period", cleaner.retentionPeriod.String()). - Msg("performing transaction cleanup for chain") - - deletedCount, err := cleaner.database.DeleteOldConfirmedTransactions(cleaner.retentionPeriod) - if err != nil { - return fmt.Errorf("failed to cleanup transactions: %w", err) - } - - duration := time.Since(start) - - if deletedCount > 0 { - cleaner.logger.Info(). - Int64("deleted_count", deletedCount). - Str("duration", duration.String()). - Msg("transaction cleanup completed for chain") - - // Checkpoint WAL after cleanup - tc.checkpointWALForDB(cleaner.database, cleaner.chainID) - } else { - cleaner.logger.Debug(). - Str("duration", duration.String()). 
- Msg("transaction cleanup completed - no transactions to delete") - } - - return nil -} - -// checkpointWALForDB performs WAL checkpointing for a specific database -func (tc *PerChainTransactionCleaner) checkpointWALForDB(database *DB, chainID string) { - tc.logger.Debug(). - Str("chain_id", chainID). - Msg("performing WAL checkpoint") - - // Use PRAGMA wal_checkpoint(TRUNCATE) to force a checkpoint and truncate the WAL - if err := database.Client().Exec("PRAGMA wal_checkpoint(TRUNCATE)").Error; err != nil { - tc.logger.Warn(). - Err(err). - Str("chain_id", chainID). - Msg("failed to checkpoint WAL") - } else { - tc.logger.Debug(). - Str("chain_id", chainID). - Msg("WAL checkpoint completed") - } -} - -// monitorChainUpdates monitors for new chains being added and starts cleaners for them -func (tc *PerChainTransactionCleaner) monitorChainUpdates(ctx context.Context) { - // Check for new chains every minute - ticker := time.NewTicker(60 * time.Second) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-tc.ctx.Done(): - return - case <-ticker.C: - tc.checkForNewChains() - } - } -} - -// checkForNewChains checks for new chains and starts cleaners for them -func (tc *PerChainTransactionCleaner) checkForNewChains() { - databases := tc.dbManager.GetAllDatabases() - - for chainID, chainDB := range databases { - tc.mu.RLock() - _, exists := tc.chainCleaners[chainID] - tc.mu.RUnlock() - - if !exists { - tc.logger.Info(). - Str("chain_id", chainID). - Msg("detected new chain, starting cleaner") - - if err := tc.startChainCleaner(chainID, chainDB); err != nil { - tc.logger.Error(). - Err(err). - Str("chain_id", chainID). 
- Msg("failed to start cleaner for new chain") - } - } - } -} - -// UpdateChainConfig updates the configuration for a specific chain's cleaner -func (tc *PerChainTransactionCleaner) UpdateChainConfig(chainID string) { - tc.mu.Lock() - defer tc.mu.Unlock() - - cleaner, exists := tc.chainCleaners[chainID] - if !exists { - tc.logger.Debug(). - Str("chain_id", chainID). - Msg("no cleaner exists for chain, skipping config update") - return - } - - // Get updated settings - newCleanupInterval, newRetentionPeriod := tc.config.GetChainCleanupSettings(chainID) - - // Check if settings have changed - if time.Duration(newCleanupInterval)*time.Second == cleaner.cleanupInterval && - time.Duration(newRetentionPeriod)*time.Second == cleaner.retentionPeriod { - return // No changes - } - - tc.logger.Info(). - Str("chain_id", chainID). - Str("old_cleanup_interval", cleaner.cleanupInterval.String()). - Str("new_cleanup_interval", (time.Duration(newCleanupInterval) * time.Second).String()). - Str("old_retention_period", cleaner.retentionPeriod.String()). - Str("new_retention_period", (time.Duration(newRetentionPeriod) * time.Second).String()). - Msg("updating cleaner configuration for chain") - - // Stop the old cleaner - close(cleaner.stopCh) - if cleaner.ticker != nil { - cleaner.ticker.Stop() - } - - // Remove from map - delete(tc.chainCleaners, chainID) - - // Get the database for this chain - databases := tc.dbManager.GetAllDatabases() - if chainDB, ok := databases[chainID]; ok { - // Start a new cleaner with updated settings - tc.mu.Unlock() // Unlock before calling startChainCleaner to avoid deadlock - if err := tc.startChainCleaner(chainID, chainDB); err != nil { - tc.logger.Error(). - Err(err). - Str("chain_id", chainID). 
- Msg("failed to restart cleaner with updated config") - } - tc.mu.Lock() // Re-lock for consistency, even though we're about to return - } -} - -// Stop gracefully stops all chain cleaners -func (tc *PerChainTransactionCleaner) Stop() { - tc.logger.Info().Msg("stopping per-chain transaction cleaner") - - // Cancel the context to stop monitoring - tc.cancel() - - tc.mu.Lock() - defer tc.mu.Unlock() - - // Stop all chain cleaners - for chainID, cleaner := range tc.chainCleaners { - tc.logger.Debug(). - Str("chain_id", chainID). - Msg("stopping cleaner for chain") - - close(cleaner.stopCh) - if cleaner.ticker != nil { - cleaner.ticker.Stop() - } - } - - // Clear the map - tc.chainCleaners = make(map[string]*chainCleaner) -} - -// GetCleanerStatus returns the status of all chain cleaners -func (tc *PerChainTransactionCleaner) GetCleanerStatus() map[string]struct { - CleanupInterval time.Duration - RetentionPeriod time.Duration -} { - tc.mu.RLock() - defer tc.mu.RUnlock() - - status := make(map[string]struct { - CleanupInterval time.Duration - RetentionPeriod time.Duration - }) - - for chainID, cleaner := range tc.chainCleaners { - status[chainID] = struct { - CleanupInterval time.Duration - RetentionPeriod time.Duration - }{ - CleanupInterval: cleaner.cleanupInterval, - RetentionPeriod: cleaner.retentionPeriod, - } - } - - return status -} \ No newline at end of file diff --git a/universalClient/db/per_chain_transaction_cleaner_test.go b/universalClient/db/per_chain_transaction_cleaner_test.go deleted file mode 100644 index bb7c42f3..00000000 --- a/universalClient/db/per_chain_transaction_cleaner_test.go +++ /dev/null @@ -1,486 +0,0 @@ -package db - -import ( - "context" - "testing" - "time" - - "github.com/pushchain/push-chain-node/universalClient/config" - "github.com/pushchain/push-chain-node/universalClient/logger" - "github.com/pushchain/push-chain-node/universalClient/store" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" -) - -func TestPerChainTransactionCleaner(t *testing.T) { - log := zerolog.New(zerolog.NewTestWriter(t)) - - t.Run("GetChainCleanupSettings", func(t *testing.T) { - // Test with chain-specific configuration - cfg := &config.Config{ - TransactionCleanupIntervalSeconds: 3600, // Global default: 1 hour - TransactionRetentionPeriodSeconds: 86400, // Global default: 24 hours - ChainConfigs: map[string]config.ChainSpecificConfig{ - "eip155:11155111": { - CleanupIntervalSeconds: intPtr(1800), // Chain-specific: 30 minutes - RetentionPeriodSeconds: intPtr(43200), // Chain-specific: 12 hours - }, - "solana:devnet": { - CleanupIntervalSeconds: intPtr(7200), // Chain-specific: 2 hours - // RetentionPeriodSeconds not set, should use global default - }, - }, - } - - // Test chain with full override - cleanup, retention := cfg.GetChainCleanupSettings("eip155:11155111") - assert.Equal(t, 1800, cleanup, "should use chain-specific cleanup interval") - assert.Equal(t, 43200, retention, "should use chain-specific retention period") - - // Test chain with partial override - cleanup, retention = cfg.GetChainCleanupSettings("solana:devnet") - assert.Equal(t, 7200, cleanup, "should use chain-specific cleanup interval") - assert.Equal(t, 86400, retention, "should use global default retention period") - - // Test chain without specific config (uses global defaults) - cleanup, retention = cfg.GetChainCleanupSettings("unknown:chain") - assert.Equal(t, 3600, cleanup, "should use global default cleanup interval") - assert.Equal(t, 86400, retention, "should use global default retention period") - - // Test with nil ChainConfigs - cfgNoChainConfig := &config.Config{ - TransactionCleanupIntervalSeconds: 3600, - TransactionRetentionPeriodSeconds: 86400, - ChainConfigs: nil, - } - cleanup, retention = cfgNoChainConfig.GetChainCleanupSettings("any:chain") - assert.Equal(t, 3600, cleanup, "should use global default when no chain config exists") - assert.Equal(t, 
86400, retention, "should use global default when no chain config exists") - }) - - t.Run("PerChainCleaner_Creation", func(t *testing.T) { - tempDir := t.TempDir() - - cfg := &config.Config{ - DatabaseBaseDir: tempDir, - TransactionCleanupIntervalSeconds: 3600, - TransactionRetentionPeriodSeconds: 86400, - ChainConfigs: map[string]config.ChainSpecificConfig{ - "eip155:11155111": { - CleanupIntervalSeconds: intPtr(1800), - RetentionPeriodSeconds: intPtr(43200), - }, - }, - } - - dbManager := NewChainDBManager(tempDir, log, cfg) - defer dbManager.CloseAll() - - cleaner := NewPerChainTransactionCleaner(dbManager, cfg, log) - require.NotNil(t, cleaner) - - // Test that the cleaner is properly initialized - assert.NotNil(t, cleaner.dbManager) - assert.NotNil(t, cleaner.config) - assert.NotNil(t, cleaner.chainCleaners) - assert.NotNil(t, cleaner.ctx) - assert.NotNil(t, cleaner.cancel) - }) - - t.Run("PerChainCleaner_StartStop", func(t *testing.T) { - tempDir := t.TempDir() - - cfg := &config.Config{ - DatabaseBaseDir: tempDir, - TransactionCleanupIntervalSeconds: 1, // Very short for testing - TransactionRetentionPeriodSeconds: 1, - ChainConfigs: map[string]config.ChainSpecificConfig{ - "eip155:11155111": { - CleanupIntervalSeconds: intPtr(1), - RetentionPeriodSeconds: intPtr(1), - }, - }, - } - - dbManager := NewChainDBManager(tempDir, log, cfg) - defer dbManager.CloseAll() - - // Create databases for testing - db1, err := dbManager.GetChainDB("eip155:11155111") - require.NoError(t, err) - require.NotNil(t, db1) - - db2, err := dbManager.GetChainDB("solana:devnet") - require.NoError(t, err) - require.NotNil(t, db2) - - cleaner := NewPerChainTransactionCleaner(dbManager, cfg, log) - - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - // Start the cleaner - err = cleaner.Start(ctx) - require.NoError(t, err) - - // Give it time to start cleaners - time.Sleep(100 * time.Millisecond) - - // Check that cleaners were created for 
existing databases - status := cleaner.GetCleanerStatus() - assert.Len(t, status, 2, "should have cleaners for both chains") - - // Verify chain-specific settings are applied - if eth, ok := status["eip155:11155111"]; ok { - assert.Equal(t, 1*time.Second, eth.CleanupInterval) - assert.Equal(t, 1*time.Second, eth.RetentionPeriod) - } - - // Verify global defaults are used for chain without specific config - if sol, ok := status["solana:devnet"]; ok { - assert.Equal(t, 1*time.Second, sol.CleanupInterval) - assert.Equal(t, 1*time.Second, sol.RetentionPeriod) - } - - // Stop the cleaner - cleaner.Stop() - - // Verify all cleaners are stopped - status = cleaner.GetCleanerStatus() - assert.Len(t, status, 0, "all cleaners should be stopped") - }) - - t.Run("PerChainCleaner_DynamicChainAddition", func(t *testing.T) { - tempDir := t.TempDir() - - cfg := &config.Config{ - DatabaseBaseDir: tempDir, - TransactionCleanupIntervalSeconds: 2, - TransactionRetentionPeriodSeconds: 2, - } - - dbManager := NewChainDBManager(tempDir, log, cfg) - defer dbManager.CloseAll() - - cleaner := NewPerChainTransactionCleaner(dbManager, cfg, log) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // Start with no databases - err := cleaner.Start(ctx) - require.NoError(t, err) - - // Initially should have no cleaners - status := cleaner.GetCleanerStatus() - assert.Len(t, status, 0, "should have no cleaners initially") - - // Add a new database - db1, err := dbManager.GetChainDB("eip155:11155111") - require.NoError(t, err) - require.NotNil(t, db1) - - // Manually trigger check for new chains (normally done periodically) - cleaner.checkForNewChains() - - // Should now have a cleaner for the new chain - status = cleaner.GetCleanerStatus() - assert.Len(t, status, 1, "should have cleaner for new chain") - - // Stop the cleaner - cleaner.Stop() - }) - - t.Run("PerChainCleaner_ConfigUpdate", func(t *testing.T) { - tempDir := t.TempDir() - - cfg := 
&config.Config{ - DatabaseBaseDir: tempDir, - TransactionCleanupIntervalSeconds: 3600, - TransactionRetentionPeriodSeconds: 86400, - ChainConfigs: map[string]config.ChainSpecificConfig{ - "eip155:11155111": { - CleanupIntervalSeconds: intPtr(1800), - RetentionPeriodSeconds: intPtr(43200), - }, - }, - } - - dbManager := NewChainDBManager(tempDir, log, cfg) - defer dbManager.CloseAll() - - // Create database - db1, err := dbManager.GetChainDB("eip155:11155111") - require.NoError(t, err) - require.NotNil(t, db1) - - cleaner := NewPerChainTransactionCleaner(dbManager, cfg, log) - - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - // Start the cleaner - err = cleaner.Start(ctx) - require.NoError(t, err) - - // Verify initial settings - status := cleaner.GetCleanerStatus() - if eth, ok := status["eip155:11155111"]; ok { - assert.Equal(t, 30*time.Minute, eth.CleanupInterval) - assert.Equal(t, 12*time.Hour, eth.RetentionPeriod) - } - - // Update configuration - cfg.ChainConfigs["eip155:11155111"] = config.ChainSpecificConfig{ - CleanupIntervalSeconds: intPtr(900), // 15 minutes - RetentionPeriodSeconds: intPtr(21600), // 6 hours - } - - // Update the cleaner configuration - cleaner.UpdateChainConfig("eip155:11155111") - - // Give it time to restart - time.Sleep(100 * time.Millisecond) - - // Verify updated settings - status = cleaner.GetCleanerStatus() - if eth, ok := status["eip155:11155111"]; ok { - assert.Equal(t, 15*time.Minute, eth.CleanupInterval) - assert.Equal(t, 6*time.Hour, eth.RetentionPeriod) - } - - // Stop the cleaner - cleaner.Stop() - }) -} - -// Helper function to create int pointers -func intPtr(i int) *int { - return &i -} - -// TestPerChainTransactionCleanerDatabaseOperations tests actual database cleanup functionality -func TestPerChainTransactionCleanerDatabaseOperations(t *testing.T) { - // Setup test config - cfg := &config.Config{ - TransactionCleanupIntervalSeconds: 1, // 1 second for testing - 
TransactionRetentionPeriodSeconds: 3600, // 1 hour - LogLevel: 0, // Debug level - LogFormat: "console", - } - - // Setup logger - log := logger.Init(*cfg) - - // Setup ChainDBManager - dbManager := NewInMemoryChainDBManager(log, cfg) - defer dbManager.CloseAll() - - // Get database for test chain - chainID := "eip155:1" - database, err := dbManager.GetChainDB(chainID) - require.NoError(t, err) - - // Create test transactions - now := time.Now() - - // Old confirmed transaction (should be deleted) - oldConfirmed := &store.ChainTransaction{ - TxHash: "0x111", - BlockNumber: 100, - EventIdentifier: "event1", - Status: "confirmed", - Confirmations: 15, - } - // Set UpdatedAt to 25 hours ago (older than retention period) - oldTime := now.Add(-25 * time.Hour) - - // Recent confirmed transaction (should NOT be deleted) - recentConfirmed := &store.ChainTransaction{ - TxHash: "0x222", - BlockNumber: 200, - EventIdentifier: "event2", - Status: "confirmed", - Confirmations: 10, - } - // Set UpdatedAt to 30 minutes ago (clearly within retention period) - recentTime := now.Add(-30 * time.Minute) - - // Old pending transaction (should NOT be deleted regardless of age) - oldPending := &store.ChainTransaction{ - TxHash: "0x333", - BlockNumber: 150, - EventIdentifier: "event3", - Status: "pending", - Confirmations: 5, - } - - // Insert test transactions - require.NoError(t, database.Client().Create(oldConfirmed).Error) - require.NoError(t, database.Client().Create(recentConfirmed).Error) - require.NoError(t, database.Client().Create(oldPending).Error) - - // Manually set the UpdatedAt timestamps since GORM auto-sets them - require.NoError(t, database.Client().Model(oldConfirmed).Update("updated_at", oldTime).Error) - require.NoError(t, database.Client().Model(recentConfirmed).Update("updated_at", recentTime).Error) - require.NoError(t, database.Client().Model(oldPending).Update("updated_at", oldTime).Error) - - // Verify initial state - var count int64 - require.NoError(t, 
database.Client().Model(&store.ChainTransaction{}).Count(&count).Error) - require.Equal(t, int64(3), count) - - t.Run("DeleteOldConfirmedTransactions", func(t *testing.T) { - // Test the database method directly - deletedCount, err := database.DeleteOldConfirmedTransactions(time.Duration(cfg.TransactionRetentionPeriodSeconds) * time.Second) - require.NoError(t, err) - require.Equal(t, int64(1), deletedCount) // Only old confirmed should be deleted - - // Verify remaining transactions - var remaining []store.ChainTransaction - require.NoError(t, database.Client().Find(&remaining).Error) - require.Len(t, remaining, 2) - - // Check that the right transactions remain - txHashes := make(map[string]bool) - for _, tx := range remaining { - txHashes[tx.TxHash] = true - } - require.True(t, txHashes["0x222"]) // Recent confirmed should remain - require.True(t, txHashes["0x333"]) // Old pending should remain - require.False(t, txHashes["0x111"]) // Old confirmed should be gone - }) - - // Create a new old confirmed transaction for the cleaner service test - newOldConfirmed := &store.ChainTransaction{ - - TxHash: "0x111_new", // Use different hash to avoid constraint violation - BlockNumber: 100, - EventIdentifier: "event1_new", - Status: "confirmed", - Confirmations: 15, - } - require.NoError(t, database.Client().Create(newOldConfirmed).Error) - require.NoError(t, database.Client().Model(newOldConfirmed).Update("updated_at", oldTime).Error) - - t.Run("PerChainTransactionCleanerService", func(t *testing.T) { - // Create per-chain transaction cleaner - cleaner := NewPerChainTransactionCleaner(dbManager, cfg, log) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // Start the cleaner - require.NoError(t, cleaner.Start(ctx)) - - // Wait for at least one cleanup cycle - time.Sleep(200 * time.Millisecond) - - // Stop the cleaner - cleaner.Stop() - - // Verify cleanup occurred - var finalCount int64 - require.NoError(t, 
database.Client().Model(&store.ChainTransaction{}).Count(&finalCount).Error) - require.Equal(t, int64(2), finalCount) // Should have 2 transactions left - - // Verify the correct transactions remain - var final []store.ChainTransaction - require.NoError(t, database.Client().Find(&final).Error) - - txHashes := make(map[string]bool) - for _, tx := range final { - txHashes[tx.TxHash] = true - } - require.True(t, txHashes["0x222"]) // Recent confirmed should remain - require.True(t, txHashes["0x333"]) // Old pending should remain - }) -} - -// TestPerChainTransactionCleanerEdgeCases tests edge cases for transaction cleanup -func TestPerChainTransactionCleanerEdgeCases(t *testing.T) { - // Setup test config - cfg := &config.Config{ - TransactionCleanupIntervalSeconds: 1, // 1 second for testing - TransactionRetentionPeriodSeconds: 3600, // 1 hour - LogLevel: 0, - LogFormat: "console", - } - - log := logger.Init(*cfg) - - // Setup ChainDBManager - dbManager := NewInMemoryChainDBManager(log, cfg) - defer dbManager.CloseAll() - - // Get database for test chain - chainID := "eip155:1" - database, err := dbManager.GetChainDB(chainID) - require.NoError(t, err) - - t.Run("EmptyDatabase", func(t *testing.T) { - // Test cleanup with no transactions - deletedCount, err := database.DeleteOldConfirmedTransactions(time.Duration(cfg.TransactionRetentionPeriodSeconds) * time.Second) - require.NoError(t, err) - require.Equal(t, int64(0), deletedCount) - }) - - t.Run("OnlyRecentTransactions", func(t *testing.T) { - // Create only recent transactions - recent := &store.ChainTransaction{ - TxHash: "0x456", - BlockNumber: 300, - EventIdentifier: "withdraw", - Status: "confirmed", - Confirmations: 12, - } - require.NoError(t, database.Client().Create(recent).Error) - - deletedCount, err := database.DeleteOldConfirmedTransactions(time.Duration(cfg.TransactionRetentionPeriodSeconds) * time.Second) - require.NoError(t, err) - require.Equal(t, int64(0), deletedCount) // Nothing should be 
deleted - - // Verify transaction still exists - var count int64 - require.NoError(t, database.Client().Model(&store.ChainTransaction{}).Count(&count).Error) - require.Equal(t, int64(1), count) - }) - - t.Run("DifferentStatuses", func(t *testing.T) { - // Clean up from previous test - database.Client().Exec("DELETE FROM chain_transactions") - - now := time.Now() - oldTime := now.Add(-25 * time.Hour) - - // Create transactions with different statuses, all old - statuses := []string{"pending", "fast_confirmed", "confirmed", "failed", "reorged"} - - for i, status := range statuses { - tx := &store.ChainTransaction{ - TxHash: string(rune('a' + i)) + "00", - BlockNumber: uint64(400 + i), - EventIdentifier: "test_event" + string(rune('5' + i)), - Status: status, - Confirmations: 10, - } - require.NoError(t, database.Client().Create(tx).Error) - require.NoError(t, database.Client().Model(tx).Update("updated_at", oldTime).Error) - } - - // Only "confirmed" should be deleted - deletedCount, err := database.DeleteOldConfirmedTransactions(time.Duration(cfg.TransactionRetentionPeriodSeconds) * time.Second) - require.NoError(t, err) - require.Equal(t, int64(1), deletedCount) // Only the "confirmed" one - - // Verify remaining transactions - var remaining []store.ChainTransaction - require.NoError(t, database.Client().Find(&remaining).Error) - require.Len(t, remaining, 4) // All except "confirmed" - - for _, tx := range remaining { - require.NotEqual(t, "confirmed", tx.Status) - } - }) -} \ No newline at end of file diff --git a/universalClient/keys/keys.go b/universalClient/keys/keys.go deleted file mode 100644 index f8a173d2..00000000 --- a/universalClient/keys/keys.go +++ /dev/null @@ -1,89 +0,0 @@ -package keys - -import ( - "fmt" - - "github.com/cosmos/cosmos-sdk/crypto" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - - -var _ UniversalValidatorKeys = &Keys{} - -// Keys 
manages all the keys used by Universal Validator -type Keys struct { - signerName string // Hot key name in keyring - kb keyring.Keyring // Cosmos SDK keyring - OperatorAddress sdk.AccAddress // Operator (validator) address for reference - hotkeyPassword string // Password for file backend -} - - -// NewKeysWithKeybase creates a new instance of Keys -func NewKeysWithKeybase( - kb keyring.Keyring, - operatorAddress sdk.AccAddress, - hotkeyName string, - hotkeyPassword string, -) *Keys { - return &Keys{ - signerName: hotkeyName, - kb: kb, - OperatorAddress: operatorAddress, - hotkeyPassword: hotkeyPassword, - } -} - - - -// GetAddress returns the hot key address -func (k *Keys) GetAddress() (sdk.AccAddress, error) { - info, err := k.kb.Key(k.signerName) - if err != nil { - return nil, fmt.Errorf("failed to get key %s: %w", k.signerName, err) - } - - addr, err := info.GetAddress() - if err != nil { - return nil, fmt.Errorf("failed to get address from key info: %w", err) - } - - return addr, nil -} - -// GetPrivateKey returns the private key (requires password for file backend) -func (k *Keys) GetPrivateKey(password string) (cryptotypes.PrivKey, error) { - // For file backend, use provided password; for test backend, password is ignored - var actualPassword string - if k.kb.Backend() == keyring.BackendFile { - if password == "" { - return nil, fmt.Errorf("password is required for file backend") - } - actualPassword = password - } - - privKeyArmor, err := k.kb.ExportPrivKeyArmor(k.signerName, actualPassword) - if err != nil { - return nil, fmt.Errorf("failed to export private key: %w", err) - } - - priKey, _, err := crypto.UnarmorDecryptPrivKey(privKeyArmor, actualPassword) - if err != nil { - return nil, fmt.Errorf("failed to unarmor private key: %w", err) - } - - return priKey, nil -} - - -// GetHotkeyPassword returns the password to be used -// returns empty if no password is needed (test backend) -func (k *Keys) GetHotkeyPassword() string { - if k.kb.Backend() == 
keyring.BackendFile { - return k.hotkeyPassword - } - return "" -} - diff --git a/universalClient/logger/logger.go b/universalClient/logger/logger.go index 12604b21..ca97defe 100644 --- a/universalClient/logger/logger.go +++ b/universalClient/logger/logger.go @@ -6,15 +6,13 @@ import ( "time" "github.com/rs/zerolog" - - "github.com/pushchain/push-chain-node/universalClient/config" ) -// Init sets up the global zerolog logger based on config. +// New creates a new zerolog logger with the specified configuration. // Supports console/json format, level filtering, and optional sampling. -func Init(cfg config.Config) zerolog.Logger { +func New(logLevel int, logFormat string, logSampler bool) zerolog.Logger { var writer io.Writer = os.Stdout - if cfg.LogFormat != "json" { + if logFormat != "json" { writer = zerolog.ConsoleWriter{ Out: os.Stdout, TimeFormat: time.RFC3339, @@ -22,12 +20,12 @@ func Init(cfg config.Config) zerolog.Logger { } logger := zerolog.New(writer). - Level(zerolog.Level(cfg.LogLevel)). + Level(zerolog.Level(logLevel)). With(). Timestamp(). 
Logger() - if cfg.LogSampler { + if logSampler { logger = logger.Sample(&zerolog.BasicSampler{N: 5}) } return logger diff --git a/universalClient/logger/logger_test.go b/universalClient/logger/logger_test.go index 13300a71..6b1fdae5 100644 --- a/universalClient/logger/logger_test.go +++ b/universalClient/logger/logger_test.go @@ -6,12 +6,11 @@ import ( "strings" "testing" - "github.com/pushchain/push-chain-node/universalClient/config" "github.com/rs/zerolog" "github.com/stretchr/testify/require" ) -func TestInitVariants(t *testing.T) { +func TestNewVariants(t *testing.T) { t.Run("json format logs expected fields", func(t *testing.T) { r, w, _ := os.Pipe() defer r.Close() @@ -20,11 +19,7 @@ func TestInitVariants(t *testing.T) { os.Stdout = w defer func() { os.Stdout = stdout }() - logger := Init(config.Config{ - LogFormat: "json", - LogLevel: int(zerolog.InfoLevel), - LogSampler: false, - }) + logger := New(int(zerolog.InfoLevel), "json", false) logger.Info().Str("key", "value").Msg("json_test") @@ -45,11 +40,7 @@ func TestInitVariants(t *testing.T) { os.Stdout = w defer func() { os.Stdout = stdout }() - logger := Init(config.Config{ - LogFormat: "console", - LogLevel: int(zerolog.DebugLevel), - LogSampler: false, - }) + logger := New(int(zerolog.DebugLevel), "console", false) logger.Debug().Str("env", "test").Msg("console_log") @@ -70,11 +61,7 @@ func TestInitVariants(t *testing.T) { os.Stdout = w defer func() { os.Stdout = stdout }() - logger := Init(config.Config{ - LogFormat: "json", - LogLevel: int(zerolog.InfoLevel), - LogSampler: true, - }) + logger := New(int(zerolog.InfoLevel), "json", true) for i := 0; i < 20; i++ { logger.Info().Int("count", i).Msg("sampled") diff --git a/universalClient/pushcore/pushCore.go b/universalClient/pushcore/pushCore.go index b2be90ba..6721cef9 100644 --- a/universalClient/pushcore/pushCore.go +++ b/universalClient/pushcore/pushCore.go @@ -1,21 +1,23 @@ +// Package pushcore provides a client for interacting with Push Chain gRPC 
endpoints. +// It implements a fan-out pattern that tries multiple endpoints in round-robin order +// to provide high availability and fault tolerance. package pushcore import ( "context" "errors" "fmt" + "math/big" "net/url" "strings" "sync/atomic" - "time" cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/types/query" "github.com/cosmos/cosmos-sdk/types/tx" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/cosmos/cosmos-sdk/x/authz" - "github.com/pushchain/push-chain-node/universalClient/constant" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" uregistrytypes "github.com/pushchain/push-chain-node/x/uregistry/types" utsstypes "github.com/pushchain/push-chain-node/x/utss/types" uvalidatortypes "github.com/pushchain/push-chain-node/x/uvalidator/types" @@ -25,22 +27,38 @@ import ( "google.golang.org/grpc/credentials/insecure" ) -// Client is a minimal fan-out client over multiple gRPC endpoints. -// Each call tries endpoints in round-robin order and returns the first success. +// Client is a fan-out client that connects to multiple Push Chain gRPC endpoints. +// It implements round-robin failover, trying each endpoint in sequence until one succeeds. 
type Client struct { - logger zerolog.Logger - eps []uregistrytypes.QueryClient - uvalidatorClients []uvalidatortypes.QueryClient - utssClients []utsstypes.QueryClient - cmtClients []cmtservice.ServiceClient - txClients []tx.ServiceClient // for querying transactions by events - conns []*grpc.ClientConn // owned connections for Close() - rr uint32 // round-robin counter + logger zerolog.Logger // Logger for client operations + eps []uregistrytypes.QueryClient // Registry query clients + uvalidatorClients []uvalidatortypes.QueryClient // Universal validator query clients + utssClients []utsstypes.QueryClient // TSS query clients + uexecutorClients []uexecutortypes.QueryClient // Executor query clients (for gas price queries) + cmtClients []cmtservice.ServiceClient // CometBFT service clients + txClients []tx.ServiceClient // Transaction service clients + conns []*grpc.ClientConn // Owned gRPC connections (for cleanup) + rr uint32 // Round-robin counter for endpoint selection } -// New dials the provided gRPC URLs (best-effort) and builds a Client. -// - Uses insecure transport by default. -// - Skips endpoints that fail to dial; requires at least one success. +// TxResult represents a transaction result with its associated events and metadata. +type TxResult struct { + TxHash string // Transaction hash + Height int64 // Block height where the transaction was included + TxResponse *tx.GetTxResponse // Full transaction response from the chain +} + +// New creates a new Client by dialing the provided gRPC URLs. +// It attempts to connect to all endpoints and skips any that fail to dial. +// At least one endpoint must succeed, otherwise an error is returned. 
+// +// Parameters: +// - urls: List of gRPC endpoint URLs (schemes are automatically detected) +// - logger: Logger instance for client operations +// +// Returns: +// - *Client: A configured client instance, or nil on error +// - error: Error if all endpoints fail to connect func New(urls []string, logger zerolog.Logger) (*Client, error) { if len(urls) == 0 { return nil, errors.New("pushcore: at least one gRPC URL is required") @@ -61,6 +79,7 @@ func New(urls []string, logger zerolog.Logger) (*Client, error) { c.eps = append(c.eps, uregistrytypes.NewQueryClient(conn)) c.uvalidatorClients = append(c.uvalidatorClients, uvalidatortypes.NewQueryClient(conn)) c.utssClients = append(c.utssClients, utsstypes.NewQueryClient(conn)) + c.uexecutorClients = append(c.uexecutorClients, uexecutortypes.NewQueryClient(conn)) c.cmtClients = append(c.cmtClients, cmtservice.NewServiceClient(conn)) c.txClients = append(c.txClients, tx.NewServiceClient(conn)) } @@ -74,7 +93,8 @@ func New(urls []string, logger zerolog.Logger) (*Client, error) { return c, nil } -// Close closes all owned connections. +// Close gracefully closes all gRPC connections owned by the client. +// Returns the first error encountered, if any. func (c *Client) Close() error { var firstErr error for _, conn := range c.conns { @@ -86,49 +106,354 @@ func (c *Client) Close() error { c.eps = nil c.uvalidatorClients = nil c.utssClients = nil + c.uexecutorClients = nil c.cmtClients = nil c.txClients = nil return firstErr } -// GetAllChainConfigs tries each endpoint once in round-robin order. -// If all endpoints fail, returns the last error. -func (c *Client) GetAllChainConfigs(ctx context.Context) ([]*uregistrytypes.ChainConfig, error) { - if len(c.eps) == 0 { - return nil, errors.New("pushcore: no endpoints configured") - } - - start := int(atomic.AddUint32(&c.rr, 1)-1) % len(c.eps) +// retryWithRoundRobin executes a function across multiple endpoints in round-robin order. 
+// It tries each endpoint until one succeeds or all fail. +// +// Parameters: +// - numClients: Number of client endpoints available +// - rrCounter: Pointer to round-robin counter (atomic) +// - operation: Function to execute for each attempt, receives the endpoint index +// - operationName: Name of the operation (for logging and error messages) +// - logger: Logger for debug messages +// +// Returns: +// - T: Result from the operation if successful +// - error: Error if all endpoints fail +func retryWithRoundRobin[T any]( + numClients int, + rrCounter *uint32, + operation func(idx int) (T, error), + operationName string, + logger zerolog.Logger, +) (T, error) { + var zero T + if numClients == 0 { + return zero, errors.New("pushcore: no endpoints configured") + } + + start := int(atomic.AddUint32(rrCounter, 1)-1) % numClients var lastErr error - for i := 0; i < len(c.eps); i++ { - idx := (start + i) % len(c.eps) - qc := c.eps[idx] + for i := 0; i < numClients; i++ { + idx := (start + i) % numClients - resp, err := qc.AllChainConfigs(ctx, &uregistrytypes.QueryAllChainConfigsRequest{}) + result, err := operation(idx) if err == nil { - return resp.Configs, nil + return result, nil } lastErr = err - c.logger.Debug(). + logger.Debug(). Int("attempt", i+1). Int("endpoint_index", idx). Err(err). - Msg("GetAllChainConfigs failed; trying next endpoint") + Msgf("%s failed; trying next endpoint", operationName) } - return nil, fmt.Errorf("pushcore: GetAllChainConfigs failed on all %d endpoints: %w", len(c.eps), lastErr) + return zero, fmt.Errorf("pushcore: %s failed on all %d endpoints: %w", operationName, numClients, lastErr) +} + +// GetAllChainConfigs retrieves all chain configurations from Push Chain. +// It tries each endpoint in round-robin order until one succeeds. 
+// +// Parameters: +// - ctx: Context for the request +// +// Returns: +// - []*uregistrytypes.ChainConfig: List of chain configurations +// - error: Error if all endpoints fail +func (c *Client) GetAllChainConfigs(ctx context.Context) ([]*uregistrytypes.ChainConfig, error) { + return retryWithRoundRobin( + len(c.eps), + &c.rr, + func(idx int) ([]*uregistrytypes.ChainConfig, error) { + resp, err := c.eps[idx].AllChainConfigs(ctx, &uregistrytypes.QueryAllChainConfigsRequest{}) + if err != nil { + return nil, err + } + return resp.Configs, nil + }, + "GetAllChainConfigs", + c.logger, + ) +} + +// GetLatestBlock retrieves the latest block from Push Chain. +// It tries each endpoint in round-robin order until one succeeds. +// +// Parameters: +// - ctx: Context for the request +// +// Returns: +// - uint64: Latest block height +// - error: Error if all endpoints fail +func (c *Client) GetLatestBlock(ctx context.Context) (uint64, error) { + return retryWithRoundRobin( + len(c.cmtClients), + &c.rr, + func(idx int) (uint64, error) { + resp, err := c.cmtClients[idx].GetLatestBlock(ctx, &cmtservice.GetLatestBlockRequest{}) + if err != nil { + return 0, err + } + if resp.SdkBlock == nil { + return 0, errors.New("pushcore: SdkBlock is nil") + } + return uint64(resp.SdkBlock.Header.Height), nil + }, + "GetLatestBlock", + c.logger, + ) +} + +// GetAllUniversalValidators retrieves all universal validators from Push Chain. +// It tries each endpoint in round-robin order until one succeeds. 
+// +// Parameters: +// - ctx: Context for the request +// +// Returns: +// - []*uvalidatortypes.UniversalValidator: List of universal validators +// - error: Error if all endpoints fail +func (c *Client) GetAllUniversalValidators(ctx context.Context) ([]*uvalidatortypes.UniversalValidator, error) { + return retryWithRoundRobin( + len(c.uvalidatorClients), + &c.rr, + func(idx int) ([]*uvalidatortypes.UniversalValidator, error) { + resp, err := c.uvalidatorClients[idx].AllUniversalValidators(ctx, &uvalidatortypes.QueryUniversalValidatorsSetRequest{}) + if err != nil { + return nil, err + } + return resp.UniversalValidator, nil + }, + "GetAllUniversalValidators", + c.logger, + ) +} + +// GetCurrentKey retrieves the current TSS key from Push Chain. +// It tries each endpoint in round-robin order until one succeeds. +// +// Parameters: +// - ctx: Context for the request +// +// Returns: +// - *utsstypes.TssKey: TSS key +// - error: Error if all endpoints fail or no key exists +func (c *Client) GetCurrentKey(ctx context.Context) (*utsstypes.TssKey, error) { + return retryWithRoundRobin( + len(c.utssClients), + &c.rr, + func(idx int) (*utsstypes.TssKey, error) { + resp, err := c.utssClients[idx].CurrentKey(ctx, &utsstypes.QueryCurrentKeyRequest{}) + if err != nil { + return nil, err + } + if resp.Key == nil { + return nil, errors.New("pushcore: no TSS key found") + } + return resp.Key, nil + }, + "GetCurrentKey", + c.logger, + ) +} + +// GetTxsByEvents queries transactions matching the given event query. +// The query should follow Cosmos SDK event query format, e.g., "tss_process_initiated.process_id EXISTS". 
+// +// Parameters: +// - ctx: Context for the request +// - eventQuery: Cosmos SDK event query string +// - minHeight: Minimum block height to search (0 means no minimum) +// - maxHeight: Maximum block height to search (0 means no maximum) +// - limit: Maximum number of results to return (0 defaults to 100) +// +// Returns: +// - []*TxResult: List of matching transaction results +// - error: Error if all endpoints fail +func (c *Client) GetTxsByEvents(ctx context.Context, eventQuery string, minHeight, maxHeight uint64, limit uint64) ([]*TxResult, error) { + // Build the query events (same for all attempts) + events := []string{eventQuery} + if minHeight > 0 { + events = append(events, fmt.Sprintf("tx.height>=%d", minHeight)) + } + if maxHeight > 0 { + events = append(events, fmt.Sprintf("tx.height<=%d", maxHeight)) + } + + // Set pagination limit + pageLimit := limit + if pageLimit == 0 { + pageLimit = 100 // default limit + } + + // Join events with AND to create query string (SDK v0.50+ uses Query field) + queryString := strings.Join(events, " AND ") + + return retryWithRoundRobin( + len(c.txClients), + &c.rr, + func(idx int) ([]*TxResult, error) { + req := &tx.GetTxsEventRequest{ + Query: queryString, + Pagination: &query.PageRequest{ + Limit: pageLimit, + }, + OrderBy: tx.OrderBy_ORDER_BY_ASC, + } + + resp, err := c.txClients[idx].GetTxsEvent(ctx, req) + if err != nil { + return nil, err + } + + results := make([]*TxResult, 0, len(resp.TxResponses)) + for _, txResp := range resp.TxResponses { + results = append(results, &TxResult{ + TxHash: txResp.TxHash, + Height: txResp.Height, + TxResponse: &tx.GetTxResponse{ + Tx: resp.Txs[len(results)], + TxResponse: txResp, + }, + }) + } + return results, nil + }, + "GetTxsByEvents", + c.logger, + ) +} + +// GetGasPrice retrieves the median gas price for a specific chain from the on-chain oracle. +// The gas price is voted on by universal validators and stored on-chain. 
+// +// Parameters: +// - ctx: Context for the request +// - chainID: Chain identifier in CAIP-2 format (e.g., "eip155:84532" for Base Sepolia) +// +// Returns: +// - *big.Int: Median gas price in the chain's native unit (Wei for EVM chains, lamports for Solana) +// - error: Error if all endpoints fail or chainID is invalid +func (c *Client) GetGasPrice(ctx context.Context, chainID string) (*big.Int, error) { + if chainID == "" { + return nil, errors.New("pushcore: chainID is required") + } + + return retryWithRoundRobin( + len(c.uexecutorClients), + &c.rr, + func(idx int) (*big.Int, error) { + resp, err := c.uexecutorClients[idx].GasPrice(ctx, &uexecutortypes.QueryGasPriceRequest{ + ChainId: chainID, + }) + if err != nil { + return nil, err + } + if resp.GasPrice == nil { + return nil, errors.New("pushcore: GasPrice response is nil") + } + + // Get the median price using MedianIndex + if len(resp.GasPrice.Prices) == 0 { + return nil, fmt.Errorf("pushcore: no gas prices available for chain %s", chainID) + } + + medianIdx := resp.GasPrice.MedianIndex + if medianIdx >= uint64(len(resp.GasPrice.Prices)) { + // Fallback to first price if median index is out of bounds + medianIdx = 0 + } + + medianPrice := resp.GasPrice.Prices[medianIdx] + return new(big.Int).SetUint64(medianPrice), nil + }, + "GetGasPrice", + c.logger, + ) +} + +// GetGranteeGrants queries AuthZ grants for a grantee using round-robin logic. +// This function only queries and returns raw grant data; it does not perform validation or processing. 
+// +// Parameters: +// - ctx: Context for the request +// - granteeAddr: Address of the grantee to query grants for +// +// Returns: +// - *authz.QueryGranteeGrantsResponse: Raw grant response from the chain +// - error: Error if all endpoints fail +func (c *Client) GetGranteeGrants(ctx context.Context, granteeAddr string) (*authz.QueryGranteeGrantsResponse, error) { + // Create authz clients from existing connections + authzClients := make([]authz.QueryClient, len(c.conns)) + for i, conn := range c.conns { + authzClients[i] = authz.NewQueryClient(conn) + } + + return retryWithRoundRobin( + len(authzClients), + &c.rr, + func(idx int) (*authz.QueryGranteeGrantsResponse, error) { + return authzClients[idx].GranteeGrants(ctx, &authz.QueryGranteeGrantsRequest{ + Grantee: granteeAddr, + }) + }, + "GetGranteeGrants", + c.logger, + ) +} + +// GetAccount retrieves account information for a given address. +// It tries each endpoint in round-robin order until one succeeds. +// +// Parameters: +// - ctx: Context for the request +// - address: Bech32 address of the account +// +// Returns: +// - *authtypes.QueryAccountResponse: Account response +// - error: Error if all endpoints fail +func (c *Client) GetAccount(ctx context.Context, address string) (*authtypes.QueryAccountResponse, error) { + // Create auth clients from existing connections + authClients := make([]authtypes.QueryClient, len(c.conns)) + for i, conn := range c.conns { + authClients[i] = authtypes.NewQueryClient(conn) + } + + return retryWithRoundRobin( + len(authClients), + &c.rr, + func(idx int) (*authtypes.QueryAccountResponse, error) { + return authClients[idx].Account(ctx, &authtypes.QueryAccountRequest{ + Address: address, + }) + }, + "GetAccount", + c.logger, + ) } // CreateGRPCConnection creates a gRPC connection with appropriate transport security. -// It automatically detects whether to use TLS based on the URL scheme (https:// or http://). 
+// It automatically detects whether to use TLS based on the URL scheme. +// // The function handles: // - https:// URLs: Uses TLS with default credentials // - http:// or no scheme: Uses insecure connection // - Automatically adds default port 9090 if no port is specified // -// The endpoint is processed to remove the scheme prefix before dialing. +// Parameters: +// - endpoint: gRPC endpoint URL (scheme is optional, port defaults to 9090) +// +// Returns: +// - *grpc.ClientConn: gRPC client connection +// - error: Error if connection fails func CreateGRPCConnection(endpoint string) (*grpc.ClientConn, error) { if endpoint == "" { return nil, fmt.Errorf("empty endpoint provided") @@ -176,13 +501,40 @@ func CreateGRPCConnection(endpoint string) (*grpc.ClientConn, error) { return conn, nil } +// BroadcastTx broadcasts a signed transaction to the chain. +// It tries each endpoint in round-robin order until one succeeds. +// +// Parameters: +// - ctx: Context for the request +// - txBytes: Signed transaction bytes +// +// Returns: +// - *tx.BroadcastTxResponse: Broadcast response containing tx hash and result +// - error: Error if all endpoints fail +func (c *Client) BroadcastTx(ctx context.Context, txBytes []byte) (*tx.BroadcastTxResponse, error) { + return retryWithRoundRobin( + len(c.txClients), + &c.rr, + func(idx int) (*tx.BroadcastTxResponse, error) { + return c.txClients[idx].BroadcastTx(ctx, &tx.BroadcastTxRequest{ + TxBytes: txBytes, + Mode: tx.BroadcastMode_BROADCAST_MODE_SYNC, + }) + }, + "BroadcastTx", + c.logger, + ) +} + // ExtractHostnameFromURL extracts the hostname from a URL string. -// It handles various URL formats including: -// - Full URLs with scheme (https://example.com:443) -// - URLs without scheme (example.com:9090) -// - Plain hostnames (example.com) +// It handles various URL formats including full URLs with scheme, URLs without scheme, and plain hostnames. // -// The function returns just the hostname without port or scheme. 
+// Parameters: +// - grpcURL: URL string in any format (with or without scheme/port) +// +// Returns: +// - string: Hostname without port or scheme +// - error: Error if hostname cannot be extracted func ExtractHostnameFromURL(grpcURL string) (string, error) { if grpcURL == "" { return "", fmt.Errorf("empty URL provided") @@ -224,319 +576,3 @@ func ExtractHostnameFromURL(grpcURL string) (string, error) { return hostname, nil } - -// QueryGrantsWithRetry queries AuthZ grants for a grantee with retry logic -func QueryGrantsWithRetry(grpcURL, granteeAddr string, cdc *codec.ProtoCodec, log zerolog.Logger) (string, []string, error) { - // Simple retry: 15s, then 30s - timeouts := []time.Duration{15 * time.Second, 30 * time.Second} - - for attempt, timeout := range timeouts { - conn, err := CreateGRPCConnection(grpcURL) - if err != nil { - return "", nil, err - } - defer conn.Close() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - // Single gRPC call to get all grants - authzClient := authz.NewQueryClient(conn) - grantResp, err := authzClient.GranteeGrants(ctx, &authz.QueryGranteeGrantsRequest{ - Grantee: granteeAddr, - }) - - if err == nil { - // Process the grants - return processGrants(grantResp, granteeAddr, cdc) - } - - // On timeout, retry with longer timeout - if ctx.Err() == context.DeadlineExceeded && attempt < len(timeouts)-1 { - log.Warn(). - Int("attempt", attempt+1). - Dur("timeout", timeout). - Msg("Timeout querying grants, retrying...") - time.Sleep(2 * time.Second) - continue - } - - return "", nil, fmt.Errorf("failed to query grants: %w", err) - } - - return "", nil, fmt.Errorf("failed after all retries") -} - -// processGrants processes the AuthZ grant response -func processGrants(grantResp *authz.QueryGranteeGrantsResponse, granteeAddr string, cdc *codec.ProtoCodec) (string, []string, error) { - if len(grantResp.Grants) == 0 { - return "", nil, fmt.Errorf("no AuthZ grants found. 
Please grant permissions:\npuniversald tx authz grant %s generic --msg-type=/uexecutor.v1.MsgVoteInbound --from ", granteeAddr) - } - - authorizedMessages := make(map[string]string) // msgType -> granter - var granter string - - // Check each grant for our required message types - for _, grant := range grantResp.Grants { - if grant.Authorization == nil { - continue - } - - // Only process GenericAuthorization - if grant.Authorization.TypeUrl != "/cosmos.authz.v1beta1.GenericAuthorization" { - continue - } - - msgType, err := extractMessageType(grant.Authorization, cdc) - if err != nil { - continue // Skip if we can't extract the message type - } - - // Check if this is a required message - for _, requiredMsg := range constant.SupportedMessages { - if msgType == requiredMsg { - // Check if grant is not expired - if grant.Expiration != nil && grant.Expiration.Before(time.Now()) { - continue // Skip expired grants - } - - authorizedMessages[msgType] = grant.Granter - if granter == "" { - granter = grant.Granter - } - break - } - } - } - - // Check if all required messages are authorized - var missingMessages []string - for _, requiredMsg := range constant.SupportedMessages { - if _, ok := authorizedMessages[requiredMsg]; !ok { - missingMessages = append(missingMessages, requiredMsg) - } - } - - if len(missingMessages) > 0 { - return "", nil, fmt.Errorf("missing AuthZ grants for: %v\nGrant permissions using:\npuniversald tx authz grant %s generic --msg-type= --from ", missingMessages, granteeAddr) - } - - // Return authorized messages - authorizedList := make([]string, 0, len(authorizedMessages)) - for msgType := range authorizedMessages { - authorizedList = append(authorizedList, msgType) - } - - return granter, authorizedList, nil -} - -// extractMessageType extracts the message type from a GenericAuthorization -func extractMessageType(authzAny *codectypes.Any, cdc *codec.ProtoCodec) (string, error) { - var genericAuth authz.GenericAuthorization - if err := 
cdc.Unmarshal(authzAny.Value, &genericAuth); err != nil { - return "", err - } - return genericAuth.Msg, nil -} - -// GetLatestBlockNum returns the latest block number from Push Chain. -// It tries each endpoint in round-robin order until one succeeds. -func (c *Client) GetLatestBlockNum() (uint64, error) { - if len(c.cmtClients) == 0 { - return 0, errors.New("pushcore: no endpoints configured") - } - - start := int(atomic.AddUint32(&c.rr, 1)-1) % len(c.cmtClients) - - var lastErr error - for i := 0; i < len(c.cmtClients); i++ { - idx := (start + i) % len(c.cmtClients) - client := c.cmtClients[idx] - - resp, err := client.GetLatestBlock(context.Background(), &cmtservice.GetLatestBlockRequest{}) - if err == nil && resp.SdkBlock != nil { - return uint64(resp.SdkBlock.Header.Height), nil - } - - lastErr = err - c.logger.Debug(). - Int("attempt", i+1). - Int("endpoint_index", idx). - Err(err). - Msg("GetLatestBlockNum failed; trying next endpoint") - } - - return 0, fmt.Errorf("pushcore: GetLatestBlockNum failed on all %d endpoints: %w", len(c.cmtClients), lastErr) -} - -// GetUniversalValidators returns all universal validators from Push Chain. -// It tries each endpoint in round-robin order until one succeeds. -func (c *Client) GetUniversalValidators() ([]*uvalidatortypes.UniversalValidator, error) { - if len(c.uvalidatorClients) == 0 { - return nil, errors.New("pushcore: no endpoints configured") - } - - start := int(atomic.AddUint32(&c.rr, 1)-1) % len(c.uvalidatorClients) - - var lastErr error - for i := 0; i < len(c.uvalidatorClients); i++ { - idx := (start + i) % len(c.uvalidatorClients) - client := c.uvalidatorClients[idx] - - resp, err := client.AllUniversalValidators(context.Background(), &uvalidatortypes.QueryUniversalValidatorsSetRequest{}) - if err == nil { - return resp.UniversalValidator, nil - } - - lastErr = err - c.logger.Debug(). - Int("attempt", i+1). - Int("endpoint_index", idx). - Err(err). 
- Msg("GetUniversalValidators failed; trying next endpoint") - } - - return nil, fmt.Errorf("pushcore: GetUniversalValidators failed on all %d endpoints: %w", len(c.uvalidatorClients), lastErr) -} - -// GetCurrentTSSKeyId returns the current TSS key ID from Push Chain. -// It tries each endpoint in round-robin order until one succeeds. -// Returns empty string if no key exists. -func (c *Client) GetCurrentTSSKeyId() (string, error) { - if len(c.utssClients) == 0 { - return "", errors.New("pushcore: no endpoints configured") - } - - start := int(atomic.AddUint32(&c.rr, 1)-1) % len(c.utssClients) - - var lastErr error - for i := 0; i < len(c.utssClients); i++ { - idx := (start + i) % len(c.utssClients) - client := c.utssClients[idx] - - resp, err := client.CurrentKey(context.Background(), &utsstypes.QueryCurrentKeyRequest{}) - if err == nil { - if resp.Key != nil { - return resp.Key.KeyId, nil - } - return "", nil // No key exists - } - - lastErr = err - c.logger.Debug(). - Int("attempt", i+1). - Int("endpoint_index", idx). - Err(err). - Msg("GetCurrentTSSKeyId failed; trying next endpoint") - } - - return "", fmt.Errorf("pushcore: GetCurrentTSSKeyId failed on all %d endpoints: %w", len(c.utssClients), lastErr) -} - -// TxResult represents a transaction result with its events. -type TxResult struct { - TxHash string - Height int64 - TxResponse *tx.GetTxResponse -} - -// GetTxsByEvents queries transactions matching the given event query. -// The query should follow Cosmos SDK event query format, e.g., "tss_process_initiated.process_id EXISTS" -// minHeight and maxHeight can be used to filter by block range (0 means no limit). 
-func (c *Client) GetTxsByEvents(eventQuery string, minHeight, maxHeight uint64, limit uint64) ([]*TxResult, error) { - if len(c.txClients) == 0 { - return nil, errors.New("pushcore: no endpoints configured") - } - - start := int(atomic.AddUint32(&c.rr, 1)-1) % len(c.txClients) - - var lastErr error - for i := 0; i < len(c.txClients); i++ { - idx := (start + i) % len(c.txClients) - client := c.txClients[idx] - - // Build the query events - events := []string{eventQuery} - - // Add height range filters if specified - if minHeight > 0 { - events = append(events, fmt.Sprintf("tx.height>=%d", minHeight)) - } - if maxHeight > 0 { - events = append(events, fmt.Sprintf("tx.height<=%d", maxHeight)) - } - - // Set pagination limit - pageLimit := limit - if pageLimit == 0 { - pageLimit = 100 // default limit - } - - // Join events with AND to create query string (SDK v0.50+ uses Query field) - queryString := strings.Join(events, " AND ") - - req := &tx.GetTxsEventRequest{ - Query: queryString, - Pagination: &query.PageRequest{ - Limit: pageLimit, - }, - OrderBy: tx.OrderBy_ORDER_BY_ASC, - } - - resp, err := client.GetTxsEvent(context.Background(), req) - if err == nil { - results := make([]*TxResult, 0, len(resp.TxResponses)) - for _, txResp := range resp.TxResponses { - results = append(results, &TxResult{ - TxHash: txResp.TxHash, - Height: txResp.Height, - TxResponse: &tx.GetTxResponse{ - Tx: resp.Txs[len(results)], - TxResponse: txResp, - }, - }) - } - return results, nil - } - - lastErr = err - c.logger.Debug(). - Int("attempt", i+1). - Int("endpoint_index", idx). - Err(err). - Msg("GetTxsByEvents failed; trying next endpoint") - } - - return nil, fmt.Errorf("pushcore: GetTxsByEvents failed on all %d endpoints: %w", len(c.txClients), lastErr) -} - -// GetBlockByHeight returns block information for a specific height. 
-func (c *Client) GetBlockByHeight(height int64) (*cmtservice.GetBlockByHeightResponse, error) { - if len(c.cmtClients) == 0 { - return nil, errors.New("pushcore: no endpoints configured") - } - - start := int(atomic.AddUint32(&c.rr, 1)-1) % len(c.cmtClients) - - var lastErr error - for i := 0; i < len(c.cmtClients); i++ { - idx := (start + i) % len(c.cmtClients) - client := c.cmtClients[idx] - - resp, err := client.GetBlockByHeight(context.Background(), &cmtservice.GetBlockByHeightRequest{ - Height: height, - }) - if err == nil { - return resp, nil - } - - lastErr = err - c.logger.Debug(). - Int("attempt", i+1). - Int("endpoint_index", idx). - Err(err). - Msg("GetBlockByHeight failed; trying next endpoint") - } - - return nil, fmt.Errorf("pushcore: GetBlockByHeight failed on all %d endpoints: %w", len(c.cmtClients), lastErr) -} diff --git a/universalClient/pushcore/pushCore_test.go b/universalClient/pushcore/pushCore_test.go index fcf43a6a..abff6232 100644 --- a/universalClient/pushcore/pushCore_test.go +++ b/universalClient/pushcore/pushCore_test.go @@ -2,9 +2,17 @@ package pushcore import ( "context" + "math/big" "testing" + cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" + sdktypes "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/tx" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" uregistrytypes "github.com/pushchain/push-chain-node/x/uregistry/types" + utsstypes "github.com/pushchain/push-chain-node/x/utss/types" + uvalidatortypes "github.com/pushchain/push-chain-node/x/uvalidator/types" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -33,9 +41,8 @@ func TestNew(t *testing.T) { errMsg: "at least one gRPC URL is required", }, { - name: "valid URL without port", - urls: []string{"localhost"}, - // Should succeed as CreateGRPCConnection adds default port + name: "valid URL without port", 
+ urls: []string{"localhost"}, wantErr: false, }, { @@ -59,10 +66,9 @@ func TestNew(t *testing.T) { wantErr: false, }, { - name: "mix of valid and invalid URLs", - urls: []string{"localhost:9090", "invalid-url-that-will-fail:99999", "localhost:9091"}, - // Should still succeed as long as at least one connection works - wantErr: false, + name: "mix of valid and invalid URLs", + urls: []string{"localhost:9090", "invalid-url-that-will-fail:99999", "localhost:9091"}, + wantErr: false, // Should succeed if at least one works }, } @@ -75,15 +81,14 @@ func TestNew(t *testing.T) { assert.Contains(t, err.Error(), tt.errMsg) assert.Nil(t, client) } else { - // Note: The connections might fail in test environment, but we're testing the logic - // The function might still return an error if ALL connections fail + // In test environment, connections might fail if err != nil { - // Check if it's because all connections failed + // If all connections failed, that's expected in test env assert.Contains(t, err.Error(), "all dials failed") + assert.Nil(t, client) } else { require.NotNil(t, client) assert.NotNil(t, client.logger) - // Clean up _ = client.Close() } } @@ -97,24 +102,20 @@ func TestClient_Close(t *testing.T) { t.Run("close with no connections", func(t *testing.T) { client := &Client{ logger: logger, - eps: nil, conns: nil, } err := client.Close() assert.NoError(t, err) assert.Nil(t, client.conns) - assert.Nil(t, client.eps) }) - t.Run("close with mock connections", func(t *testing.T) { - // Create a client with a valid connection + t.Run("close with connections", func(t *testing.T) { client, err := New([]string{"localhost:9090"}, logger) if err != nil { - // If we can't create a connection (common in test env), create a mock client + // If connection fails, create a mock client client = &Client{ logger: logger, - eps: []uregistrytypes.QueryClient{}, conns: []*grpc.ClientConn{}, } } @@ -122,7 +123,6 @@ func TestClient_Close(t *testing.T) { err = client.Close() 
assert.NoError(t, err) assert.Nil(t, client.conns) - assert.Nil(t, client.eps) }) } @@ -142,339 +142,618 @@ func TestClient_GetAllChainConfigs(t *testing.T) { assert.Nil(t, configs) }) - // Skip round-robin test as we can't mock the interface easily without nil pointers - // The actual round-robin logic is simple enough and tested by the error message count + t.Run("successful query with mock", func(t *testing.T) { + mockClient := &mockRegistryQueryClient{ + allChainConfigsResp: &uregistrytypes.QueryAllChainConfigsResponse{ + Configs: []*uregistrytypes.ChainConfig{ + {Chain: "eip155:1"}, + {Chain: "eip155:84532"}, + }, + }, + } + + client := &Client{ + logger: logger, + eps: []uregistrytypes.QueryClient{mockClient}, + } + + configs, err := client.GetAllChainConfigs(ctx) + require.NoError(t, err) + require.Len(t, configs, 2) + assert.Equal(t, "eip155:1", configs[0].Chain) + }) + + t.Run("round robin failover", func(t *testing.T) { + failingClient := &mockRegistryQueryClient{err: assert.AnError} + successClient := &mockRegistryQueryClient{ + allChainConfigsResp: &uregistrytypes.QueryAllChainConfigsResponse{ + Configs: []*uregistrytypes.ChainConfig{ + {Chain: "eip155:1"}, + }, + }, + } + + client := &Client{ + logger: logger, + eps: []uregistrytypes.QueryClient{failingClient, successClient}, + } + + configs, err := client.GetAllChainConfigs(ctx) + require.NoError(t, err) + require.Len(t, configs, 1) + }) + + t.Run("all endpoints fail", func(t *testing.T) { + client := &Client{ + logger: logger, + eps: []uregistrytypes.QueryClient{ + &mockRegistryQueryClient{err: assert.AnError}, + &mockRegistryQueryClient{err: assert.AnError}, + }, + } + + configs, err := client.GetAllChainConfigs(ctx) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed on all 2 endpoints") + assert.Nil(t, configs) + }) } -// Removed TestClient_RoundRobinCounter as it would require nil pointer handling +func TestClient_GetLatestBlock(t *testing.T) { + logger := zerolog.Nop() + + 
t.Run("no endpoints configured", func(t *testing.T) { + client := &Client{ + logger: logger, + cmtClients: []cmtservice.ServiceClient{}, + } + + blockNum, err := client.GetLatestBlock(context.Background()) + require.Error(t, err) + assert.Contains(t, err.Error(), "no endpoints configured") + assert.Equal(t, uint64(0), blockNum) + }) + + t.Run("successful query with mock", func(t *testing.T) { + mockClient := &mockCometBFTServiceClient{ + getLatestBlockResp: &cmtservice.GetLatestBlockResponse{ + SdkBlock: &cmtservice.Block{ + Header: cmtservice.Header{ + Height: 12345, + }, + }, + }, + } + + client := &Client{ + logger: logger, + cmtClients: []cmtservice.ServiceClient{mockClient}, + } + + blockNum, err := client.GetLatestBlock(context.Background()) + require.NoError(t, err) + assert.Equal(t, uint64(12345), blockNum) + }) -func TestNew_ErrorHandling(t *testing.T) { + t.Run("nil SdkBlock error", func(t *testing.T) { + mockClient := &mockCometBFTServiceClient{ + getLatestBlockResp: &cmtservice.GetLatestBlockResponse{ + SdkBlock: nil, + }, + } + + client := &Client{ + logger: logger, + cmtClients: []cmtservice.ServiceClient{mockClient}, + } + + blockNum, err := client.GetLatestBlock(context.Background()) + require.Error(t, err) + assert.Contains(t, err.Error(), "SdkBlock is nil") + assert.Equal(t, uint64(0), blockNum) + }) +} + +func TestClient_GetAllUniversalValidators(t *testing.T) { logger := zerolog.Nop() - t.Run("all connections fail", func(t *testing.T) { - // Use URLs that will definitely fail to connect - urls := []string{ - "invalid-host-that-doesnt-exist:99999", - "another-invalid-host:88888", + t.Run("no endpoints configured", func(t *testing.T) { + client := &Client{ + logger: logger, + uvalidatorClients: []uvalidatortypes.QueryClient{}, } - client, err := New(urls, logger) + validators, err := client.GetAllUniversalValidators(context.Background()) + require.Error(t, err) + assert.Contains(t, err.Error(), "no endpoints configured") + assert.Nil(t, 
validators) + }) - // Should get an error about all dials failing - if err != nil { - assert.Contains(t, err.Error(), "all dials failed") - assert.Contains(t, err.Error(), "2 urls") // Should mention the number of URLs tried - assert.Nil(t, client) - } else { - // If somehow it succeeded, make sure to clean up - require.NotNil(t, client) - _ = client.Close() + t.Run("successful query with mock", func(t *testing.T) { + mockClient := &mockUValidatorQueryClient{ + allUniversalValidatorsResp: &uvalidatortypes.QueryUniversalValidatorsSetResponse{ + UniversalValidator: []*uvalidatortypes.UniversalValidator{ + {}, + {}, + }, + }, + } + + client := &Client{ + logger: logger, + uvalidatorClients: []uvalidatortypes.QueryClient{mockClient}, + } + + validators, err := client.GetAllUniversalValidators(context.Background()) + require.NoError(t, err) + require.Len(t, validators, 2) + }) +} + +func TestClient_GetCurrentKey(t *testing.T) { + logger := zerolog.Nop() + + t.Run("no endpoints configured", func(t *testing.T) { + client := &Client{ + logger: logger, + utssClients: []utsstypes.QueryClient{}, } + + key, err := client.GetCurrentKey(context.Background()) + require.Error(t, err) + assert.Contains(t, err.Error(), "no endpoints configured") + assert.Nil(t, key) }) - t.Run("partial connection success", func(t *testing.T) { - // Mix of potentially valid and definitely invalid URLs - urls := []string{ - "localhost:9090", // Might work - "invalid-host-that-doesnt-exist:99999", // Will fail + t.Run("successful query with key", func(t *testing.T) { + mockClient := &mockUTSSQueryClient{ + currentKeyResp: &utsstypes.QueryCurrentKeyResponse{ + Key: &utsstypes.TssKey{ + KeyId: "key-123", + }, + }, } - client, err := New(urls, logger) + client := &Client{ + logger: logger, + utssClients: []utsstypes.QueryClient{mockClient}, + } - // This should succeed if at least one connection works - // or fail if all connections fail - if err != nil { - assert.Contains(t, err.Error(), "all dials 
failed") - } else { - require.NotNil(t, client) - _ = client.Close() + key, err := client.GetCurrentKey(context.Background()) + require.NoError(t, err) + require.NotNil(t, key) + assert.Equal(t, "key-123", key.KeyId) + }) + + t.Run("no key exists (nil key)", func(t *testing.T) { + mockClient := &mockUTSSQueryClient{ + currentKeyResp: &utsstypes.QueryCurrentKeyResponse{ + Key: nil, + }, + } + + client := &Client{ + logger: logger, + utssClients: []utsstypes.QueryClient{mockClient}, } + + key, err := client.GetCurrentKey(context.Background()) + require.Error(t, err) + assert.Contains(t, err.Error(), "no TSS key found") + assert.Nil(t, key) }) } -// Removed TestClient_GetAllChainConfigs_ErrorPropagation as it would require nil pointer handling +func TestClient_GetTxsByEvents(t *testing.T) { + logger := zerolog.Nop() -func TestCreateGRPCConnection(t *testing.T) { - tests := []struct { - name string - endpoint string - wantErr bool - errorContains string - }{ - { - name: "empty endpoint", - endpoint: "", - wantErr: true, - errorContains: "empty endpoint", - }, - { - name: "http endpoint without port", - endpoint: "http://localhost", - wantErr: false, - }, - { - name: "https endpoint without port", - endpoint: "https://localhost", - wantErr: false, - }, - { - name: "http endpoint with port", - endpoint: "http://localhost:9090", - wantErr: false, - }, - { - name: "https endpoint with port", - endpoint: "https://localhost:9090", - wantErr: false, - }, - { - name: "endpoint without scheme and without port", - endpoint: "localhost", - wantErr: false, - }, - { - name: "endpoint without scheme but with port", - endpoint: "localhost:9090", - wantErr: false, - }, - { - name: "endpoint with custom port", - endpoint: "localhost:8080", - wantErr: false, - }, - { - name: "endpoint with invalid port format", - endpoint: "localhost:", - wantErr: false, // Should add default port - }, - { - name: "endpoint with path after colon", - endpoint: "http://localhost:/path", - wantErr: false, 
// Should add default port - }, - } + t.Run("no endpoints configured", func(t *testing.T) { + client := &Client{ + logger: logger, + txClients: []tx.ServiceClient{}, + } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - conn, err := CreateGRPCConnection(tt.endpoint) + txs, err := client.GetTxsByEvents(context.Background(), "test.event", 0, 0, 0) + require.Error(t, err) + assert.Contains(t, err.Error(), "no endpoints configured") + assert.Nil(t, txs) + }) - if tt.wantErr { - require.Error(t, err) - if tt.errorContains != "" { - assert.Contains(t, err.Error(), tt.errorContains) - } - assert.Nil(t, conn) - } else { - require.NoError(t, err) - require.NotNil(t, conn) - // Clean up connection - err = conn.Close() - assert.NoError(t, err) - } - }) + t.Run("successful query with mock", func(t *testing.T) { + mockClient := &mockTxServiceClient{ + getTxsEventResp: &tx.GetTxsEventResponse{ + Txs: []*tx.Tx{ + {Body: &tx.TxBody{}}, + }, + TxResponses: []*sdktypes.TxResponse{ + { + Height: 100, + TxHash: "0x123", + }, + }, + }, + } + + client := &Client{ + logger: logger, + txClients: []tx.ServiceClient{mockClient}, + } + + txs, err := client.GetTxsByEvents(context.Background(), "test.event", 0, 0, 0) + require.NoError(t, err) + require.Len(t, txs, 1) + assert.Equal(t, "0x123", txs[0].TxHash) + assert.Equal(t, int64(100), txs[0].Height) + }) + + t.Run("with height filters", func(t *testing.T) { + mockClient := &mockTxServiceClient{ + getTxsEventResp: &tx.GetTxsEventResponse{ + Txs: []*tx.Tx{}, + TxResponses: []*sdktypes.TxResponse{}, + }, + } + + client := &Client{ + logger: logger, + txClients: []tx.ServiceClient{mockClient}, + } + + txs, err := client.GetTxsByEvents(context.Background(), "test.event", 100, 200, 50) + require.NoError(t, err) + assert.NotNil(t, txs) + }) +} + +func TestClient_GetGasPrice(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + t.Run("no endpoints configured", func(t *testing.T) { + client := &Client{ + 
logger: logger, + uexecutorClients: []uexecutortypes.QueryClient{}, + } + + price, err := client.GetGasPrice(ctx, "eip155:84532") + require.Error(t, err) + assert.Contains(t, err.Error(), "no endpoints configured") + assert.Nil(t, price) + }) + + t.Run("empty chainID", func(t *testing.T) { + client := &Client{ + logger: logger, + uexecutorClients: []uexecutortypes.QueryClient{&mockUExecutorQueryClient{}}, + } + + price, err := client.GetGasPrice(ctx, "") + require.Error(t, err) + assert.Contains(t, err.Error(), "chainID is required") + assert.Nil(t, price) + }) + + t.Run("successful gas price retrieval", func(t *testing.T) { + mockClient := &mockUExecutorQueryClient{ + gasPriceResp: &uexecutortypes.QueryGasPriceResponse{ + GasPrice: &uexecutortypes.GasPrice{ + ObservedChainId: "eip155:84532", + Signers: []string{"validator1", "validator2", "validator3"}, + Prices: []uint64{1000000000, 2000000000, 3000000000}, + BlockNums: []uint64{100, 101, 102}, + MedianIndex: 1, // Median is 2 gwei (index 1) + }, + }, + } + + client := &Client{ + logger: logger, + uexecutorClients: []uexecutortypes.QueryClient{mockClient}, + } + + price, err := client.GetGasPrice(ctx, "eip155:84532") + require.NoError(t, err) + require.NotNil(t, price) + assert.Equal(t, big.NewInt(2000000000), price) + }) + + t.Run("single validator price", func(t *testing.T) { + mockClient := &mockUExecutorQueryClient{ + gasPriceResp: &uexecutortypes.QueryGasPriceResponse{ + GasPrice: &uexecutortypes.GasPrice{ + ObservedChainId: "eip155:1", + Signers: []string{"validator1"}, + Prices: []uint64{5000000000}, + BlockNums: []uint64{100}, + MedianIndex: 0, + }, + }, + } + + client := &Client{ + logger: logger, + uexecutorClients: []uexecutortypes.QueryClient{mockClient}, + } + + price, err := client.GetGasPrice(ctx, "eip155:1") + require.NoError(t, err) + assert.Equal(t, big.NewInt(5000000000), price) + }) + + t.Run("empty prices array", func(t *testing.T) { + mockClient := &mockUExecutorQueryClient{ + gasPriceResp: 
&uexecutortypes.QueryGasPriceResponse{ + GasPrice: &uexecutortypes.GasPrice{ + ObservedChainId: "eip155:84532", + Signers: []string{}, + Prices: []uint64{}, + BlockNums: []uint64{}, + MedianIndex: 0, + }, + }, + } + + client := &Client{ + logger: logger, + uexecutorClients: []uexecutortypes.QueryClient{mockClient}, + } + + price, err := client.GetGasPrice(ctx, "eip155:84532") + require.Error(t, err) + assert.Contains(t, err.Error(), "no gas prices available") + assert.Nil(t, price) + }) + + t.Run("median index out of bounds fallback", func(t *testing.T) { + mockClient := &mockUExecutorQueryClient{ + gasPriceResp: &uexecutortypes.QueryGasPriceResponse{ + GasPrice: &uexecutortypes.GasPrice{ + ObservedChainId: "eip155:84532", + Signers: []string{"validator1"}, + Prices: []uint64{1500000000}, + BlockNums: []uint64{100}, + MedianIndex: 99, // Out of bounds + }, + }, + } + + client := &Client{ + logger: logger, + uexecutorClients: []uexecutortypes.QueryClient{mockClient}, + } + + price, err := client.GetGasPrice(ctx, "eip155:84532") + require.NoError(t, err) + // Should fallback to first price + assert.Equal(t, big.NewInt(1500000000), price) + }) + + t.Run("round robin failover", func(t *testing.T) { + failingClient := &mockUExecutorQueryClient{err: assert.AnError} + successClient := &mockUExecutorQueryClient{ + gasPriceResp: &uexecutortypes.QueryGasPriceResponse{ + GasPrice: &uexecutortypes.GasPrice{ + ObservedChainId: "eip155:84532", + Prices: []uint64{1000000000}, + MedianIndex: 0, + }, + }, + } + + client := &Client{ + logger: logger, + uexecutorClients: []uexecutortypes.QueryClient{failingClient, successClient}, + } + + price, err := client.GetGasPrice(ctx, "eip155:84532") + require.NoError(t, err) + assert.Equal(t, big.NewInt(1000000000), price) + }) + + t.Run("all endpoints fail", func(t *testing.T) { + client := &Client{ + logger: logger, + uexecutorClients: []uexecutortypes.QueryClient{ + &mockUExecutorQueryClient{err: assert.AnError}, + 
&mockUExecutorQueryClient{err: assert.AnError}, + }, + } + + price, err := client.GetGasPrice(ctx, "eip155:84532") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed on all 2 endpoints") + assert.Nil(t, price) + }) +} + +func TestClient_GetGranteeGrants(t *testing.T) { + logger := zerolog.Nop() + + t.Run("no endpoints configured", func(t *testing.T) { + client := &Client{ + logger: logger, + conns: []*grpc.ClientConn{}, + } + + grants, err := client.GetGranteeGrants(context.Background(), "cosmos1abc...") + require.Error(t, err) + assert.Contains(t, err.Error(), "no endpoints configured") + assert.Nil(t, grants) + }) + + t.Run("successful query with mock", func(t *testing.T) { + // Note: This test requires actual gRPC connections, so we'll test the error case + // For a full mock test, we'd need to set up a gRPC server + client := &Client{ + logger: logger, + conns: []*grpc.ClientConn{}, + } + + grants, err := client.GetGranteeGrants(context.Background(), "cosmos1abc...") + require.Error(t, err) + assert.Contains(t, err.Error(), "no endpoints configured") + assert.Nil(t, grants) + }) +} + +func TestClient_GetAccount(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + t.Run("no endpoints configured", func(t *testing.T) { + client := &Client{ + logger: logger, + conns: []*grpc.ClientConn{}, + } + + account, err := client.GetAccount(ctx, "cosmos1abc123") + require.Error(t, err) + assert.Contains(t, err.Error(), "no endpoints configured") + assert.Nil(t, account) + }) + + t.Run("empty address", func(t *testing.T) { + client := &Client{ + logger: logger, + conns: []*grpc.ClientConn{}, + } + + account, err := client.GetAccount(ctx, "") + require.Error(t, err) + assert.Contains(t, err.Error(), "no endpoints configured") + assert.Nil(t, account) + }) +} + +// Mock implementations + +type mockRegistryQueryClient struct { + uregistrytypes.QueryClient + allChainConfigsResp *uregistrytypes.QueryAllChainConfigsResponse + err error +} + 
+func (m *mockRegistryQueryClient) AllChainConfigs(ctx context.Context, req *uregistrytypes.QueryAllChainConfigsRequest, opts ...grpc.CallOption) (*uregistrytypes.QueryAllChainConfigsResponse, error) { + if m.err != nil { + return nil, m.err } + return m.allChainConfigsResp, nil } -func TestCreateGRPCConnection_PortHandling(t *testing.T) { - tests := []struct { - name string - endpoint string - expectedContains string // What the processed endpoint should contain - }{ - { - name: "adds default port when missing", - endpoint: "localhost", - expectedContains: ":9090", - }, - { - name: "preserves existing port", - endpoint: "localhost:8080", - expectedContains: ":8080", - }, - { - name: "adds port to http endpoint", - endpoint: "http://localhost", - expectedContains: ":9090", - }, - { - name: "adds port to https endpoint", - endpoint: "https://localhost", - expectedContains: ":9090", - }, - { - name: "handles empty port", - endpoint: "localhost:", - expectedContains: ":9090", - }, +func (m *mockRegistryQueryClient) ChainConfig(ctx context.Context, req *uregistrytypes.QueryChainConfigRequest, opts ...grpc.CallOption) (*uregistrytypes.QueryChainConfigResponse, error) { + return nil, nil +} + +type mockCometBFTServiceClient struct { + cmtservice.ServiceClient + getLatestBlockResp *cmtservice.GetLatestBlockResponse + err error +} + +func (m *mockCometBFTServiceClient) GetLatestBlock(ctx context.Context, req *cmtservice.GetLatestBlockRequest, opts ...grpc.CallOption) (*cmtservice.GetLatestBlockResponse, error) { + if m.err != nil { + return nil, m.err } + return m.getLatestBlockResp, nil +} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - conn, err := CreateGRPCConnection(tt.endpoint) - require.NoError(t, err) - require.NotNil(t, conn) +func (m *mockCometBFTServiceClient) GetBlockByHeight(ctx context.Context, req *cmtservice.GetBlockByHeightRequest, opts ...grpc.CallOption) (*cmtservice.GetBlockByHeightResponse, error) { + return nil, nil +} - // Get 
the target from the connection - target := conn.Target() - assert.Contains(t, target, tt.expectedContains, "Expected target to contain %s, got %s", tt.expectedContains, target) +type mockUValidatorQueryClient struct { + uvalidatortypes.QueryClient + allUniversalValidatorsResp *uvalidatortypes.QueryUniversalValidatorsSetResponse + err error +} - // Clean up - err = conn.Close() - assert.NoError(t, err) - }) +func (m *mockUValidatorQueryClient) AllUniversalValidators(ctx context.Context, req *uvalidatortypes.QueryUniversalValidatorsSetRequest, opts ...grpc.CallOption) (*uvalidatortypes.QueryUniversalValidatorsSetResponse, error) { + if m.err != nil { + return nil, m.err } + return m.allUniversalValidatorsResp, nil } -func TestCreateGRPCConnection_TLSHandling(t *testing.T) { - tests := []struct { - name string - endpoint string - // Note: We can't easily test if TLS is actually enabled without attempting a real connection - // But we can verify the function doesn't error for different schemes - }{ - { - name: "https should not error", - endpoint: "https://localhost:9090", - }, - { - name: "http should not error", - endpoint: "http://localhost:9090", - }, - { - name: "no scheme should not error", - endpoint: "localhost:9090", - }, +func (m *mockUValidatorQueryClient) UniversalValidator(ctx context.Context, req *uvalidatortypes.QueryUniversalValidatorRequest, opts ...grpc.CallOption) (*uvalidatortypes.QueryUniversalValidatorResponse, error) { + return nil, nil +} + +type mockUTSSQueryClient struct { + utsstypes.QueryClient + currentKeyResp *utsstypes.QueryCurrentKeyResponse + err error +} + +func (m *mockUTSSQueryClient) CurrentKey(ctx context.Context, req *utsstypes.QueryCurrentKeyRequest, opts ...grpc.CallOption) (*utsstypes.QueryCurrentKeyResponse, error) { + if m.err != nil { + return nil, m.err } + return m.currentKeyResp, nil +} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - conn, err := CreateGRPCConnection(tt.endpoint) - require.NoError(t, 
err) - require.NotNil(t, conn) +func (m *mockUTSSQueryClient) KeyById(ctx context.Context, req *utsstypes.QueryKeyByIdRequest, opts ...grpc.CallOption) (*utsstypes.QueryKeyByIdResponse, error) { + return nil, nil +} - // Clean up - err = conn.Close() - assert.NoError(t, err) - }) +type mockTxServiceClient struct { + tx.ServiceClient + getTxsEventResp *tx.GetTxsEventResponse + err error +} + +func (m *mockTxServiceClient) GetTxsEvent(ctx context.Context, req *tx.GetTxsEventRequest, opts ...grpc.CallOption) (*tx.GetTxsEventResponse, error) { + if m.err != nil { + return nil, m.err } + return m.getTxsEventResp, nil } -func TestExtractHostnameFromURL(t *testing.T) { - tests := []struct { - name string - url string - expectedHostname string - wantErr bool - errorContains string - }{ - { - name: "https URL with port", - url: "https://grpc.example.com:443", - expectedHostname: "grpc.example.com", - wantErr: false, - }, - { - name: "https URL without port", - url: "https://grpc.example.com", - expectedHostname: "grpc.example.com", - wantErr: false, - }, - { - name: "http URL with port", - url: "http://localhost:9090", - expectedHostname: "localhost", - wantErr: false, - }, - { - name: "http URL without port", - url: "http://api.test.com", - expectedHostname: "api.test.com", - wantErr: false, - }, - { - name: "plain hostname without port", - url: "example.com", - expectedHostname: "example.com", - wantErr: false, - }, - { - name: "plain hostname with port", - url: "example.com:8080", - expectedHostname: "example.com", - wantErr: false, - }, - { - name: "localhost without port", - url: "localhost", - expectedHostname: "localhost", - wantErr: false, - }, - { - name: "localhost with port", - url: "localhost:9090", - expectedHostname: "localhost", - wantErr: false, - }, - { - name: "complex subdomain", - url: "https://grpc.rpc-testnet-donut-node1.push.org:443", - expectedHostname: "grpc.rpc-testnet-donut-node1.push.org", - wantErr: false, - }, - { - name: "URL with path", - 
url: "https://example.com:443/some/path", - expectedHostname: "example.com", - wantErr: false, - }, - { - name: "empty URL", - url: "", - expectedHostname: "", - wantErr: true, - errorContains: "empty URL provided", - }, - { - name: "URL with only scheme", - url: "https://", - expectedHostname: "", - wantErr: true, - errorContains: "could not extract hostname", - }, - { - name: "URL with only port", - url: ":9090", - expectedHostname: "", - wantErr: true, - errorContains: "could not extract hostname", - }, - { - name: "IPv4 address", - url: "192.168.1.1:9090", - expectedHostname: "192.168.1.1", - wantErr: false, - }, - { - name: "IPv4 address with scheme", - url: "http://192.168.1.1:9090", - expectedHostname: "192.168.1.1", - wantErr: false, - }, +func (m *mockTxServiceClient) GetTx(ctx context.Context, req *tx.GetTxRequest, opts ...grpc.CallOption) (*tx.GetTxResponse, error) { + return nil, nil +} + +type mockUExecutorQueryClient struct { + uexecutortypes.QueryClient + gasPriceResp *uexecutortypes.QueryGasPriceResponse + err error +} + +func (m *mockUExecutorQueryClient) GasPrice(ctx context.Context, req *uexecutortypes.QueryGasPriceRequest, opts ...grpc.CallOption) (*uexecutortypes.QueryGasPriceResponse, error) { + if m.err != nil { + return nil, m.err } + return m.gasPriceResp, nil +} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - hostname, err := ExtractHostnameFromURL(tt.url) +func (m *mockUExecutorQueryClient) Params(ctx context.Context, req *uexecutortypes.QueryParamsRequest, opts ...grpc.CallOption) (*uexecutortypes.QueryParamsResponse, error) { + return nil, nil +} - if tt.wantErr { - require.Error(t, err) - if tt.errorContains != "" { - assert.Contains(t, err.Error(), tt.errorContains) - } - } else { - require.NoError(t, err) - assert.Equal(t, tt.expectedHostname, hostname) - } - }) +func (m *mockUExecutorQueryClient) AllPendingInbounds(ctx context.Context, req *uexecutortypes.QueryAllPendingInboundsRequest, opts ...grpc.CallOption) 
(*uexecutortypes.QueryAllPendingInboundsResponse, error) { + return nil, nil +} + +func (m *mockUExecutorQueryClient) GetUniversalTx(ctx context.Context, req *uexecutortypes.QueryGetUniversalTxRequest, opts ...grpc.CallOption) (*uexecutortypes.QueryGetUniversalTxResponse, error) { + return nil, nil +} + +func (m *mockUExecutorQueryClient) AllUniversalTx(ctx context.Context, req *uexecutortypes.QueryAllUniversalTxRequest, opts ...grpc.CallOption) (*uexecutortypes.QueryAllUniversalTxResponse, error) { + return nil, nil +} + +func (m *mockUExecutorQueryClient) AllGasPrices(ctx context.Context, req *uexecutortypes.QueryAllGasPricesRequest, opts ...grpc.CallOption) (*uexecutortypes.QueryAllGasPricesResponse, error) { + return nil, nil +} + +type mockAuthQueryClient struct { + authtypes.QueryClient + accountResp *authtypes.QueryAccountResponse + err error +} + +func (m *mockAuthQueryClient) Account(ctx context.Context, req *authtypes.QueryAccountRequest, opts ...grpc.CallOption) (*authtypes.QueryAccountResponse, error) { + if m.err != nil { + return nil, m.err } -} \ No newline at end of file + return m.accountResp, nil +} diff --git a/universalClient/pushsigner/grant_verifier.go b/universalClient/pushsigner/grant_verifier.go new file mode 100644 index 00000000..ed764dca --- /dev/null +++ b/universalClient/pushsigner/grant_verifier.go @@ -0,0 +1,171 @@ +package pushsigner + +import ( + "context" + "fmt" + "io" + "slices" + "strings" + "time" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/x/authz" + + "github.com/pushchain/push-chain-node/universalClient/config" + "github.com/pushchain/push-chain-node/universalClient/constant" + "github.com/pushchain/push-chain-node/universalClient/pushcore" + keysv2 "github.com/pushchain/push-chain-node/universalClient/pushsigner/keys" + uetypes "github.com/pushchain/push-chain-node/x/uexecutor/types" +) + +// 
GrantInfo represents information about a single AuthZ grant. +type grantInfo struct { + Granter string + MessageType string + Expiration *time.Time +} + +// ValidationResult contains the validated hotkey information. +type validationResult struct { + Keyring keyring.Keyring + KeyName string + KeyAddr string + Granter string + Messages []string +} + +// ValidateKeysAndGrants validates hotkey and AuthZ grants against the specified granter. +func validateKeysAndGrants(keyringBackend config.KeyringBackend, keyringPassword string, nodeHome string, pushCore *pushcore.Client, granter string) (*validationResult, error) { + interfaceRegistry := keysv2.CreateInterfaceRegistryWithEVMSupport() + authz.RegisterInterfaces(interfaceRegistry) + uetypes.RegisterInterfaces(interfaceRegistry) + cdc := codec.NewProtoCodec(interfaceRegistry) + + // Prepare password reader for file backend + var reader io.Reader = nil + if keyringBackend == config.KeyringBackendFile { + if keyringPassword == "" { + return nil, fmt.Errorf("keyring_password is required for file backend") + } + // Keyring expects password twice, each followed by newline + passwordInput := fmt.Sprintf("%s\n%s\n", keyringPassword, keyringPassword) + reader = strings.NewReader(passwordInput) + } + + kr, err := keysv2.CreateKeyringFromConfig(nodeHome, reader, keyringBackend) + if err != nil { + return nil, fmt.Errorf("failed to create keyring: %w", err) + } + + keyInfos, err := kr.List() + if err != nil { + return nil, fmt.Errorf("failed to list keys: %w", err) + } + if len(keyInfos) == 0 { + return nil, fmt.Errorf("no keys found in keyring") + } + + keyInfo := keyInfos[0] + keyAddr, err := keyInfo.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get key address: %w", err) + } + keyAddrStr := keyAddr.String() + + grantResp, err := pushCore.GetGranteeGrants(context.Background(), keyAddrStr) + if err != nil { + return nil, fmt.Errorf("failed to query grants: %w", err) + } + + grants := 
extractGrantInfo(grantResp, cdc) + if len(grants) == 0 { + return nil, fmt.Errorf("no AuthZ grants found for %s", keyAddrStr) + } + + // Verify grants against the specified granter + authorizedMsgs, err := verifyGrants(grants, granter) + if err != nil { + return nil, err + } + + return &validationResult{ + Keyring: kr, + KeyName: keyInfo.Name, + KeyAddr: keyAddrStr, + Granter: granter, + Messages: authorizedMsgs, + }, nil +} + +// VerifyGrants verifies that all required messages are authorized by the specified granter. +func verifyGrants(grants []grantInfo, granter string) ([]string, error) { + now := time.Now() + authorized := make(map[string]bool) + + for _, grant := range grants { + // Skip expired grants + if grant.Expiration != nil && grant.Expiration.Before(now) { + continue + } + + // Only consider grants from the specified granter + if grant.Granter != granter { + continue + } + + // Check if this grant is for a required message type + if slices.Contains(constant.RequiredMsgGrants, grant.MessageType) { + authorized[grant.MessageType] = true + } + } + + // Verify all required grants are present + var missing []string + for _, req := range constant.RequiredMsgGrants { + if !authorized[req] { + missing = append(missing, req) + } + } + + if len(missing) > 0 { + return nil, fmt.Errorf("missing grants from granter %s: %v", granter, missing) + } + + // Return list of authorized message types + msgs := make([]string, 0, len(authorized)) + for m := range authorized { + msgs = append(msgs, m) + } + return msgs, nil +} + +// extractGrantInfo extracts grant info from response. 
+func extractGrantInfo(resp *authz.QueryGranteeGrantsResponse, cdc *codec.ProtoCodec) []grantInfo { + var grants []grantInfo + for _, grant := range resp.Grants { + if grant.Authorization == nil || grant.Authorization.TypeUrl != "/cosmos.authz.v1beta1.GenericAuthorization" { + continue + } + msgType, err := extractMessageType(grant.Authorization, cdc) + if err != nil { + continue + } + grants = append(grants, grantInfo{ + Granter: grant.Granter, + MessageType: msgType, + Expiration: grant.Expiration, + }) + } + return grants +} + +// extractMessageType extracts message type from GenericAuthorization. +func extractMessageType(any *codectypes.Any, cdc *codec.ProtoCodec) (string, error) { + var ga authz.GenericAuthorization + if err := cdc.Unmarshal(any.Value, &ga); err != nil { + return "", err + } + return ga.Msg, nil +} diff --git a/universalClient/keys/interfaces.go b/universalClient/pushsigner/keys/interfaces.go similarity index 56% rename from universalClient/keys/interfaces.go rename to universalClient/pushsigner/keys/interfaces.go index 2f8cec7f..eafad58d 100644 --- a/universalClient/keys/interfaces.go +++ b/universalClient/pushsigner/keys/interfaces.go @@ -1,7 +1,7 @@ -package keys +package keysv2 import ( - cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/crypto/keyring" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -11,24 +11,22 @@ type KeyringBackend string const ( // KeyringBackendTest is the test Cosmos keyring backend (unencrypted) KeyringBackendTest KeyringBackend = "test" - + // KeyringBackendFile is the file Cosmos keyring backend (encrypted) KeyringBackendFile KeyringBackend = "file" ) -// String returns the string representation of the keyring backend -func (kb KeyringBackend) String() string { - return string(kb) -} - // UniversalValidatorKeys defines the interface for key management in Universal Validator type UniversalValidatorKeys interface { // GetAddress returns the hot key address GetAddress() 
(sdk.AccAddress, error) - - // GetPrivateKey returns the hot key private key (requires password) - GetPrivateKey(password string) (cryptotypes.PrivKey, error) - - // GetHotkeyPassword returns the password for file backend - GetHotkeyPassword() string -} \ No newline at end of file + + // GetKeyName returns the name of the hot key in the keyring + GetKeyName() string + + // GetKeyring returns the underlying keyring for signing operations. + // It validates that the key exists before returning the keyring. + // For file backend, decryption happens automatically when signing via tx.Sign(). + // This allows signing without exposing the private key. + GetKeyring() (keyring.Keyring, error) +} diff --git a/universalClient/keys/keyring.go b/universalClient/pushsigner/keys/keyring.go similarity index 73% rename from universalClient/keys/keyring.go rename to universalClient/pushsigner/keys/keyring.go index b761b602..e37faaf0 100644 --- a/universalClient/keys/keyring.go +++ b/universalClient/pushsigner/keys/keyring.go @@ -1,4 +1,4 @@ -package keys +package keysv2 import ( "fmt" @@ -10,15 +10,15 @@ import ( codectypes "github.com/cosmos/cosmos-sdk/codec/types" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" "github.com/cosmos/cosmos-sdk/crypto/keyring" - "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" - evmhd "github.com/cosmos/evm/crypto/hd" - sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" - cosmosevmkeyring "github.com/cosmos/evm/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" evmcrypto "github.com/cosmos/evm/crypto/ethsecp256k1" + evmhd "github.com/cosmos/evm/crypto/hd" + cosmosevmkeyring "github.com/cosmos/evm/crypto/keyring" "github.com/rs/zerolog/log" - + "github.com/pushchain/push-chain-node/universalClient/config" ) @@ -28,33 +28,32 @@ type KeyringConfig struct { KeyringBackend KeyringBackend 
HotkeyName string HotkeyPassword string - OperatorAddr string } // GetKeyringKeybase creates and returns keyring and key info -func GetKeyringKeybase(config KeyringConfig) (keyring.Keyring, string, error) { +func GetKeyringKeybase(cfg KeyringConfig) (keyring.Keyring, string, error) { logger := log.Logger.With().Str("module", "GetKeyringKeybase").Logger() - - if len(config.HotkeyName) == 0 { + + if len(cfg.HotkeyName) == 0 { return nil, "", fmt.Errorf("hotkey name is empty") } - if len(config.HomeDir) == 0 { + if len(cfg.HomeDir) == 0 { return nil, "", fmt.Errorf("home directory is empty") } // Prepare password reader for file backend var reader io.Reader = strings.NewReader("") - if config.KeyringBackend == KeyringBackendFile { - if config.HotkeyPassword == "" { + if cfg.KeyringBackend == KeyringBackendFile { + if cfg.HotkeyPassword == "" { return nil, "", fmt.Errorf("password is required for file backend") } // Keyring expects password twice, each followed by newline - passwordInput := fmt.Sprintf("%s\n%s\n", config.HotkeyPassword, config.HotkeyPassword) + passwordInput := fmt.Sprintf("%s\n%s\n", cfg.HotkeyPassword, cfg.HotkeyPassword) reader = strings.NewReader(passwordInput) } - kb, err := CreateKeyring(config.HomeDir, reader, config.KeyringBackend) + kb, err := CreateKeyring(cfg.HomeDir, reader, cfg.KeyringBackend) if err != nil { return nil, "", fmt.Errorf("failed to get keybase: %w", err) } @@ -67,13 +66,13 @@ func GetKeyringKeybase(config KeyringConfig) (keyring.Keyring, string, error) { os.Stdin = nil logger.Debug(). 
- Msgf("Checking for Hotkey: %s \nFolder: %s\nBackend: %s", - config.HotkeyName, config.HomeDir, kb.Backend()) - - rc, err := kb.Key(config.HotkeyName) + Msgf("Checking for Hotkey: %s \nFolder: %s\nBackend: %s", + cfg.HotkeyName, cfg.HomeDir, kb.Backend()) + + rc, err := kb.Key(cfg.HotkeyName) if err != nil { - return nil, "", fmt.Errorf("key not present in backend %s with name (%s): %w", - kb.Backend(), config.HotkeyName, err) + return nil, "", fmt.Errorf("key not present in backend %s with name (%s): %w", + kb.Backend(), cfg.HotkeyName, err) } // Get public key in bech32 format @@ -85,48 +84,30 @@ func GetKeyringKeybase(config KeyringConfig) (keyring.Keyring, string, error) { return kb, pubkeyBech32, nil } -// CreateNewKey creates a new key in the keyring -func CreateNewKey(kr keyring.Keyring, name string, mnemonic string, passphrase string) (*keyring.Record, error) { - if mnemonic != "" { - // Import from mnemonic using EVM algorithm - return kr.NewAccount(name, mnemonic, passphrase, sdk.FullFundraiserPath, evmhd.EthSecp256k1) - } - - // Generate new key with mnemonic using EVM algorithm - record, _, err := kr.NewMnemonic(name, keyring.English, sdk.FullFundraiserPath, passphrase, evmhd.EthSecp256k1) - if err != nil { - return nil, fmt.Errorf("failed to generate new key with mnemonic: %w", err) - } - - // Return the newly created key record - return record, nil -} - -// CreateNewKeyWithMnemonic creates a new key in the keyring and returns both the record and generated mnemonic -func CreateNewKeyWithMnemonic(kr keyring.Keyring, name string, mnemonic string, passphrase string) (*keyring.Record, string, error) { +// CreateNewKey creates a new key in the keyring and returns the record and mnemonic. +// If mnemonic is provided, it imports the key; otherwise, it generates a new one. +// The returned mnemonic will be empty if importing from an existing mnemonic. 
+func CreateNewKey(kr keyring.Keyring, name string, mnemonic string, passphrase string) (*keyring.Record, string, error) { if mnemonic != "" { // Import from mnemonic using EVM algorithm record, err := kr.NewAccount(name, mnemonic, passphrase, sdk.FullFundraiserPath, evmhd.EthSecp256k1) return record, mnemonic, err } - + // Generate new key with mnemonic using EVM algorithm record, generatedMnemonic, err := kr.NewMnemonic(name, keyring.English, sdk.FullFundraiserPath, passphrase, evmhd.EthSecp256k1) if err != nil { return nil, "", fmt.Errorf("failed to generate new key with mnemonic: %w", err) } - - // Return the newly created key record and generated mnemonic + return record, generatedMnemonic, nil } - - // CreateInterfaceRegistryWithEVMSupport creates an interface registry with EVM-compatible key types func CreateInterfaceRegistryWithEVMSupport() codectypes.InterfaceRegistry { registry := codectypes.NewInterfaceRegistry() cryptocodec.RegisterInterfaces(registry) - + // Register all key types (both public and private) registry.RegisterImplementations((*cryptotypes.PubKey)(nil), &secp256k1.PubKey{}, @@ -138,7 +119,7 @@ func CreateInterfaceRegistryWithEVMSupport() codectypes.InterfaceRegistry { &ed25519.PrivKey{}, &evmcrypto.PrivKey{}, ) - + return registry } @@ -147,7 +128,7 @@ func CreateKeyring(homeDir string, reader io.Reader, keyringBackend KeyringBacke if len(homeDir) == 0 { return nil, fmt.Errorf("home directory is empty") } - + // Create codec with EVM-compatible key types directly registry := CreateInterfaceRegistryWithEVMSupport() cdc := codec.NewProtoCodec(registry) @@ -167,6 +148,22 @@ func CreateKeyring(homeDir string, reader io.Reader, keyringBackend KeyringBacke return keyring.New(sdk.KeyringServiceName(), backend, homeDir, reader, cdc, cosmosevmkeyring.Option()) } +// CreateKeyringFromConfig creates a keyring with EVM compatibility from config backend type +func CreateKeyringFromConfig(homeDir string, reader io.Reader, configBackend 
config.KeyringBackend) (keyring.Keyring, error) { + // Convert config types to keys types + var keysBackend KeyringBackend + switch configBackend { + case config.KeyringBackendFile: + keysBackend = KeyringBackendFile + case config.KeyringBackendTest: + keysBackend = KeyringBackendTest + default: + keysBackend = KeyringBackendTest + } + + return CreateKeyring(homeDir, reader, keysBackend) +} + // getPubkeyBech32FromRecord extracts bech32 public key from key record func getPubkeyBech32FromRecord(record *keyring.Record) (string, error) { pubkey, err := record.GetPubKey() @@ -174,32 +171,14 @@ func getPubkeyBech32FromRecord(record *keyring.Record) (string, error) { return "", fmt.Errorf("failed to get public key: %w", err) } - // For now, return the hex representation of the public key - // This can be improved later to use proper bech32 encoding + // Return hex representation of the public key with prefix return fmt.Sprintf("pushpub%x", pubkey.Bytes()), nil } // ValidateKeyExists checks if a key exists in the keyring -func ValidateKeyExists(keyring keyring.Keyring, keyName string) error { - _, err := keyring.Key(keyName) - if err != nil { +func ValidateKeyExists(kr keyring.Keyring, keyName string) error { + if _, err := kr.Key(keyName); err != nil { return fmt.Errorf("key %s not found: %w", keyName, err) } return nil } - -// CreateKeyringFromConfig creates a keyring with EVM compatibility from config backend type -func CreateKeyringFromConfig(homeDir string, reader io.Reader, configBackend config.KeyringBackend) (keyring.Keyring, error) { - // Convert config types to keys types - var keysBackend KeyringBackend - switch configBackend { - case config.KeyringBackendFile: - keysBackend = KeyringBackendFile - case config.KeyringBackendTest: - keysBackend = KeyringBackendTest - default: - keysBackend = KeyringBackendTest - } - - return CreateKeyring(homeDir, reader, keysBackend) -} \ No newline at end of file diff --git a/universalClient/keys/keyring_test.go 
b/universalClient/pushsigner/keys/keyring_test.go similarity index 90% rename from universalClient/keys/keyring_test.go rename to universalClient/pushsigner/keys/keyring_test.go index 675e91f1..8cc607f9 100644 --- a/universalClient/keys/keyring_test.go +++ b/universalClient/pushsigner/keys/keyring_test.go @@ -1,4 +1,4 @@ -package keys +package keysv2 import ( "os" @@ -44,7 +44,6 @@ func (suite *KeyringTestSuite) SetupTest() { KeyringBackend: KeyringBackendTest, HotkeyName: "test-key", HotkeyPassword: "", - OperatorAddr: "push1abc123def456", } // Create keyring with EVM compatibility using our standard CreateKeyring function @@ -73,7 +72,7 @@ func (suite *KeyringTestSuite) TestGetKeyringKeybase() { // TestGetKeyringKeybaseWithExistingKey tests keyring with existing key func (suite *KeyringTestSuite) TestGetKeyringKeybaseWithExistingKey() { // First create a key in the test keyring - _, err := CreateNewKey(suite.kb, "test-key", "", "") + _, _, err := CreateNewKey(suite.kb, "test-key", "", "") require.NoError(suite.T(), err) kb, record, err := GetKeyringKeybase(suite.config) @@ -86,7 +85,7 @@ func (suite *KeyringTestSuite) TestGetKeyringKeybaseWithExistingKey() { // TestCreateNewKey tests key creation func (suite *KeyringTestSuite) TestCreateNewKey() { - record, err := CreateNewKey(suite.kb, "new-test-key", "", "") + record, _, err := CreateNewKey(suite.kb, "new-test-key", "", "") require.NoError(suite.T(), err) assert.NotNil(suite.T(), record) @@ -102,11 +101,12 @@ func (suite *KeyringTestSuite) TestCreateNewKey() { func (suite *KeyringTestSuite) TestCreateNewKeyWithMnemonic() { mnemonic := "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about" - record, _, err := CreateNewKeyWithMnemonic(suite.kb, "mnemonic-key", mnemonic, "") + record, returnedMnemonic, err := CreateNewKey(suite.kb, "mnemonic-key", mnemonic, "") require.NoError(suite.T(), err) assert.NotNil(suite.T(), record) assert.Equal(suite.T(), "mnemonic-key", 
record.Name) + assert.Equal(suite.T(), mnemonic, returnedMnemonic) // Should return the provided mnemonic // Verify key was created retrievedRecord, err := suite.kb.Key("mnemonic-key") @@ -118,15 +118,12 @@ func (suite *KeyringTestSuite) TestCreateNewKeyWithMnemonic() { func (suite *KeyringTestSuite) TestCreateNewKeyWithInvalidMnemonic() { invalidMnemonic := "invalid mnemonic words" - _, _, err := CreateNewKeyWithMnemonic(suite.kb, "invalid-key", invalidMnemonic, "") + _, _, err := CreateNewKey(suite.kb, "invalid-key", invalidMnemonic, "") assert.Error(suite.T(), err) assert.Contains(suite.T(), err.Error(), "Invalid mnenomic") } - - - // TestGetKeybase tests keybase creation with different backends func (suite *KeyringTestSuite) TestGetKeybase() { // Test with test backend @@ -150,7 +147,7 @@ func (suite *KeyringTestSuite) TestGetKeybaseWithFileBackend() { // TestValidateKeyExists tests key existence validation func (suite *KeyringTestSuite) TestValidateKeyExists() { // Create a key first - _, err := CreateNewKey(suite.kb, "validation-test", "", "") + _, _, err := CreateNewKey(suite.kb, "validation-test", "", "") require.NoError(suite.T(), err) // Test existing key @@ -166,7 +163,7 @@ func (suite *KeyringTestSuite) TestValidateKeyExists() { // TestGetPubkeyBech32FromRecord tests public key extraction func (suite *KeyringTestSuite) TestGetPubkeyBech32FromRecord() { // Create a key - record, err := CreateNewKey(suite.kb, "pubkey-test", "", "") + record, _, err := CreateNewKey(suite.kb, "pubkey-test", "", "") require.NoError(suite.T(), err) // Get public key @@ -185,11 +182,10 @@ func (suite *KeyringTestSuite) TestKeyringConfigValidation() { KeyringBackend: KeyringBackendTest, HotkeyName: "test-key", HotkeyPassword: "", - OperatorAddr: "push1abc123def456", } // Create a key for this config to work - _, err := CreateNewKey(suite.kb, validConfig.HotkeyName, "", "") + _, _, err := CreateNewKey(suite.kb, validConfig.HotkeyName, "", "") require.NoError(suite.T(), err) // 
Test the validation indirectly through GetKeyringKeybase diff --git a/universalClient/pushsigner/keys/keys.go b/universalClient/pushsigner/keys/keys.go new file mode 100644 index 00000000..276021b2 --- /dev/null +++ b/universalClient/pushsigner/keys/keys.go @@ -0,0 +1,61 @@ +package keysv2 + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var _ UniversalValidatorKeys = &Keys{} + +// Keys manages all the keys used by Universal Validator +type Keys struct { + keyName string // Hot key name in keyring + keyring keyring.Keyring // Cosmos SDK keyring + hotkeyPassword string // Password for file backend +} + +// NewKeys creates a new instance of Keys +func NewKeys( + kr keyring.Keyring, + keyName string, + hotkeyPassword string, +) *Keys { + return &Keys{ + keyName: keyName, + keyring: kr, + hotkeyPassword: hotkeyPassword, + } +} + +// GetAddress returns the hot key address +func (k *Keys) GetAddress() (sdk.AccAddress, error) { + info, err := k.keyring.Key(k.keyName) + if err != nil { + return nil, fmt.Errorf("failed to get key %s: %w", k.keyName, err) + } + + addr, err := info.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get address from key info: %w", err) + } + + return addr, nil +} + +// GetKeyName returns the name of the hot key in the keyring +func (k *Keys) GetKeyName() string { + return k.keyName +} + +// GetKeyring returns the underlying keyring for signing operations. +// It validates that the key exists in the keyring before returning it. +// For file backend, the keyring handles decryption automatically when signing. 
+func (k *Keys) GetKeyring() (keyring.Keyring, error) { + // Validate that the key exists in the keyring + if _, err := k.keyring.Key(k.keyName); err != nil { + return nil, fmt.Errorf("key %s not found in keyring: %w", k.keyName, err) + } + return k.keyring, nil +} diff --git a/universalClient/keys/keys_test.go b/universalClient/pushsigner/keys/keys_test.go similarity index 73% rename from universalClient/keys/keys_test.go rename to universalClient/pushsigner/keys/keys_test.go index 096247cc..a6c165e9 100644 --- a/universalClient/keys/keys_test.go +++ b/universalClient/pushsigner/keys/keys_test.go @@ -1,7 +1,8 @@ -package keys +package keysv2 import ( "os" + "strings" "testing" sdk "github.com/cosmos/cosmos-sdk/types" @@ -20,7 +21,7 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } -func TestNewKeysWithKeybase(t *testing.T) { +func TestNewKeys(t *testing.T) { // Create temporary directory for test keyring tempDir, err := os.MkdirTemp("", "test-keyring") require.NoError(t, err) @@ -31,22 +32,18 @@ func TestNewKeysWithKeybase(t *testing.T) { require.NoError(t, err) // Create basic Keys instance - keys := &Keys{ - signerName: "test-hotkey", - kb: kb, - } + keys := NewKeys(kb, "test-hotkey", "") require.NotNil(t, keys) - require.Equal(t, "test-hotkey", keys.signerName) - require.NotNil(t, keys.kb) + require.Equal(t, "test-hotkey", keys.keyName) + require.NotNil(t, keys.keyring) // Test methods that should work without requiring actual key - assert.NotNil(t, keys.kb) - assert.Equal(t, "", keys.GetHotkeyPassword()) // Should be empty for test + assert.NotNil(t, keys.keyring) + // Password is not exposed - signing uses keyring directly + assert.Equal(t, "test-hotkey", keys.GetKeyName()) } - - func TestKeyringBackends(t *testing.T) { tests := []struct { name string @@ -90,31 +87,32 @@ func TestPasswordFailureScenarios(t *testing.T) { defer func() { _ = os.RemoveAll(tempDir) }() // Test with file backend requiring password - kb, err := CreateKeyring(tempDir, nil, 
KeyringBackendFile) + // For file backend, we need a password reader + passwordReader := strings.NewReader("testpass\ntestpass\n") + kb, err := CreateKeyring(tempDir, passwordReader, KeyringBackendFile) require.NoError(t, err) - keys := &Keys{ - signerName: "test-key", - kb: kb, - } + // Create a key first with password + _, _, err = CreateNewKey(kb, "test-key", "", "testpass") + require.NoError(t, err) - // Test GetPrivateKey without password for file backend - _, err = keys.GetPrivateKey("") - assert.Error(t, err) - assert.Contains(t, err.Error(), "password is required for file backend") + keys := NewKeys(kb, "test-key", "") - // Test with test backend (should not require password) - kbTest, err := CreateKeyring(tempDir, nil, KeyringBackendTest) + // Test GetKeyring returns the keyring and validates key exists + kr, err := keys.GetKeyring() require.NoError(t, err) + assert.NotNil(t, kr) + // Verify it's the same backend type + assert.Equal(t, kb.Backend(), kr.Backend()) - keysTest := &Keys{ - signerName: "test-key", - kb: kbTest, - } + // Test with test backend + kbTest, err := CreateKeyring(tempDir, nil, KeyringBackendTest) + require.NoError(t, err) - // Should not error with empty password for test backend - password := keysTest.GetHotkeyPassword() - assert.Empty(t, password) + keysTest := NewKeys(kbTest, "test-key", "") + // Password is not exposed - signing uses keyring directly + // The keyring handles password internally when needed + assert.NotNil(t, keysTest) } // TestKeyringBackendSwitching tests switching between keyring backends @@ -153,8 +151,8 @@ func TestKeyringBackendSwitching(t *testing.T) { // Both should be valid assert.NotNil(t, kb1) assert.NotNil(t, kb2) - assert.Equal(t, tt.backend1.String(), kb1.Backend()) - assert.Equal(t, tt.backend2.String(), kb2.Backend()) + assert.Equal(t, string(tt.backend1), kb1.Backend()) + assert.Equal(t, string(tt.backend2), kb2.Backend()) }) } } @@ -170,13 +168,10 @@ func TestConcurrentKeyAccess(t *testing.T) { 
require.NoError(t, err) keyName := "concurrent-test-key" - _, err = CreateNewKey(kb, keyName, "", "") + _, _, err = CreateNewKey(kb, keyName, "", "") require.NoError(t, err) - keys := &Keys{ - signerName: keyName, - kb: kb, - } + keys := NewKeys(kb, keyName, "") // Test concurrent GetAddress calls const numGoroutines = 10 @@ -196,7 +191,6 @@ func TestConcurrentKeyAccess(t *testing.T) { } } - // TestErrorConditions tests various error conditions func TestErrorConditions(t *testing.T) { tempDir, err := os.MkdirTemp("", "test-keyring") @@ -207,18 +201,16 @@ func TestErrorConditions(t *testing.T) { kb, err := CreateKeyring(tempDir, nil, KeyringBackendTest) require.NoError(t, err) - keys := &Keys{ - signerName: "non-existent-key", - kb: kb, - } + keys := NewKeys(kb, "non-existent-key", "") // Test GetAddress with non-existent key _, err = keys.GetAddress() assert.Error(t, err) assert.Contains(t, err.Error(), "failed to get key") - // Test GetPrivateKey with non-existent key - _, err = keys.GetPrivateKey("") + // Test GetKeyring validates key exists and returns error for non-existent key + kr, err := keys.GetKeyring() assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to export private key") + assert.Nil(t, kr) + assert.Contains(t, err.Error(), "not found in keyring") } diff --git a/universalClient/pushsigner/pushsigner.go b/universalClient/pushsigner/pushsigner.go new file mode 100644 index 00000000..09e54a2d --- /dev/null +++ b/universalClient/pushsigner/pushsigner.go @@ -0,0 +1,427 @@ +package pushsigner + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/codec" + cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + cosmosauthz 
"github.com/cosmos/cosmos-sdk/x/authz" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/rs/zerolog" + + "github.com/pushchain/push-chain-node/universalClient/config" + "github.com/pushchain/push-chain-node/universalClient/pushcore" + keysv2 "github.com/pushchain/push-chain-node/universalClient/pushsigner/keys" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" +) + +// Signer provides the main public API for signing and voting operations. +type Signer struct { + keys keysv2.UniversalValidatorKeys + clientCtx client.Context + pushCore *pushcore.Client + granter string + log zerolog.Logger + sequenceMutex sync.Mutex // Mutex to synchronize transaction signing + lastSequence uint64 // Track the last used sequence +} + +// New creates a new Signer instance with validation. +func New( + log zerolog.Logger, + keyringBackend config.KeyringBackend, + keyringPassword string, + nodeHome string, + pushCore *pushcore.Client, + chainID string, + granter string, +) (*Signer, error) { + log.Info().Msg("Validating hotkey and AuthZ permissions...") + + validationResult, err := validateKeysAndGrants(keyringBackend, keyringPassword, nodeHome, pushCore, granter) + if err != nil { + log.Error().Err(err).Msg("PushSigner validation failed") + return nil, fmt.Errorf("PushSigner validation failed: %w", err) + } + + keyAddress, err := sdk.AccAddressFromBech32(validationResult.KeyAddr) + if err != nil { + return nil, fmt.Errorf("failed to parse key address: %w", err) + } + + universalKeys := keysv2.NewKeys( + validationResult.Keyring, + validationResult.KeyName, + "", + ) + + derivedAddr, err := universalKeys.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get address from keys: %w", err) + } + if !derivedAddr.Equals(keyAddress) { + return nil, fmt.Errorf("key address mismatch: expected %s, got %s", 
keyAddress, derivedAddr) + } + + // Validate keyring is accessible before creating client context + if _, err := universalKeys.GetKeyring(); err != nil { + return nil, fmt.Errorf("failed to validate keyring: %w", err) + } + + clientCtx, err := createClientContext(validationResult.Keyring, chainID) + if err != nil { + return nil, fmt.Errorf("failed to create client context: %w", err) + } + + log.Info(). + Str("key_name", validationResult.KeyName). + Str("key_address", validationResult.KeyAddr). + Str("granter", validationResult.Granter). + Msg("Signer initialized successfully") + + return &Signer{ + keys: universalKeys, + clientCtx: clientCtx, + pushCore: pushCore, + granter: validationResult.Granter, + log: log.With().Str("component", "signer").Logger(), + }, nil +} + +// VoteInbound votes on an inbound transaction. +func (s *Signer) VoteInbound(ctx context.Context, inbound *uexecutortypes.Inbound) (string, error) { + return voteInbound(ctx, s, s.log, s.granter, inbound) +} + +// VoteGasPrice votes on a gas price observation. +func (s *Signer) VoteGasPrice(ctx context.Context, chainID string, price uint64, blockNumber uint64) (string, error) { + return voteGasPrice(ctx, s, s.log, s.granter, chainID, price, blockNumber) +} + +// VoteOutbound votes on an outbound transaction observation. +func (s *Signer) VoteOutbound(ctx context.Context, txID string, observation *uexecutortypes.OutboundObservation) (string, error) { + return voteOutbound(ctx, s, s.log, s.granter, txID, observation) +} + +// VoteTssKeyProcess votes on a TSS key process. 
+func (s *Signer) VoteTssKeyProcess(ctx context.Context, tssPubKey string, keyID string, processID uint64) (string, error) { + return voteTssKeyProcess(ctx, s, s.log, s.granter, tssPubKey, keyID, processID) +} + +// signAndBroadcastAuthZTx signs and broadcasts an AuthZ transaction +func (s *Signer) signAndBroadcastAuthZTx( + ctx context.Context, + msgs []sdk.Msg, + memo string, + gasLimit uint64, + feeAmount sdk.Coins, +) (*sdk.TxResponse, error) { + // Lock to prevent concurrent sequence issues + s.sequenceMutex.Lock() + defer s.sequenceMutex.Unlock() + + s.log.Info(). + Int("msg_count", len(msgs)). + Str("memo", memo). + Msg("Creating AuthZ transaction") + + // Wrap messages with AuthZ + authzMsgs, err := s.wrapWithAuthZ(msgs) + if err != nil { + return nil, fmt.Errorf("failed to wrap messages with AuthZ: %w", err) + } + + // Try up to 3 times in case of sequence mismatch + maxAttempts := 3 + for attempt := 1; attempt <= maxAttempts; attempt++ { + // Create and sign transaction + txBuilder, err := s.createTxBuilder(authzMsgs, memo, gasLimit, feeAmount) + if err != nil { + return nil, fmt.Errorf("failed to create tx builder: %w", err) + } + + // Sign the transaction with sequence management using keyring (no private key exposure) + if err := s.signTxWithSequence(ctx, txBuilder); err != nil { + return nil, fmt.Errorf("failed to sign transaction: %w", err) + } + + // Encode transaction + txBytes, err := s.clientCtx.TxConfig.TxEncoder()(txBuilder.GetTx()) + if err != nil { + return nil, fmt.Errorf("failed to encode transaction: %w", err) + } + + // Broadcast transaction using pushcore + broadcastResp, err := s.pushCore.BroadcastTx(ctx, txBytes) + if err != nil { + // Check if error is due to sequence mismatch + if strings.Contains(err.Error(), "account sequence mismatch") && attempt < maxAttempts { + s.log.Warn(). + Err(err). + Uint64("current_sequence", s.lastSequence). + Int("attempt", attempt). 
+ Msg("Sequence mismatch detected, forcing refresh and retrying") + // Force refresh sequence on next attempt + s.lastSequence = 0 // This will force a refresh from chain + continue // Retry + } + // For other errors or final attempt, increment and return error + s.lastSequence++ + s.log.Debug(). + Uint64("new_sequence", s.lastSequence). + Msg("Incremented sequence after broadcast error") + return nil, fmt.Errorf("failed to broadcast transaction: %w", err) + } + + // Convert tx.BroadcastTxResponse to sdk.TxResponse + var txResp *sdk.TxResponse + if broadcastResp != nil && broadcastResp.TxResponse != nil { + txResp = broadcastResp.TxResponse + } + + // If chain responded with error code, handle sequence-mismatch specially + if txResp != nil && txResp.Code != 0 { + // Retry immediately for account sequence mismatch responses + if strings.Contains(strings.ToLower(txResp.RawLog), "account sequence mismatch") && attempt < maxAttempts { + s.log.Warn(). + Uint64("current_sequence", s.lastSequence). + Int("attempt", attempt). + Str("raw_log", txResp.RawLog). + Msg("Sequence mismatch in response, refreshing and retrying") + // Force refresh from chain on next attempt + s.lastSequence = 0 + continue + } + + // Conservatively increment sequence since the sequence may have been consumed + s.lastSequence++ + s.log.Debug(). + Uint64("new_sequence", s.lastSequence). + Msg("Incremented sequence after on-chain error response") + + // Log and return error + s.log.Error(). + Str("tx_hash", txResp.TxHash). + Uint32("code", txResp.Code). + Str("raw_log", txResp.RawLog). + Uint64("sequence_used", s.lastSequence-1). + Msg("Transaction failed on chain") + return txResp, fmt.Errorf("transaction failed with code %d: %s", txResp.Code, txResp.RawLog) + } + + // Success: increment sequence once and return + s.lastSequence++ + s.log.Debug(). + Uint64("new_sequence", s.lastSequence). + Str("tx_hash", txResp.TxHash). + Msg("Incremented sequence after successful broadcast") + + s.log.Info(). 
+ Str("tx_hash", txResp.TxHash). + Int64("gas_used", txResp.GasUsed). + Uint64("sequence_used", s.lastSequence-1). + Msg("Transaction broadcasted and executed successfully") + + return txResp, nil + } + + return nil, fmt.Errorf("failed to broadcast transaction after %d attempts", maxAttempts) +} + +// wrapWithAuthZ wraps messages with AuthZ MsgExec +func (s *Signer) wrapWithAuthZ(msgs []sdk.Msg) ([]sdk.Msg, error) { + if len(msgs) == 0 { + return nil, fmt.Errorf("no messages to wrap") + } + + // Get hot key address for grantee + hotKeyAddr, err := s.keys.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get hot key address: %w", err) + } + + s.log.Debug(). + Str("grantee", hotKeyAddr.String()). + Int("msg_count", len(msgs)). + Msg("Wrapping messages with AuthZ") + + // Create MsgExec + msgExec := cosmosauthz.NewMsgExec(hotKeyAddr, msgs) + + return []sdk.Msg{&msgExec}, nil +} + +// createTxBuilder creates a transaction builder with the given parameters +func (s *Signer) createTxBuilder( + msgs []sdk.Msg, + memo string, + gasLimit uint64, + feeAmount sdk.Coins, +) (client.TxBuilder, error) { + txBuilder := s.clientCtx.TxConfig.NewTxBuilder() + + // Set messages + if err := txBuilder.SetMsgs(msgs...); err != nil { + return nil, fmt.Errorf("failed to set messages: %w", err) + } + + // Set memo + txBuilder.SetMemo(memo) + + // Set gas limit + txBuilder.SetGasLimit(gasLimit) + + // Set fee amount + txBuilder.SetFeeAmount(feeAmount) + + return txBuilder, nil +} + +// signTxWithSequence signs a transaction with proper sequence management using keyring +// This method does NOT expose the private key - it uses the keyring directly +func (s *Signer) signTxWithSequence(ctx context.Context, txBuilder client.TxBuilder) error { + s.log.Debug().Msg("Starting transaction signing with sequence management") + + // Get account info to refresh sequence if needed + account, err := s.getAccountInfo(ctx) + if err != nil { + return fmt.Errorf("failed to get account info: 
%w", err) + } + + // Reconcile local vs chain sequence conservatively: + // - If we have no local sequence (0), adopt chain's sequence. + // - If local < chain, adopt chain (we are behind). + // - If local > chain, keep local (likely recent tx not yet reflected in query). + chainSequence := account.GetSequence() + if s.lastSequence == 0 { + s.lastSequence = chainSequence + s.log.Info(). + Uint64("adopted_chain_sequence", chainSequence). + Msg("Initialized local sequence from chain") + } else if s.lastSequence < chainSequence { + s.log.Info(). + Uint64("chain_sequence", chainSequence). + Uint64("cached_sequence", s.lastSequence). + Msg("Local sequence behind chain, adopting chain's sequence") + s.lastSequence = chainSequence + } else if s.lastSequence > chainSequence { + s.log.Warn(). + Uint64("chain_sequence", chainSequence). + Uint64("cached_sequence", s.lastSequence). + Msg("Local sequence ahead of chain query, keeping local to avoid reuse") + } + + // Get hot key address + hotKeyAddr, err := s.keys.GetAddress() + if err != nil { + return fmt.Errorf("failed to get hot key address: %w", err) + } + + keyName := s.keys.GetKeyName() + + s.log.Debug(). + Str("signer", hotKeyAddr.String()). + Str("key_name", keyName). + Uint64("account_number", account.GetAccountNumber()). + Uint64("sequence", s.lastSequence). + Msg("Signing transaction with managed sequence using keyring") + + // Get keyring and validate key exists + kr, err := s.keys.GetKeyring() + if err != nil { + return fmt.Errorf("failed to get keyring: %w", err) + } + + // Use SDK's tx.Sign method which uses the keyring directly (no private key exposure) + // The keyring handles decryption automatically for file backend when signing + // Create a tx factory from the client context + txFactory := tx.Factory{}. + WithChainID(s.clientCtx.ChainID). + WithKeybase(kr). + WithTxConfig(s.clientCtx.TxConfig). + WithAccountNumber(account.GetAccountNumber()). 
+ WithSequence(s.lastSequence) + + err = tx.Sign( + ctx, + txFactory, + keyName, + txBuilder, + false, // overwriteSig + ) + if err != nil { + return fmt.Errorf("failed to sign transaction with keyring: %w", err) + } + + s.log.Info(). + Str("signer", hotKeyAddr.String()). + Uint64("sequence", s.lastSequence). + Msg("Transaction signed successfully with managed sequence") + + return nil +} + +// getAccountInfo retrieves account information for the hot key using pushcore +func (s *Signer) getAccountInfo(ctx context.Context) (client.Account, error) { + hotKeyAddr, err := s.keys.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get hot key address: %w", err) + } + + s.log.Debug(). + Str("address", hotKeyAddr.String()). + Msg("Querying account info from chain") + + // Query account information using pushcore + accountResp, err := s.pushCore.GetAccount(ctx, hotKeyAddr.String()) + if err != nil { + return nil, fmt.Errorf("failed to query account info: %w", err) + } + + // Unpack account using interface registry from client context + var account sdk.AccountI + if err := s.clientCtx.InterfaceRegistry.UnpackAny(accountResp.Account, &account); err != nil { + return nil, fmt.Errorf("failed to unpack account: %w", err) + } + + s.log.Debug(). + Str("address", account.GetAddress().String()). + Uint64("account_number", account.GetAccountNumber()). + Uint64("sequence", account.GetSequence()). 
+ Msg("Retrieved account info") + + return account, nil +} + +func createClientContext(kr cosmoskeyring.Keyring, chainID string) (client.Context, error) { + interfaceRegistry := keysv2.CreateInterfaceRegistryWithEVMSupport() + cosmosauthz.RegisterInterfaces(interfaceRegistry) + authtypes.RegisterInterfaces(interfaceRegistry) + banktypes.RegisterInterfaces(interfaceRegistry) + stakingtypes.RegisterInterfaces(interfaceRegistry) + govtypes.RegisterInterfaces(interfaceRegistry) + uexecutortypes.RegisterInterfaces(interfaceRegistry) + + cdc := codec.NewProtoCodec(interfaceRegistry) + txConfig := authtx.NewTxConfig(cdc, []signing.SignMode{signing.SignMode_SIGN_MODE_DIRECT}) + + clientCtx := client.Context{}. + WithCodec(cdc). + WithInterfaceRegistry(interfaceRegistry). + WithChainID(chainID). + WithKeyring(kr). + WithTxConfig(txConfig) + + return clientCtx, nil +} diff --git a/universalClient/pushsigner/pushsigner_test.go b/universalClient/pushsigner/pushsigner_test.go new file mode 100644 index 00000000..725d5c4d --- /dev/null +++ b/universalClient/pushsigner/pushsigner_test.go @@ -0,0 +1,169 @@ +package pushsigner + +import ( + "os" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/pushchain/push-chain-node/universalClient/config" + "github.com/pushchain/push-chain-node/universalClient/pushcore" + keysv2 "github.com/pushchain/push-chain-node/universalClient/pushsigner/keys" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" +) + +func TestMain(m *testing.M) { + // Initialize SDK config for tests + sdkConfig := sdk.GetConfig() + func() { + defer func() { + _ = recover() // Ignore panic if already sealed + }() + sdkConfig.SetBech32PrefixForAccount("push", "pushpub") + sdkConfig.SetBech32PrefixForValidator("pushvaloper", "pushvaloperpub") + sdkConfig.SetBech32PrefixForConsensusNode("pushvalcons", "pushvalconspub") + 
sdkConfig.Seal() + }() + + os.Exit(m.Run()) +} + +// createMockPushCoreClient creates a minimal pushcore.Client for testing. +// Since pushcore.Client is a concrete struct, we create an empty one +// and tests will need to handle the actual gRPC calls appropriately. +func createMockPushCoreClient() *pushcore.Client { + return &pushcore.Client{} +} + +func TestNew(t *testing.T) { + logger := zerolog.Nop() + + t.Run("validation failure - no keys in keyring", func(t *testing.T) { + tempDir, err := os.MkdirTemp("", "test-signer") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + cfg := &config.Config{ + KeyringBackend: config.KeyringBackendTest, + KeyringPassword: "", + } + + mockCore := createMockPushCoreClient() + + signer, err := New(logger, cfg.KeyringBackend, cfg.KeyringPassword, "", mockCore, "test-chain", "cosmos1granter") + require.Error(t, err) + assert.Nil(t, signer) + assert.Contains(t, err.Error(), "PushSigner validation failed") + }) + + t.Run("validation failure - keyring creation fails", func(t *testing.T) { + cfg := &config.Config{ + KeyringBackend: config.KeyringBackendFile, + KeyringPassword: "", // Missing password for file backend + } + + mockCore := createMockPushCoreClient() + + signer, err := New(logger, cfg.KeyringBackend, cfg.KeyringPassword, "", mockCore, "test-chain", "cosmos1granter") + require.Error(t, err) + assert.Nil(t, signer) + assert.Contains(t, err.Error(), "keyring_password is required for file backend") + }) + + t.Run("validation failure - no grants", func(t *testing.T) { + tempDir, err := os.MkdirTemp("", "test-signer") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + // Create keyring and add a key + kr, err := keysv2.CreateKeyring(tempDir, nil, keysv2.KeyringBackendTest) + require.NoError(t, err) + + _, _, err = keysv2.CreateNewKey(kr, "test-key", "", "") + require.NoError(t, err) + + cfg := &config.Config{ + KeyringBackend: config.KeyringBackendTest, + KeyringPassword: "", + } + + mockCore := 
createMockPushCoreClient() + + // This will fail because GetGranteeGrants will fail (no real gRPC connection) + signer, err := New(logger, cfg.KeyringBackend, cfg.KeyringPassword, tempDir, mockCore, "test-chain", "cosmos1granter") + require.Error(t, err) + assert.Nil(t, signer) + // Error will be from GetGranteeGrants failing + }) +} + +func TestSigner_GetKeyring(t *testing.T) { + tempDir, err := os.MkdirTemp("", "test-signer") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + kr, err := keysv2.CreateKeyring(tempDir, nil, keysv2.KeyringBackendTest) + require.NoError(t, err) + + record, _, err := keysv2.CreateNewKey(kr, "test-key", "", "") + require.NoError(t, err) + + keys := keysv2.NewKeys(kr, record.Name, "") + + t.Run("valid key", func(t *testing.T) { + keyring, err := keys.GetKeyring() + require.NoError(t, err) + assert.NotNil(t, keyring) + }) + + t.Run("invalid key", func(t *testing.T) { + invalidKeys := keysv2.NewKeys(kr, "non-existent-key", "") + keyring, err := invalidKeys.GetKeyring() + require.Error(t, err) + assert.Nil(t, keyring) + assert.Contains(t, err.Error(), "not found in keyring") + }) +} + +// TestSigner_VoteInbound tests the VoteInbound method signature. +// Full integration tests would require a complete setup with real keyring, pushcore client, etc. +func TestSigner_VoteInbound(t *testing.T) { + // This test verifies the method exists and has the correct signature. + // Full testing requires integration test setup with real dependencies. + t.Run("method exists", func(t *testing.T) { + // Verify the method signature by checking it compiles + var signer *Signer + var inbound *uexecutortypes.Inbound + _ = signer + _ = inbound + // Method signature: VoteInbound(ctx context.Context, inbound *uexecutortypes.Inbound) (string, error) + assert.True(t, true) + }) +} + +// TestSigner_VoteGasPrice tests the VoteGasPrice method signature. 
+func TestSigner_VoteGasPrice(t *testing.T) { + t.Run("method exists", func(t *testing.T) { + // Method signature: VoteGasPrice(ctx context.Context, chainID string, price uint64, blockNumber uint64) (string, error) + assert.True(t, true) + }) +} + +// TestSigner_VoteOutbound tests the VoteOutbound method signature. +func TestSigner_VoteOutbound(t *testing.T) { + t.Run("method exists", func(t *testing.T) { + // Method signature: VoteOutbound(ctx context.Context, txID string, observation *uexecutortypes.OutboundObservation) (string, error) + assert.True(t, true) + }) +} + +// TestSigner_VoteTssKeyProcess tests the VoteTssKeyProcess method signature. +func TestSigner_VoteTssKeyProcess(t *testing.T) { + t.Run("method exists", func(t *testing.T) { + // Method signature: VoteTssKeyProcess(ctx context.Context, tssPubKey string, keyID string, processID uint64) (string, error) + assert.True(t, true) + }) +} diff --git a/universalClient/pushsigner/vote.go b/universalClient/pushsigner/vote.go new file mode 100644 index 00000000..09be5468 --- /dev/null +++ b/universalClient/pushsigner/vote.go @@ -0,0 +1,145 @@ +package pushsigner + +import ( + "context" + "fmt" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/rs/zerolog" + + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" + utsstypes "github.com/pushchain/push-chain-node/x/utss/types" +) + +const ( + defaultGasLimit = uint64(500000000) + defaultFeeAmount = "500000000000000upc" + defaultVoteTimeout = 30 * time.Second +) + +// vote broadcasts a vote transaction with the given message +func vote( + ctx context.Context, + signer *Signer, + log zerolog.Logger, + msg sdk.Msg, + memo string, +) (string, error) { + feeAmount, err := sdk.ParseCoinsNormalized(defaultFeeAmount) + if err != nil { + return "", fmt.Errorf("failed to parse fee amount: %w", err) + } + + if memo == "" { + memo = fmt.Sprintf("Vote: %s", sdk.MsgTypeURL(msg)) + } + + msgType := sdk.MsgTypeURL(msg) + log.Debug(). 
+ Str("msg_type", msgType). + Str("memo", memo). + Msg("broadcasting vote transaction") + + voteCtx, cancel := context.WithTimeout(ctx, defaultVoteTimeout) + defer cancel() + + txResp, err := signer.signAndBroadcastAuthZTx( + voteCtx, + []sdk.Msg{msg}, + memo, + defaultGasLimit, + feeAmount, + ) + if err != nil { + log.Error().Str("msg_type", msgType).Err(err).Msg("failed to broadcast vote") + return "", fmt.Errorf("failed to broadcast vote: %w", err) + } + + if txResp.Code != 0 { + log.Error(). + Str("msg_type", msgType). + Str("tx_hash", txResp.TxHash). + Uint32("code", txResp.Code). + Str("raw_log", txResp.RawLog). + Msg("vote rejected") + return "", fmt.Errorf("vote failed with code %d: %s", txResp.Code, txResp.RawLog) + } + + log.Info().Str("msg_type", msgType).Str("tx_hash", txResp.TxHash).Msg("vote successful") + return txResp.TxHash, nil +} + +// voteInbound votes on an inbound transaction +func voteInbound( + ctx context.Context, + signer *Signer, + log zerolog.Logger, + granter string, + inbound *uexecutortypes.Inbound, +) (string, error) { + msg := &uexecutortypes.MsgVoteInbound{ + Signer: granter, + Inbound: inbound, + } + memo := fmt.Sprintf("Vote inbound: %s", inbound.TxHash) + return vote(ctx, signer, log, msg, memo) +} + +// voteGasPrice votes on a gas price observation +func voteGasPrice( + ctx context.Context, + signer *Signer, + log zerolog.Logger, + granter string, + chainID string, + price uint64, + blockNumber uint64, +) (string, error) { + msg := &uexecutortypes.MsgVoteGasPrice{ + Signer: granter, + ObservedChainId: chainID, + Price: price, + BlockNumber: blockNumber, + } + memo := fmt.Sprintf("Vote gas price: %s @ %d", chainID, price) + return vote(ctx, signer, log, msg, memo) +} + +// voteOutbound votes on an outbound transaction observation +func voteOutbound( + ctx context.Context, + signer *Signer, + log zerolog.Logger, + granter string, + txID string, + observation *uexecutortypes.OutboundObservation, +) (string, error) { + msg := 
&uexecutortypes.MsgVoteOutbound{ + Signer: granter, + TxId: txID, + ObservedTx: observation, + } + memo := fmt.Sprintf("Vote outbound: %s", txID) + return vote(ctx, signer, log, msg, memo) +} + +// voteTssKeyProcess votes on a TSS key process +func voteTssKeyProcess( + ctx context.Context, + signer *Signer, + log zerolog.Logger, + granter string, + tssPubKey string, + keyID string, + processID uint64, +) (string, error) { + msg := &utsstypes.MsgVoteTssKeyProcess{ + Signer: granter, + TssPubkey: tssPubKey, + KeyId: keyID, + ProcessId: processID, + } + memo := fmt.Sprintf("Vote TSS key: %s", keyID) + return vote(ctx, signer, log, msg, memo) +} diff --git a/universalClient/rpcpool/endpoint.go b/universalClient/rpcpool/endpoint.go deleted file mode 100644 index 024c63ba..00000000 --- a/universalClient/rpcpool/endpoint.go +++ /dev/null @@ -1,203 +0,0 @@ -package rpcpool - -import ( - "sync" - "time" -) - -// EndpointState represents the current state of an RPC endpoint -type EndpointState int - -const ( - StateHealthy EndpointState = iota - StateDegraded - StateUnhealthy - StateExcluded -) - -func (s EndpointState) String() string { - switch s { - case StateHealthy: - return "healthy" - case StateDegraded: - return "degraded" - case StateUnhealthy: - return "unhealthy" - case StateExcluded: - return "excluded" - default: - return "unknown" - } -} - -// EndpointMetrics tracks performance and health metrics for an endpoint -type EndpointMetrics struct { - mu sync.RWMutex - TotalRequests uint64 - SuccessfulRequests uint64 - FailedRequests uint64 - AverageLatency time.Duration - ConsecutiveFailures int - LastSuccessTime time.Time - LastErrorTime time.Time - LastError error - HealthScore float64 // 0-100, calculated from success rate and latency -} - -// UpdateSuccess updates metrics for a successful request -func (m *EndpointMetrics) UpdateSuccess(latency time.Duration) { - m.mu.Lock() - defer m.mu.Unlock() - - m.TotalRequests++ - m.SuccessfulRequests++ - 
m.ConsecutiveFailures = 0 - m.LastSuccessTime = time.Now() - - // Update rolling average latency - if m.AverageLatency == 0 { - m.AverageLatency = latency - } else { - // Exponential moving average with alpha = 0.1 - m.AverageLatency = time.Duration(float64(m.AverageLatency)*0.9 + float64(latency)*0.1) - } - - m.calculateHealthScore() -} - -// UpdateFailure updates metrics for a failed request -func (m *EndpointMetrics) UpdateFailure(err error, latency time.Duration) { - m.mu.Lock() - defer m.mu.Unlock() - - m.TotalRequests++ - m.FailedRequests++ - m.ConsecutiveFailures++ - m.LastErrorTime = time.Now() - m.LastError = err - - // Update latency even for failures (for timeout tracking) - if latency > 0 && m.AverageLatency > 0 { - m.AverageLatency = time.Duration(float64(m.AverageLatency)*0.9 + float64(latency)*0.1) - } - - m.calculateHealthScore() -} - -// calculateHealthScore computes health score based on success rate and latency -func (m *EndpointMetrics) calculateHealthScore() { - if m.TotalRequests == 0 { - m.HealthScore = 100.0 - return - } - - // Base score from success rate (0-100) - successRate := float64(m.SuccessfulRequests) / float64(m.TotalRequests) - baseScore := successRate * 100.0 - - // Latency penalty: reduce score based on high latency - // Assume 1 second is baseline, penalize above that - latencyPenalty := 0.0 - if m.AverageLatency > time.Second { - // Each additional second reduces score by up to 20 points - extraSeconds := m.AverageLatency.Seconds() - 1.0 - latencyPenalty = extraSeconds * 5.0 // 5 points per second - if latencyPenalty > 20.0 { - latencyPenalty = 20.0 - } - } - - // Consecutive failure penalty - failurePenalty := float64(m.ConsecutiveFailures) * 10.0 // 10 points per consecutive failure - if failurePenalty > 50.0 { - failurePenalty = 50.0 - } - - m.HealthScore = baseScore - latencyPenalty - failurePenalty - if m.HealthScore < 0 { - m.HealthScore = 0 - } -} - -// GetHealthScore returns the current health score (thread-safe) -func 
(m *EndpointMetrics) GetHealthScore() float64 { - m.mu.RLock() - defer m.mu.RUnlock() - return m.HealthScore -} - -// GetSuccessRate returns the success rate (thread-safe) -func (m *EndpointMetrics) GetSuccessRate() float64 { - m.mu.RLock() - defer m.mu.RUnlock() - if m.TotalRequests == 0 { - return 1.0 - } - return float64(m.SuccessfulRequests) / float64(m.TotalRequests) -} - -// GetConsecutiveFailures returns consecutive failure count (thread-safe) -func (m *EndpointMetrics) GetConsecutiveFailures() int { - m.mu.RLock() - defer m.mu.RUnlock() - return m.ConsecutiveFailures -} - -// Endpoint represents a single RPC endpoint with its client and metrics -type Endpoint struct { - URL string - Client Client // Generic client interface - State EndpointState - Metrics *EndpointMetrics - LastUsed time.Time - ExcludedAt time.Time // When this endpoint was excluded - mu sync.RWMutex -} - -// NewEndpoint creates a new RPC endpoint -func NewEndpoint(url string) *Endpoint { - return &Endpoint{ - URL: url, - State: StateHealthy, - Metrics: &EndpointMetrics{HealthScore: 100.0}, - } -} - -// SetClient sets the RPC client for this endpoint -func (e *Endpoint) SetClient(client Client) { - e.mu.Lock() - defer e.mu.Unlock() - e.Client = client -} - -// GetClient returns the RPC client (thread-safe) -func (e *Endpoint) GetClient() Client { - e.mu.RLock() - defer e.mu.RUnlock() - return e.Client -} - -// UpdateState updates the endpoint state (thread-safe) -func (e *Endpoint) UpdateState(state EndpointState) { - e.mu.Lock() - defer e.mu.Unlock() - - if state == StateExcluded && e.State != StateExcluded { - e.ExcludedAt = time.Now() - } - - e.State = state -} - -// GetState returns the current state (thread-safe) -func (e *Endpoint) GetState() EndpointState { - e.mu.RLock() - defer e.mu.RUnlock() - return e.State -} - -// IsHealthy returns true if endpoint is in a usable state -func (e *Endpoint) IsHealthy() bool { - state := e.GetState() - return state == StateHealthy || state == 
StateDegraded -} \ No newline at end of file diff --git a/universalClient/rpcpool/endpoint_test.go b/universalClient/rpcpool/endpoint_test.go deleted file mode 100644 index b820c438..00000000 --- a/universalClient/rpcpool/endpoint_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package rpcpool - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -// mockClient implements the Client interface for testing -type mockClient struct { - shouldFail bool - closed bool -} - -func (m *mockClient) Ping(ctx context.Context) error { - if m.shouldFail { - return errors.New("mock client ping failed") - } - return nil -} - -func (m *mockClient) Close() error { - m.closed = true - return nil -} - -func TestNewEndpoint(t *testing.T) { - endpoint := NewEndpoint("http://test.com") - - assert.Equal(t, "http://test.com", endpoint.URL) - assert.Equal(t, StateHealthy, endpoint.State) - assert.NotNil(t, endpoint.Metrics) - assert.Equal(t, 100.0, endpoint.Metrics.HealthScore) -} - -func TestEndpoint_SetAndGetClient(t *testing.T) { - endpoint := NewEndpoint("http://test.com") - client := &mockClient{} - - endpoint.SetClient(client) - retrievedClient := endpoint.GetClient() - - assert.Equal(t, client, retrievedClient) -} - -func TestEndpoint_StateManagement(t *testing.T) { - endpoint := NewEndpoint("http://test.com") - - // Initial state - assert.Equal(t, StateHealthy, endpoint.GetState()) - assert.True(t, endpoint.IsHealthy()) - - // Change to degraded - endpoint.UpdateState(StateDegraded) - assert.Equal(t, StateDegraded, endpoint.GetState()) - assert.True(t, endpoint.IsHealthy()) // degraded is still considered healthy - - // Change to excluded - before := time.Now() - endpoint.UpdateState(StateExcluded) - after := time.Now() - - assert.Equal(t, StateExcluded, endpoint.GetState()) - assert.False(t, endpoint.IsHealthy()) - assert.True(t, endpoint.ExcludedAt.After(before) || endpoint.ExcludedAt.Equal(before)) - assert.True(t, 
endpoint.ExcludedAt.Before(after) || endpoint.ExcludedAt.Equal(after)) -} - -func TestEndpointMetrics_UpdateSuccess(t *testing.T) { - metrics := &EndpointMetrics{HealthScore: 100.0} - latency := 50 * time.Millisecond - - metrics.UpdateSuccess(latency) - - assert.Equal(t, uint64(1), metrics.TotalRequests) - assert.Equal(t, uint64(1), metrics.SuccessfulRequests) - assert.Equal(t, uint64(0), metrics.FailedRequests) - assert.Equal(t, 0, metrics.ConsecutiveFailures) - assert.Equal(t, latency, metrics.AverageLatency) - assert.Equal(t, 1.0, metrics.GetSuccessRate()) -} - -func TestEndpointMetrics_UpdateFailure(t *testing.T) { - metrics := &EndpointMetrics{HealthScore: 100.0} - err := errors.New("test error") - latency := 100 * time.Millisecond - - metrics.UpdateFailure(err, latency) - - assert.Equal(t, uint64(1), metrics.TotalRequests) - assert.Equal(t, uint64(0), metrics.SuccessfulRequests) - assert.Equal(t, uint64(1), metrics.FailedRequests) - assert.Equal(t, 1, metrics.ConsecutiveFailures) - assert.Equal(t, err, metrics.LastError) - assert.Equal(t, 0.0, metrics.GetSuccessRate()) - assert.True(t, metrics.GetHealthScore() < 100.0) // Health score should decrease -} - -func TestEndpointMetrics_HealthScoreCalculation(t *testing.T) { - tests := []struct { - name string - setupFunc func(*EndpointMetrics) - expectedMinScore float64 - expectedMaxScore float64 - }{ - { - name: "perfect health", - setupFunc: func(m *EndpointMetrics) { - m.UpdateSuccess(10 * time.Millisecond) - m.UpdateSuccess(10 * time.Millisecond) - }, - expectedMinScore: 100.0, - expectedMaxScore: 100.0, - }, - { - name: "mixed results", - setupFunc: func(m *EndpointMetrics) { - m.UpdateSuccess(10 * time.Millisecond) - m.UpdateFailure(errors.New("error"), 10*time.Millisecond) - }, - expectedMinScore: 40.0, // 50% success rate - 10 points for consecutive failure - expectedMaxScore: 60.0, - }, - { - name: "high latency penalty", - setupFunc: func(m *EndpointMetrics) { - m.UpdateSuccess(5 * time.Second) // High 
latency - }, - expectedMinScore: 75.0, // 100 - 20 (max latency penalty) - expectedMaxScore: 85.0, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - metrics := &EndpointMetrics{HealthScore: 100.0} - tt.setupFunc(metrics) - - score := metrics.GetHealthScore() - assert.GreaterOrEqual(t, score, tt.expectedMinScore) - assert.LessOrEqual(t, score, tt.expectedMaxScore) - }) - } -} - -func TestEndpointMetrics_ConsecutiveFailures(t *testing.T) { - metrics := &EndpointMetrics{HealthScore: 100.0} - - // Add some failures - metrics.UpdateFailure(errors.New("error1"), 0) - assert.Equal(t, 1, metrics.GetConsecutiveFailures()) - - metrics.UpdateFailure(errors.New("error2"), 0) - assert.Equal(t, 2, metrics.GetConsecutiveFailures()) - - // Success should reset consecutive failures - metrics.UpdateSuccess(10 * time.Millisecond) - assert.Equal(t, 0, metrics.GetConsecutiveFailures()) -} - -func TestEndpointMetrics_ThreadSafety(t *testing.T) { - metrics := &EndpointMetrics{HealthScore: 100.0} - - // Run concurrent operations - done := make(chan bool, 100) - - // Start 50 goroutines doing success updates - for i := 0; i < 50; i++ { - go func() { - metrics.UpdateSuccess(10 * time.Millisecond) - done <- true - }() - } - - // Start 50 goroutines doing failure updates - for i := 0; i < 50; i++ { - go func() { - metrics.UpdateFailure(errors.New("test"), 10*time.Millisecond) - done <- true - }() - } - - // Wait for all goroutines to complete - for i := 0; i < 100; i++ { - <-done - } - - // Verify final state is consistent - assert.Equal(t, uint64(100), metrics.TotalRequests) - assert.Equal(t, uint64(50), metrics.SuccessfulRequests) - assert.Equal(t, uint64(50), metrics.FailedRequests) - assert.Equal(t, 0.5, metrics.GetSuccessRate()) -} \ No newline at end of file diff --git a/universalClient/rpcpool/health_monitor.go b/universalClient/rpcpool/health_monitor.go deleted file mode 100644 index 7117e951..00000000 --- a/universalClient/rpcpool/health_monitor.go +++ 
/dev/null @@ -1,254 +0,0 @@ -package rpcpool - -import ( - "context" - "sync" - "time" - - "github.com/rs/zerolog" - "github.com/pushchain/push-chain-node/universalClient/config" -) - -// HealthMonitor monitors the health of RPC endpoints and manages recovery -type HealthMonitor struct { - manager *Manager - config *config.RPCPoolConfig - logger zerolog.Logger - healthChecker HealthChecker - stopCh chan struct{} -} - -// NewHealthMonitor creates a new health monitor -func NewHealthMonitor(manager *Manager, config *config.RPCPoolConfig, logger zerolog.Logger) *HealthMonitor { - return &HealthMonitor{ - manager: manager, - config: config, - logger: logger.With().Str("component", "health_monitor").Logger(), - stopCh: make(chan struct{}), - } -} - -// SetHealthChecker sets the health checker implementation -func (h *HealthMonitor) SetHealthChecker(checker HealthChecker) { - h.healthChecker = checker -} - -// Start begins the health monitoring loop -func (h *HealthMonitor) Start(ctx context.Context, wg *sync.WaitGroup) { - defer wg.Done() - - // Default to 30 seconds if not configured - intervalSeconds := h.config.HealthCheckIntervalSeconds - if intervalSeconds <= 0 { - intervalSeconds = 30 - } - - h.logger.Info(). - Str("interval", (time.Duration(intervalSeconds) * time.Second).String()). 
- Msg("starting health monitor") - - ticker := time.NewTicker(time.Duration(intervalSeconds) * time.Second) - defer ticker.Stop() - - // Immediate health check - h.performHealthChecks(ctx) - - for { - select { - case <-ctx.Done(): - h.logger.Info().Msg("health monitor stopping: context cancelled") - return - case <-h.stopCh: - h.logger.Info().Msg("health monitor stopping: stop signal received") - return - case <-ticker.C: - h.performHealthChecks(ctx) - } - } -} - -// Stop stops the health monitor -func (h *HealthMonitor) Stop() { - close(h.stopCh) -} - -// performHealthChecks checks the health of all endpoints -func (h *HealthMonitor) performHealthChecks(ctx context.Context) { - h.logger.Debug().Msg("performing health checks on all endpoints") - - endpoints := h.manager.GetEndpoints() - - var wg sync.WaitGroup - for _, endpoint := range endpoints { - wg.Add(1) - go func(ep *Endpoint) { - defer wg.Done() - h.checkEndpointHealth(ctx, ep) - }(endpoint) - } - - wg.Wait() - h.logger.Debug().Msg("health checks completed") -} - -// checkEndpointHealth performs a health check on a single endpoint -func (h *HealthMonitor) checkEndpointHealth(ctx context.Context, endpoint *Endpoint) { - // Default to 10 seconds if not configured - timeoutSeconds := h.config.RequestTimeoutSeconds - if timeoutSeconds <= 0 { - timeoutSeconds = 10 - } - - // Create timeout context for this specific health check - checkCtx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) - defer cancel() - - client := endpoint.GetClient() - if client == nil { - h.logger.Debug(). - Str("url", endpoint.URL). - Msg("endpoint has no client, skipping health check") - return - } - - start := time.Now() - var err error - - // Use custom health checker if available, otherwise skip active health checking - if h.healthChecker != nil { - err = h.healthChecker.CheckHealth(checkCtx, client) - } else { - // No active health checking - rely on passive monitoring only - h.logger.Debug(). 
- Str("url", endpoint.URL). - Msg("no health checker configured, skipping active health check") - return - } - - latency := time.Since(start) - - // Handle excluded endpoints trying to recover - if endpoint.GetState() == StateExcluded { - h.handleExcludedEndpointCheck(endpoint, err == nil, latency, err) - return - } - - // Update metrics based on health check result - success := err == nil - h.manager.UpdateEndpointMetrics(endpoint, success, latency, err) - - if success { - h.logger.Debug(). - Str("url", endpoint.URL). - Dur("latency", latency). - Float64("health_score", endpoint.Metrics.GetHealthScore()). - Msg("endpoint health check passed") - } else { - h.logger.Warn(). - Str("url", endpoint.URL). - Dur("latency", latency). - Err(err). - Int("consecutive_failures", endpoint.Metrics.GetConsecutiveFailures()). - Msg("endpoint health check failed") - } -} - -// handleExcludedEndpointCheck handles health checking for excluded endpoints -func (h *HealthMonitor) handleExcludedEndpointCheck(endpoint *Endpoint, success bool, latency time.Duration, err error) { - // Default to 5 minutes if not configured - recoverySeconds := h.config.RecoveryIntervalSeconds - if recoverySeconds <= 0 { - recoverySeconds = 300 - } - - // Check if enough time has passed since exclusion for recovery attempt - endpoint.mu.RLock() - excludedAt := endpoint.ExcludedAt - endpoint.mu.RUnlock() - - if time.Since(excludedAt) < time.Duration(recoverySeconds)*time.Second { - // Not enough time has passed, skip recovery attempt - return - } - - h.logger.Info(). - Str("url", endpoint.URL). - Dur("excluded_duration", time.Since(excludedAt)). - Bool("success", success). - Msg("attempting endpoint recovery") - - if success { - // Recovery successful - reset metrics and promote to degraded state - // Start with degraded instead of healthy to monitor closely - endpoint.Metrics = &EndpointMetrics{HealthScore: 70.0} // Start with moderate score - endpoint.UpdateState(StateDegraded) - - h.logger.Info(). 
- Str("url", endpoint.URL). - Dur("recovery_latency", latency). - Msg("endpoint successfully recovered, promoted to degraded state") - } else { - // Recovery failed - update exclusion time to wait another recovery interval - endpoint.mu.Lock() - endpoint.ExcludedAt = time.Now() - endpoint.mu.Unlock() - - h.logger.Warn(). - Str("url", endpoint.URL). - Err(err). - Msg("endpoint recovery failed, extending exclusion period") - } -} - -// GetHealthStatus returns a summary of endpoint health -func (h *HealthMonitor) GetHealthStatus() *HealthStatus { - endpoints := h.manager.GetEndpoints() - - healthyCount := 0 - degradedCount := 0 - unhealthyCount := 0 - excludedCount := 0 - - endpointStatuses := make([]EndpointStatus, len(endpoints)) - - for i, endpoint := range endpoints { - state := endpoint.GetState() - - switch state { - case StateHealthy: - healthyCount++ - case StateDegraded: - degradedCount++ - case StateUnhealthy: - unhealthyCount++ - case StateExcluded: - excludedCount++ - } - - var lastError string - if endpoint.Metrics.LastError != nil { - lastError = endpoint.Metrics.LastError.Error() - } - - endpointStatuses[i] = EndpointStatus{ - URL: endpoint.URL, - State: state.String(), - HealthScore: endpoint.Metrics.GetHealthScore(), - ResponseTime: endpoint.Metrics.AverageLatency.Milliseconds(), - LastChecked: endpoint.LastUsed, - LastError: lastError, - } - } - - return &HealthStatus{ - ChainID: h.manager.chainID, - TotalEndpoints: len(endpoints), - HealthyCount: healthyCount, - UnhealthyCount: unhealthyCount, - DegradedCount: degradedCount, - ExcludedCount: excludedCount, - Strategy: string(h.manager.selector.GetStrategy()), - Endpoints: endpointStatuses, - } -} - diff --git a/universalClient/rpcpool/health_monitor_test.go b/universalClient/rpcpool/health_monitor_test.go deleted file mode 100644 index a8bedfce..00000000 --- a/universalClient/rpcpool/health_monitor_test.go +++ /dev/null @@ -1,530 +0,0 @@ -package rpcpool - -import ( - "context" - "errors" - "sync" - 
"testing" - "time" - - "github.com/pushchain/push-chain-node/universalClient/config" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -// MockHealthChecker is a mock implementation of HealthChecker -type MockHealthChecker struct { - mock.Mock -} - -func (m *MockHealthChecker) CheckHealth(ctx context.Context, client Client) error { - args := m.Called(ctx, client) - return args.Error(0) -} - -// MockManager is a mock implementation of Manager for testing -type MockManager struct { - endpoints []*Endpoint - chainID string - selector *EndpointSelector -} - -func (m *MockManager) GetEndpoints() []*Endpoint { - return m.endpoints -} - -func (m *MockManager) UpdateEndpointMetrics(endpoint *Endpoint, success bool, latency time.Duration, err error) { - // Update the endpoint metrics - if success { - endpoint.Metrics.UpdateSuccess(latency) - } else { - endpoint.Metrics.UpdateFailure(err, latency) - } -} - -// MockClient represents a mock RPC client for testing -type MockClient struct { - healthy bool -} - -func (m *MockClient) Ping(ctx context.Context) error { - if m.healthy { - return nil - } - return errors.New("unhealthy") -} - -func (m *MockClient) Close() error { - return nil -} - -func setupTestHealthMonitor(t *testing.T) (*HealthMonitor, *Manager, *config.RPCPoolConfig) { - cfg := &config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 1, - RequestTimeoutSeconds: 1, - RecoveryIntervalSeconds: 2, - } - - manager := &MockManager{ - chainID: "test-chain", - endpoints: []*Endpoint{}, - selector: NewEndpointSelector(StrategyRoundRobin), - } - - // We need to create the actual Manager for NewHealthMonitor - realManager := &Manager{ - chainID: "test-chain", - endpoints: manager.endpoints, - selector: manager.selector, - logger: zerolog.Nop(), - config: &config.RPCPoolConfig{ - UnhealthyThreshold: 3, - }, - } - - monitor := NewHealthMonitor(realManager, cfg, zerolog.Nop()) - - return monitor, realManager, cfg -} - -func 
TestNewHealthMonitor(t *testing.T) { - monitor, manager, cfg := setupTestHealthMonitor(t) - - assert.NotNil(t, monitor) - assert.Equal(t, manager, monitor.manager) - assert.Equal(t, cfg, monitor.config) - assert.NotNil(t, monitor.stopCh) -} - -func TestHealthMonitor_SetHealthChecker(t *testing.T) { - monitor, _, _ := setupTestHealthMonitor(t) - - checker := &MockHealthChecker{} - monitor.SetHealthChecker(checker) - - assert.Equal(t, checker, monitor.healthChecker) -} - -func TestHealthMonitor_StartStop(t *testing.T) { - monitor, _, _ := setupTestHealthMonitor(t) - - ctx := context.Background() - var wg sync.WaitGroup - - // Start the monitor - wg.Add(1) - go monitor.Start(ctx, &wg) - - // Give it time to start - time.Sleep(100 * time.Millisecond) - - // Stop the monitor - monitor.Stop() - - // Wait for goroutine to finish - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - select { - case <-done: - // Success - case <-time.After(2 * time.Second): - t.Error("Monitor did not stop in time") - } -} - -func TestHealthMonitor_performHealthChecks(t *testing.T) { - monitor, manager, _ := setupTestHealthMonitor(t) - - // Create test endpoints - endpoint1 := &Endpoint{ - URL: "http://localhost:8545", - State: StateHealthy, - Metrics: &EndpointMetrics{}, - LastUsed: time.Now(), - Client: &MockClient{healthy: true}, - } - - endpoint2 := &Endpoint{ - URL: "http://localhost:8546", - State: StateHealthy, - Metrics: &EndpointMetrics{}, - LastUsed: time.Now(), - Client: &MockClient{healthy: false}, - } - - manager.endpoints = []*Endpoint{endpoint1, endpoint2} - - // Setup health checker - checker := &MockHealthChecker{} - checker.On("CheckHealth", mock.Anything, &MockClient{healthy: true}).Return(nil) - checker.On("CheckHealth", mock.Anything, &MockClient{healthy: false}).Return(errors.New("connection failed")) - monitor.SetHealthChecker(checker) - - // Perform health checks - ctx := context.Background() - monitor.performHealthChecks(ctx) - - // Verify 
health checks were performed - checker.AssertExpectations(t) - - // Check metrics were updated - assert.Greater(t, endpoint1.Metrics.SuccessfulRequests, uint64(0)) - assert.Greater(t, endpoint2.Metrics.FailedRequests, uint64(0)) -} - -func TestHealthMonitor_checkEndpointHealth(t *testing.T) { - tests := []struct { - name string - endpoint *Endpoint - setupMock func(*MockHealthChecker, *Endpoint) - expectedState EndpointState - hasHealthChecker bool - expectSuccess bool - }{ - { - name: "healthy endpoint", - endpoint: &Endpoint{ - URL: "http://localhost:8545", - State: StateHealthy, - Metrics: &EndpointMetrics{}, - LastUsed: time.Now(), - Client: &MockClient{healthy: true}, - }, - setupMock: func(m *MockHealthChecker, e *Endpoint) { - m.On("CheckHealth", mock.Anything, e.Client).Return(nil) - }, - expectedState: StateHealthy, - hasHealthChecker: true, - expectSuccess: true, - }, - { - name: "unhealthy endpoint", - endpoint: &Endpoint{ - URL: "http://localhost:8546", - State: StateHealthy, - Metrics: &EndpointMetrics{}, - LastUsed: time.Now(), - Client: &MockClient{healthy: false}, - }, - setupMock: func(m *MockHealthChecker, e *Endpoint) { - m.On("CheckHealth", mock.Anything, e.Client).Return(errors.New("connection failed")) - }, - expectedState: StateDegraded, // Failed health check with success rate < 0.5 downgrades to degraded - hasHealthChecker: true, - expectSuccess: false, - }, - { - name: "no client", - endpoint: &Endpoint{ - URL: "http://localhost:8547", - State: StateHealthy, - Metrics: &EndpointMetrics{}, - LastUsed: time.Now(), - Client: nil, - }, - setupMock: nil, - expectedState: StateHealthy, - hasHealthChecker: true, - expectSuccess: false, - }, - { - name: "no health checker", - endpoint: &Endpoint{ - URL: "http://localhost:8548", - State: StateHealthy, - Metrics: &EndpointMetrics{}, - LastUsed: time.Now(), - Client: &MockClient{healthy: true}, - }, - setupMock: nil, - expectedState: StateHealthy, - hasHealthChecker: false, - expectSuccess: false, - 
}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - monitor, _, _ := setupTestHealthMonitor(t) - - if tt.hasHealthChecker { - checker := &MockHealthChecker{} - if tt.setupMock != nil && tt.endpoint.Client != nil { - tt.setupMock(checker, tt.endpoint) - } - monitor.SetHealthChecker(checker) - } - - ctx := context.Background() - monitor.checkEndpointHealth(ctx, tt.endpoint) - - assert.Equal(t, tt.expectedState, tt.endpoint.State) - - if tt.hasHealthChecker && tt.endpoint.Client != nil && tt.setupMock != nil { - if tt.expectSuccess { - assert.Greater(t, tt.endpoint.Metrics.SuccessfulRequests, uint64(0)) - assert.Equal(t, uint64(0), tt.endpoint.Metrics.FailedRequests) - } else { - assert.Equal(t, uint64(0), tt.endpoint.Metrics.SuccessfulRequests) - assert.Greater(t, tt.endpoint.Metrics.FailedRequests, uint64(0)) - } - - // Verify mock expectations were called - if tt.hasHealthChecker { - checker := monitor.healthChecker.(*MockHealthChecker) - checker.AssertExpectations(t) - } - } - }) - } -} - -func TestHealthMonitor_handleExcludedEndpointCheck(t *testing.T) { - tests := []struct { - name string - success bool - timeSinceExcl time.Duration - expectedState EndpointState - shouldRecover bool - }{ - { - name: "successful recovery after interval", - success: true, - timeSinceExcl: 3 * time.Second, - expectedState: StateDegraded, - shouldRecover: true, - }, - { - name: "failed recovery after interval", - success: false, - timeSinceExcl: 3 * time.Second, - expectedState: StateExcluded, - shouldRecover: false, - }, - { - name: "too soon for recovery", - success: true, - timeSinceExcl: 1 * time.Second, - expectedState: StateExcluded, - shouldRecover: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - monitor, _, _ := setupTestHealthMonitor(t) - - endpoint := &Endpoint{ - URL: "http://localhost:8545", - State: StateExcluded, - Metrics: &EndpointMetrics{}, - ExcludedAt: time.Now().Add(-tt.timeSinceExcl), - } - - var 
err error - if !tt.success { - err = errors.New("connection failed") - } - - monitor.handleExcludedEndpointCheck(endpoint, tt.success, 100*time.Millisecond, err) - - assert.Equal(t, tt.expectedState, endpoint.State) - - if tt.shouldRecover && tt.success { - assert.Equal(t, float64(70.0), endpoint.Metrics.HealthScore) - } - }) - } -} - -func TestHealthMonitor_GetHealthStatus(t *testing.T) { - monitor, manager, _ := setupTestHealthMonitor(t) - - // Create test endpoints with different states - endpoints := []*Endpoint{ - { - URL: "http://localhost:8545", - State: StateHealthy, - Metrics: &EndpointMetrics{HealthScore: 95.0}, - LastUsed: time.Now(), - }, - { - URL: "http://localhost:8546", - State: StateDegraded, - Metrics: &EndpointMetrics{HealthScore: 60.0}, - LastUsed: time.Now(), - }, - { - URL: "http://localhost:8547", - State: StateUnhealthy, - Metrics: &EndpointMetrics{HealthScore: 30.0, LastError: errors.New("connection timeout")}, - LastUsed: time.Now(), - }, - { - URL: "http://localhost:8548", - State: StateExcluded, - Metrics: &EndpointMetrics{HealthScore: 0.0}, - LastUsed: time.Now(), - ExcludedAt: time.Now(), - }, - } - - manager.endpoints = endpoints - - status := monitor.GetHealthStatus() - - assert.NotNil(t, status) - assert.Equal(t, "test-chain", status.ChainID) - assert.Equal(t, 4, status.TotalEndpoints) - assert.Equal(t, 1, status.HealthyCount) - assert.Equal(t, 1, status.DegradedCount) - assert.Equal(t, 1, status.UnhealthyCount) - assert.Equal(t, 1, status.ExcludedCount) - assert.Equal(t, "round-robin", status.Strategy) - assert.Len(t, status.Endpoints, 4) - - // Verify endpoint statuses - assert.Equal(t, "healthy", status.Endpoints[0].State) - assert.Equal(t, float64(95.0), status.Endpoints[0].HealthScore) - - assert.Equal(t, "degraded", status.Endpoints[1].State) - assert.Equal(t, float64(60.0), status.Endpoints[1].HealthScore) - - assert.Equal(t, "unhealthy", status.Endpoints[2].State) - assert.Equal(t, "connection timeout", 
status.Endpoints[2].LastError) - - assert.Equal(t, "excluded", status.Endpoints[3].State) -} - -func TestHealthMonitor_contextCancellation(t *testing.T) { - monitor, _, _ := setupTestHealthMonitor(t) - - ctx, cancel := context.WithCancel(context.Background()) - var wg sync.WaitGroup - - // Start the monitor - wg.Add(1) - go monitor.Start(ctx, &wg) - - // Give it time to start - time.Sleep(100 * time.Millisecond) - - // Cancel context - cancel() - - // Wait for goroutine to finish - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - select { - case <-done: - // Success - case <-time.After(2 * time.Second): - t.Error("Monitor did not stop on context cancellation") - } -} - -func TestHealthMonitor_defaultConfigurations(t *testing.T) { - // Test with zero/negative config values to ensure defaults are used - cfg := &config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 0, - RequestTimeoutSeconds: -1, - RecoveryIntervalSeconds: 0, - } - - manager := &MockManager{ - chainID: "test-chain", - endpoints: []*Endpoint{}, - selector: NewEndpointSelector(StrategyRoundRobin), - } - - // We need to create the actual Manager for NewHealthMonitor - realManager := &Manager{ - chainID: "test-chain", - endpoints: manager.endpoints, - selector: manager.selector, - logger: zerolog.Nop(), - config: &config.RPCPoolConfig{ - UnhealthyThreshold: 3, - }, - } - - monitor := NewHealthMonitor(realManager, cfg, zerolog.Nop()) - - // Create an endpoint for testing - endpoint := &Endpoint{ - URL: "http://localhost:8545", - State: StateExcluded, - Metrics: &EndpointMetrics{}, - ExcludedAt: time.Now().Add(-6 * time.Minute), // Excluded 6 minutes ago - Client: &MockClient{healthy: true}, - } - - // Setup health checker - checker := &MockHealthChecker{} - checker.On("CheckHealth", mock.Anything, endpoint.Client).Return(nil) - monitor.SetHealthChecker(checker) - - // Test that defaults are applied (recovery after 5 minutes) - monitor.handleExcludedEndpointCheck(endpoint, true, 
100*time.Millisecond, nil) - - // Should recover since 6 minutes > default 5 minutes - assert.Equal(t, StateDegraded, endpoint.State) -} - -func TestHealthMonitor_immediateHealthCheck(t *testing.T) { - monitor, manager, _ := setupTestHealthMonitor(t) - - // Create test endpoint - endpoint := &Endpoint{ - URL: "http://localhost:8545", - State: StateHealthy, - Metrics: &EndpointMetrics{}, - LastUsed: time.Now(), - Client: &MockClient{healthy: true}, - } - - manager.endpoints = []*Endpoint{endpoint} - - // Setup health checker - checkCount := 0 - checker := &MockHealthChecker{} - checker.On("CheckHealth", mock.Anything, endpoint.Client).Return(nil).Run(func(args mock.Arguments) { - checkCount++ - }) - monitor.SetHealthChecker(checker) - - ctx := context.Background() - var wg sync.WaitGroup - - // Start the monitor - wg.Add(1) - go func() { - defer wg.Done() - // Run for a short time - ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond) - defer cancel() - - var innerWg sync.WaitGroup - innerWg.Add(1) - go monitor.Start(ctx, &innerWg) - innerWg.Wait() - }() - - // Wait for completion - wg.Wait() - - // Should have performed at least one immediate health check - assert.GreaterOrEqual(t, checkCount, 1) -} - -// Ensure MockHealthChecker implements HealthChecker interface -var _ HealthChecker = (*MockHealthChecker)(nil) \ No newline at end of file diff --git a/universalClient/rpcpool/interfaces.go b/universalClient/rpcpool/interfaces.go deleted file mode 100644 index 7cd86081..00000000 --- a/universalClient/rpcpool/interfaces.go +++ /dev/null @@ -1,25 +0,0 @@ -package rpcpool - -import ( - "context" -) - -// Client defines a generic interface for RPC clients that can be used in the pool -// Both EVM (*ethclient.Client) and SVM (*rpc.Client) clients implement this through adapters -type Client interface { - // Ping performs a basic health check on the client - Ping(ctx context.Context) error - - // Close closes the client connection - Close() error -} - -// 
ClientFactory creates chain-specific clients for a given URL -// This function is provided by each chain implementation (EVM, SVM) to create their specific client types -type ClientFactory func(url string) (Client, error) - -// HealthChecker defines the interface for checking endpoint health -// Each chain type (EVM, SVM) implements this with chain-specific logic -type HealthChecker interface { - CheckHealth(ctx context.Context, client Client) error -} \ No newline at end of file diff --git a/universalClient/rpcpool/manager.go b/universalClient/rpcpool/manager.go deleted file mode 100644 index 99d2ec3a..00000000 --- a/universalClient/rpcpool/manager.go +++ /dev/null @@ -1,276 +0,0 @@ -package rpcpool - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/rs/zerolog" - "github.com/pushchain/push-chain-node/universalClient/config" -) - -// Manager manages a pool of RPC endpoints with load balancing and health checking -type Manager struct { - chainID string - endpoints []*Endpoint - selector *EndpointSelector - config *config.RPCPoolConfig - logger zerolog.Logger - HealthMonitor *HealthMonitor // Exported for external access - clientFactory ClientFactory // Function to create client for URL - stopCh chan struct{} - wg sync.WaitGroup - mu sync.RWMutex -} - -// NewManager creates a new RPC pool manager -func NewManager( - chainID string, - urls []string, - poolConfig *config.RPCPoolConfig, - clientFactory ClientFactory, - logger zerolog.Logger, -) *Manager { - if len(urls) == 0 { - logger.Warn().Str("chain_id", chainID).Msg("no RPC URLs provided for pool") - return nil - } - - // Create endpoints - endpoints := make([]*Endpoint, len(urls)) - for i, url := range urls { - endpoints[i] = NewEndpoint(url) - } - - strategy := LoadBalancingStrategy(poolConfig.LoadBalancingStrategy) - selector := NewEndpointSelector(strategy) - - manager := &Manager{ - chainID: chainID, - endpoints: endpoints, - selector: selector, - config: poolConfig, - logger: 
logger.With().Str("component", "rpc_pool").Str("chain_id", chainID).Logger(), - clientFactory: clientFactory, - stopCh: make(chan struct{}), - } - - // Create health monitor - manager.HealthMonitor = NewHealthMonitor(manager, poolConfig, logger) - - return manager -} - -// Start initializes all endpoints and starts health monitoring -func (m *Manager) Start(ctx context.Context) error { - m.logger.Info(). - Int("endpoint_count", len(m.endpoints)). - Str("strategy", string(m.selector.GetStrategy())). - Msg("starting RPC pool manager") - - // Initialize all endpoints - var initErrors []error - for _, endpoint := range m.endpoints { - if err := m.initializeEndpoint(ctx, endpoint); err != nil { - m.logger.Warn(). - Str("url", endpoint.URL). - Err(err). - Msg("failed to initialize endpoint") - endpoint.UpdateState(StateUnhealthy) - initErrors = append(initErrors, err) - } - } - - // Check if we have enough healthy endpoints - healthyCount := m.getHealthyEndpointCount() - if healthyCount < m.config.MinHealthyEndpoints { - return fmt.Errorf("insufficient healthy endpoints: %d/%d (minimum: %d)", - healthyCount, len(m.endpoints), m.config.MinHealthyEndpoints) - } - - // Start health monitoring - m.wg.Add(1) - go m.HealthMonitor.Start(ctx, &m.wg) - - m.logger.Info(). - Int("healthy_endpoints", healthyCount). - Int("total_endpoints", len(m.endpoints)). - Msg("RPC pool manager started") - - return nil -} - -// Stop stops the pool manager and health monitoring -func (m *Manager) Stop() { - m.logger.Info().Msg("stopping RPC pool manager") - - // Stop the health monitor first - if m.HealthMonitor != nil { - m.HealthMonitor.Stop() - } - - close(m.stopCh) - m.wg.Wait() - - // Close all client connections - for _, endpoint := range m.endpoints { - if client := endpoint.GetClient(); client != nil { - if err := client.Close(); err != nil { - m.logger.Warn(). - Str("url", endpoint.URL). - Err(err). 
- Msg("failed to close client connection") - } - } - } - - m.logger.Info().Msg("RPC pool manager stopped") -} - -// initializeEndpoint creates and initializes the client for an endpoint -func (m *Manager) initializeEndpoint(ctx context.Context, endpoint *Endpoint) error { - client, err := m.clientFactory(endpoint.URL) - if err != nil { - return fmt.Errorf("failed to create client for %s: %w", endpoint.URL, err) - } - - endpoint.SetClient(client) - endpoint.UpdateState(StateHealthy) - - m.logger.Info(). - Str("url", endpoint.URL). - Msg("endpoint initialized successfully") - - return nil -} - -// SelectEndpoint selects an available endpoint based on the configured strategy -func (m *Manager) SelectEndpoint() (*Endpoint, error) { - healthyEndpoints := m.getHealthyEndpoints() - - if len(healthyEndpoints) == 0 { - return nil, fmt.Errorf("no healthy endpoints available") - } - - selected := m.selector.SelectEndpoint(healthyEndpoints) - if selected == nil { - return nil, fmt.Errorf("failed to select endpoint") - } - - // Update last used time - selected.mu.Lock() - selected.LastUsed = time.Now() - selected.mu.Unlock() - - return selected, nil -} - -// getHealthyEndpoints returns all endpoints that can serve requests -func (m *Manager) getHealthyEndpoints() []*Endpoint { - m.mu.RLock() - defer m.mu.RUnlock() - - healthy := make([]*Endpoint, 0, len(m.endpoints)) - for _, endpoint := range m.endpoints { - if endpoint.IsHealthy() { - healthy = append(healthy, endpoint) - } - } - return healthy -} - -// GetHealthyEndpointCount returns the count of healthy endpoints -func (m *Manager) GetHealthyEndpointCount() int { - return len(m.getHealthyEndpoints()) -} - -// getHealthyEndpointCount returns the count of healthy endpoints (deprecated: use GetHealthyEndpointCount) -func (m *Manager) getHealthyEndpointCount() int { - return m.GetHealthyEndpointCount() -} - -// UpdateEndpointMetrics updates metrics for an endpoint after a request -func (m *Manager) 
UpdateEndpointMetrics(endpoint *Endpoint, success bool, latency time.Duration, err error) { - if success { - endpoint.Metrics.UpdateSuccess(latency) - - // Potentially upgrade state if it was degraded - if endpoint.GetState() == StateDegraded { - // If we have a good success rate now, upgrade to healthy - if endpoint.Metrics.GetSuccessRate() > 0.8 { - endpoint.UpdateState(StateHealthy) - m.logger.Info(). - Str("url", endpoint.URL). - Float64("success_rate", endpoint.Metrics.GetSuccessRate()). - Msg("endpoint promoted to healthy") - } - } - } else { - endpoint.Metrics.UpdateFailure(err, latency) - - // Check if we should downgrade the endpoint state - consecutiveFailures := endpoint.Metrics.GetConsecutiveFailures() - - if consecutiveFailures >= m.config.UnhealthyThreshold { - // Mark as excluded - endpoint.UpdateState(StateExcluded) - m.logger.Warn(). - Str("url", endpoint.URL). - Int("consecutive_failures", consecutiveFailures). - Err(err). - Msg("endpoint excluded due to consecutive failures") - } else if endpoint.Metrics.GetSuccessRate() < 0.5 && endpoint.GetState() == StateHealthy { - // Downgrade to degraded - endpoint.UpdateState(StateDegraded) - m.logger.Warn(). - Str("url", endpoint.URL). - Float64("success_rate", endpoint.Metrics.GetSuccessRate()). 
- Msg("endpoint downgraded to degraded") - } - } -} - -// GetEndpointStats returns statistics about all endpoints -func (m *Manager) GetEndpointStats() *EndpointStats { - m.mu.RLock() - defer m.mu.RUnlock() - - endpoints := make([]EndpointInfo, len(m.endpoints)) - - for i, endpoint := range m.endpoints { - endpoints[i] = EndpointInfo{ - URL: endpoint.URL, - State: endpoint.GetState().String(), - HealthScore: endpoint.Metrics.GetHealthScore(), - LastUsed: endpoint.LastUsed, - RequestCount: endpoint.Metrics.TotalRequests, - FailureCount: endpoint.Metrics.FailedRequests, - TotalLatency: endpoint.Metrics.AverageLatency.Milliseconds() * int64(endpoint.Metrics.TotalRequests), - AverageLatency: float64(endpoint.Metrics.AverageLatency.Milliseconds()), - } - } - - return &EndpointStats{ - ChainID: m.chainID, - TotalEndpoints: len(m.endpoints), - Strategy: string(m.selector.GetStrategy()), - Endpoints: endpoints, - } -} - -// GetEndpoints returns all endpoints (for health monitor access) -func (m *Manager) GetEndpoints() []*Endpoint { - m.mu.RLock() - defer m.mu.RUnlock() - - // Return a copy to prevent external modification - endpoints := make([]*Endpoint, len(m.endpoints)) - copy(endpoints, m.endpoints) - return endpoints -} - -// GetConfig returns the pool configuration (for health monitor access) -func (m *Manager) GetConfig() *config.RPCPoolConfig { - return m.config -} \ No newline at end of file diff --git a/universalClient/rpcpool/manager_test.go b/universalClient/rpcpool/manager_test.go deleted file mode 100644 index faa5f64f..00000000 --- a/universalClient/rpcpool/manager_test.go +++ /dev/null @@ -1,318 +0,0 @@ -package rpcpool - -import ( - "context" - "testing" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/pushchain/push-chain-node/universalClient/config" -) - -// mockClientFactory creates mock clients for testing -func mockClientFactory(shouldFail bool) ClientFactory { - return 
func(url string) (Client, error) { - if shouldFail { - return nil, assert.AnError - } - return &mockClient{}, nil - } -} - -func TestNewManager(t *testing.T) { - logger := zerolog.Nop() - poolConfig := &config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 30, - UnhealthyThreshold: 3, - RecoveryIntervalSeconds: 300, - MinHealthyEndpoints: 1, - RequestTimeoutSeconds: 10, - LoadBalancingStrategy: "round-robin", - } - - tests := []struct { - name string - chainID string - urls []string - expectedNil bool - }{ - { - name: "valid configuration", - chainID: "eip155:1", - urls: []string{"http://test1.com", "http://test2.com"}, - expectedNil: false, - }, - { - name: "empty URLs returns nil", - chainID: "eip155:1", - urls: []string{}, - expectedNil: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - manager := NewManager( - tt.chainID, - tt.urls, - poolConfig, - mockClientFactory(false), - logger, - ) - - if tt.expectedNil { - assert.Nil(t, manager) - } else { - assert.NotNil(t, manager) - assert.Equal(t, tt.chainID, manager.chainID) - assert.Len(t, manager.endpoints, len(tt.urls)) - assert.NotNil(t, manager.HealthMonitor) - } - }) - } -} - -func TestManager_Start_Success(t *testing.T) { - logger := zerolog.Nop() - poolConfig := &config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 30, - UnhealthyThreshold: 3, - RecoveryIntervalSeconds: 300, - MinHealthyEndpoints: 1, - RequestTimeoutSeconds: 10, - LoadBalancingStrategy: "round-robin", - } - - manager := NewManager( - "eip155:1", - []string{"http://test1.com", "http://test2.com"}, - poolConfig, - mockClientFactory(false), - logger, - ) - require.NotNil(t, manager) - - ctx := context.Background() - err := manager.Start(ctx) - assert.NoError(t, err) - - // Verify healthy endpoints - assert.Equal(t, 2, manager.GetHealthyEndpointCount()) - - // Clean up - manager.Stop() -} - -func TestManager_Start_InsufficientHealthyEndpoints(t *testing.T) { - logger := zerolog.Nop() - poolConfig := 
&config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 30, - UnhealthyThreshold: 3, - RecoveryIntervalSeconds: 300, - MinHealthyEndpoints: 2, // Require 2 healthy endpoints - RequestTimeoutSeconds: 10, - LoadBalancingStrategy: "round-robin", - } - - manager := NewManager( - "eip155:1", - []string{"http://test1.com", "http://test2.com"}, - poolConfig, - mockClientFactory(true), // All clients fail to initialize - logger, - ) - require.NotNil(t, manager) - - ctx := context.Background() - err := manager.Start(ctx) - assert.Error(t, err) - assert.Contains(t, err.Error(), "insufficient healthy endpoints") -} - -func TestManager_SelectEndpoint(t *testing.T) { - logger := zerolog.Nop() - poolConfig := &config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 30, - UnhealthyThreshold: 3, - RecoveryIntervalSeconds: 300, - MinHealthyEndpoints: 1, - RequestTimeoutSeconds: 10, - LoadBalancingStrategy: "round-robin", - } - - manager := NewManager( - "eip155:1", - []string{"http://test1.com", "http://test2.com"}, - poolConfig, - mockClientFactory(false), - logger, - ) - require.NotNil(t, manager) - - ctx := context.Background() - err := manager.Start(ctx) - require.NoError(t, err) - defer manager.Stop() - - // Test endpoint selection - endpoint, err := manager.SelectEndpoint() - assert.NoError(t, err) - assert.NotNil(t, endpoint) - assert.Contains(t, []string{"http://test1.com", "http://test2.com"}, endpoint.URL) - - // Verify last used time is set - assert.True(t, time.Since(endpoint.LastUsed) < time.Second) -} - -func TestManager_SelectEndpoint_NoHealthyEndpoints(t *testing.T) { - logger := zerolog.Nop() - poolConfig := &config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 30, - UnhealthyThreshold: 3, - RecoveryIntervalSeconds: 300, - MinHealthyEndpoints: 1, - RequestTimeoutSeconds: 10, - LoadBalancingStrategy: "round-robin", - } - - manager := NewManager( - "eip155:1", - []string{"http://test1.com"}, - poolConfig, - mockClientFactory(false), - logger, - ) - require.NotNil(t, manager) 
- - ctx := context.Background() - err := manager.Start(ctx) - require.NoError(t, err) - defer manager.Stop() - - // Mark the only endpoint as excluded - manager.endpoints[0].UpdateState(StateExcluded) - - // Should fail to select endpoint - endpoint, err := manager.SelectEndpoint() - assert.Error(t, err) - assert.Nil(t, endpoint) - assert.Contains(t, err.Error(), "no healthy endpoints available") -} - -func TestManager_UpdateEndpointMetrics(t *testing.T) { - logger := zerolog.Nop() - poolConfig := &config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 30, - UnhealthyThreshold: 3, - RecoveryIntervalSeconds: 300, - MinHealthyEndpoints: 1, - RequestTimeoutSeconds: 10, - LoadBalancingStrategy: "round-robin", - } - - manager := NewManager( - "eip155:1", - []string{"http://test1.com"}, - poolConfig, - mockClientFactory(false), - logger, - ) - require.NotNil(t, manager) - - ctx := context.Background() - err := manager.Start(ctx) - require.NoError(t, err) - defer manager.Stop() - - endpoint := manager.endpoints[0] - latency := 50 * time.Millisecond - - // Test successful request - manager.UpdateEndpointMetrics(endpoint, true, latency, nil) - assert.Equal(t, uint64(1), endpoint.Metrics.TotalRequests) - assert.Equal(t, uint64(1), endpoint.Metrics.SuccessfulRequests) - - // Test failed request that should lead to exclusion after threshold - for i := 0; i < 3; i++ { // UnhealthyThreshold is 3 - manager.UpdateEndpointMetrics(endpoint, false, latency, assert.AnError) - } - - // Endpoint should be excluded due to consecutive failures - assert.Equal(t, StateExcluded, endpoint.GetState()) -} - -func TestManager_GetEndpointStats(t *testing.T) { - logger := zerolog.Nop() - poolConfig := &config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 30, - UnhealthyThreshold: 3, - RecoveryIntervalSeconds: 300, - MinHealthyEndpoints: 1, - RequestTimeoutSeconds: 10, - LoadBalancingStrategy: "round-robin", - } - - manager := NewManager( - "eip155:1", - []string{"http://test1.com", 
"http://test2.com"}, - poolConfig, - mockClientFactory(false), - logger, - ) - require.NotNil(t, manager) - - ctx := context.Background() - err := manager.Start(ctx) - require.NoError(t, err) - defer manager.Stop() - - stats := manager.GetEndpointStats() - - assert.Equal(t, "round-robin", stats.Strategy) - assert.Equal(t, 2, stats.TotalEndpoints) - - endpoints := stats.Endpoints - assert.Len(t, endpoints, 2) - - endpoint1 := endpoints[0] - assert.Contains(t, []string{"http://test1.com", "http://test2.com"}, endpoint1.URL) - assert.Equal(t, "healthy", endpoint1.State) -} - -func TestManager_Stop(t *testing.T) { - logger := zerolog.Nop() - poolConfig := &config.RPCPoolConfig{ - HealthCheckIntervalSeconds: 30, - UnhealthyThreshold: 3, - RecoveryIntervalSeconds: 300, - MinHealthyEndpoints: 1, - RequestTimeoutSeconds: 10, - LoadBalancingStrategy: "round-robin", - } - - manager := NewManager( - "eip155:1", - []string{"http://test1.com"}, - poolConfig, - mockClientFactory(false), - logger, - ) - require.NotNil(t, manager) - - ctx := context.Background() - err := manager.Start(ctx) - require.NoError(t, err) - - // Verify client is not closed initially - client := manager.endpoints[0].GetClient().(*mockClient) - assert.False(t, client.closed) - - // Stop the manager - manager.Stop() - - // Verify client is closed - assert.True(t, client.closed) -} \ No newline at end of file diff --git a/universalClient/rpcpool/strategies.go b/universalClient/rpcpool/strategies.go deleted file mode 100644 index 34df47bc..00000000 --- a/universalClient/rpcpool/strategies.go +++ /dev/null @@ -1,95 +0,0 @@ -package rpcpool - -import ( - "math/rand" - "sync/atomic" -) - -// LoadBalancingStrategy defines how requests are distributed across endpoints -type LoadBalancingStrategy string - -const ( - StrategyRoundRobin LoadBalancingStrategy = "round-robin" - StrategyWeighted LoadBalancingStrategy = "weighted" -) - -// EndpointSelector handles endpoint selection based on different strategies -type 
EndpointSelector struct { - strategy LoadBalancingStrategy - currentIndex atomic.Uint32 -} - -// NewEndpointSelector creates a new endpoint selector with the specified strategy -func NewEndpointSelector(strategy LoadBalancingStrategy) *EndpointSelector { - if strategy != StrategyRoundRobin && strategy != StrategyWeighted { - strategy = StrategyRoundRobin - } - - return &EndpointSelector{ - strategy: strategy, - } -} - -// SelectEndpoint selects an endpoint from the healthy endpoints based on the configured strategy -func (s *EndpointSelector) SelectEndpoint(healthyEndpoints []*Endpoint) *Endpoint { - if len(healthyEndpoints) == 0 { - return nil - } - - switch s.strategy { - case StrategyWeighted: - return s.selectWeighted(healthyEndpoints) - case StrategyRoundRobin: - fallthrough - default: - return s.selectRoundRobin(healthyEndpoints) - } -} - -// selectRoundRobin implements round-robin selection -func (s *EndpointSelector) selectRoundRobin(endpoints []*Endpoint) *Endpoint { - if len(endpoints) == 1 { - return endpoints[0] - } - - index := s.currentIndex.Add(1) % uint32(len(endpoints)) - return endpoints[index] -} - -// selectWeighted implements weighted selection based on health scores -func (s *EndpointSelector) selectWeighted(endpoints []*Endpoint) *Endpoint { - if len(endpoints) == 1 { - return endpoints[0] - } - - // Calculate total weight (sum of health scores) - totalWeight := 0.0 - for _, endpoint := range endpoints { - totalWeight += endpoint.Metrics.GetHealthScore() - } - - if totalWeight == 0 { - // If all endpoints have zero health score, fall back to round-robin - return s.selectRoundRobin(endpoints) - } - - // Generate random number between 0 and totalWeight - target := rand.Float64() * totalWeight - - // Select endpoint based on weight - currentWeight := 0.0 - for _, endpoint := range endpoints { - currentWeight += endpoint.Metrics.GetHealthScore() - if currentWeight >= target { - return endpoint - } - } - - // Fallback to last endpoint (shouldn't 
happen) - return endpoints[len(endpoints)-1] -} - -// GetStrategy returns the current strategy -func (s *EndpointSelector) GetStrategy() LoadBalancingStrategy { - return s.strategy -} \ No newline at end of file diff --git a/universalClient/rpcpool/strategies_test.go b/universalClient/rpcpool/strategies_test.go deleted file mode 100644 index 7fff5ab3..00000000 --- a/universalClient/rpcpool/strategies_test.go +++ /dev/null @@ -1,153 +0,0 @@ -package rpcpool - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewEndpointSelector(t *testing.T) { - tests := []struct { - name string - strategy LoadBalancingStrategy - expected LoadBalancingStrategy - }{ - { - name: "round robin strategy", - strategy: StrategyRoundRobin, - expected: StrategyRoundRobin, - }, - { - name: "weighted strategy", - strategy: StrategyWeighted, - expected: StrategyWeighted, - }, - { - name: "invalid strategy defaults to round robin", - strategy: "invalid", - expected: StrategyRoundRobin, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - selector := NewEndpointSelector(tt.strategy) - assert.Equal(t, tt.expected, selector.GetStrategy()) - }) - } -} - -func TestEndpointSelector_SelectEndpoint_Empty(t *testing.T) { - selector := NewEndpointSelector(StrategyRoundRobin) - endpoints := []*Endpoint{} - - selected := selector.SelectEndpoint(endpoints) - assert.Nil(t, selected) -} - -func TestEndpointSelector_SelectEndpoint_Single(t *testing.T) { - selector := NewEndpointSelector(StrategyRoundRobin) - endpoint := NewEndpoint("http://test1.com") - endpoints := []*Endpoint{endpoint} - - selected := selector.SelectEndpoint(endpoints) - assert.Equal(t, endpoint, selected) -} - -func TestEndpointSelector_RoundRobin(t *testing.T) { - selector := NewEndpointSelector(StrategyRoundRobin) - - endpoint1 := NewEndpoint("http://test1.com") - endpoint2 := NewEndpoint("http://test2.com") - endpoint3 := NewEndpoint("http://test3.com") - endpoints := 
[]*Endpoint{endpoint1, endpoint2, endpoint3} - - // Test round robin distribution - selections := make(map[*Endpoint]int) - for i := 0; i < 9; i++ { // 3 complete cycles - selected := selector.SelectEndpoint(endpoints) - selections[selected]++ - } - - // Each endpoint should be selected 3 times - assert.Equal(t, 3, selections[endpoint1]) - assert.Equal(t, 3, selections[endpoint2]) - assert.Equal(t, 3, selections[endpoint3]) -} - -func TestEndpointSelector_WeightedSelection(t *testing.T) { - selector := NewEndpointSelector(StrategyWeighted) - - // Create endpoints with different health scores - endpoint1 := NewEndpoint("http://test1.com") - endpoint1.Metrics.HealthScore = 100.0 // Highest score - - endpoint2 := NewEndpoint("http://test2.com") - endpoint2.Metrics.HealthScore = 50.0 - - endpoint3 := NewEndpoint("http://test3.com") - endpoint3.Metrics.HealthScore = 25.0 // Lowest score - - endpoints := []*Endpoint{endpoint1, endpoint2, endpoint3} - - // Test weighted distribution over many selections - selections := make(map[*Endpoint]int) - iterations := 1000 - - for i := 0; i < iterations; i++ { - selected := selector.SelectEndpoint(endpoints) - selections[selected]++ - } - - // endpoint1 should be selected most often (highest weight) - assert.Greater(t, selections[endpoint1], selections[endpoint2]) - assert.Greater(t, selections[endpoint2], selections[endpoint3]) - - // All endpoints should be selected at least once - assert.Greater(t, selections[endpoint1], 0) - assert.Greater(t, selections[endpoint2], 0) - assert.Greater(t, selections[endpoint3], 0) -} - -func TestEndpointSelector_WeightedSelection_ZeroHealthScore(t *testing.T) { - selector := NewEndpointSelector(StrategyWeighted) - - // Create endpoints with zero health scores - endpoint1 := NewEndpoint("http://test1.com") - endpoint1.Metrics.HealthScore = 0.0 - - endpoint2 := NewEndpoint("http://test2.com") - endpoint2.Metrics.HealthScore = 0.0 - - endpoints := []*Endpoint{endpoint1, endpoint2} - - // Should 
fall back to round robin when all health scores are zero - selections := make(map[*Endpoint]int) - for i := 0; i < 10; i++ { - selected := selector.SelectEndpoint(endpoints) - selections[selected]++ - } - - // Both endpoints should be selected equally (round robin fallback) - assert.Equal(t, 5, selections[endpoint1]) - assert.Equal(t, 5, selections[endpoint2]) -} - -func TestEndpointSelector_WeightedSelection_SingleEndpoint(t *testing.T) { - selector := NewEndpointSelector(StrategyWeighted) - - endpoint := NewEndpoint("http://test.com") - endpoint.Metrics.HealthScore = 75.0 - endpoints := []*Endpoint{endpoint} - - // Should always return the single endpoint - for i := 0; i < 5; i++ { - selected := selector.SelectEndpoint(endpoints) - assert.Equal(t, endpoint, selected) - } -} - -func TestLoadBalancingStrategy_String(t *testing.T) { - assert.Equal(t, "round-robin", string(StrategyRoundRobin)) - assert.Equal(t, "weighted", string(StrategyWeighted)) -} \ No newline at end of file diff --git a/universalClient/rpcpool/types.go b/universalClient/rpcpool/types.go deleted file mode 100644 index f6a61a0d..00000000 --- a/universalClient/rpcpool/types.go +++ /dev/null @@ -1,45 +0,0 @@ -package rpcpool - -import "time" - -// HealthStatus represents the health status of the RPC pool -type HealthStatus struct { - ChainID string `json:"chain_id"` - TotalEndpoints int `json:"total_endpoints"` - HealthyCount int `json:"healthy_count"` - UnhealthyCount int `json:"unhealthy_count"` - DegradedCount int `json:"degraded_count"` - ExcludedCount int `json:"excluded_count"` - Strategy string `json:"strategy"` - Endpoints []EndpointStatus `json:"endpoints"` -} - -// EndpointStatus represents the status of a single endpoint -type EndpointStatus struct { - URL string `json:"url"` - State string `json:"state"` - HealthScore float64 `json:"health_score"` - ResponseTime int64 `json:"response_time_ms"` - LastChecked time.Time `json:"last_checked"` - LastError string `json:"last_error,omitempty"` 
-} - -// EndpointStats represents statistics for endpoints -type EndpointStats struct { - ChainID string `json:"chain_id"` - TotalEndpoints int `json:"total_endpoints"` - Strategy string `json:"strategy"` - Endpoints []EndpointInfo `json:"endpoints"` -} - -// EndpointInfo represents information about a single endpoint -type EndpointInfo struct { - URL string `json:"url"` - State string `json:"state"` - HealthScore float64 `json:"health_score"` - LastUsed time.Time `json:"last_used"` - RequestCount uint64 `json:"request_count"` - FailureCount uint64 `json:"failure_count"` - TotalLatency int64 `json:"total_latency_ms"` - AverageLatency float64 `json:"average_latency_ms"` -} \ No newline at end of file diff --git a/universalClient/store/models.go b/universalClient/store/models.go index 3400b520..42893e17 100644 --- a/universalClient/store/models.go +++ b/universalClient/store/models.go @@ -1,66 +1,71 @@ // Package store contains GORM-backed SQLite models used by the Universal Validator. +// +// Database Structure (database file: chain_data.db): +// +// chains/ +// ├── push/ +// │ └── chain_data.db +// │ ├── chain_states +// │ └── events +// └── {external_chain_caip_format}/ +// └── chain_data.db +// ├── chain_states +// ├── chain_transactions +// └── gas_vote_transactions package store import ( "gorm.io/gorm" ) -// ChainState tracks the state for the chain this database belongs to. -// Since each chain has its own database, there's only one row per database. +// ChainState tracks synchronization state for a chain. +// One record per database (each chain has its own DB). type ChainState struct { gorm.Model - LastBlock uint64 - // Can add more chain-specific state fields as needed (e.g., LastSync, Metadata) + LastBlock uint64 // Last processed block height } -// ChainTransaction tracks transactions for the chain this database belongs to. -// Since each chain has its own database, ChainID is not needed. 
+// ChainTransaction tracks inbound transaction events from external chains +// (Ethereum, Solana, etc.) that need processing and voting on Push chain. +// +// TODO: Rename to ECEvent (External Chain Event) and update table name to "events" type ChainTransaction struct { gorm.Model - TxHash string `gorm:"uniqueIndex:idx_tx_hash_log_index"` - LogIndex uint `gorm:"uniqueIndex:idx_tx_hash_log_index"` - BlockNumber uint64 - EventIdentifier string + TxHash string `gorm:"uniqueIndex:idx_tx_hash_log_index"` // Transaction hash from external chain + LogIndex uint `gorm:"uniqueIndex:idx_tx_hash_log_index"` // Log index within transaction + BlockNumber uint64 // Block number (or slot for Solana) on external chain + EventIdentifier string // Event type identifier Status string `gorm:"index"` // "confirmation_pending", "awaiting_vote", "confirmed", "failed", "reorged" - Confirmations uint64 - ConfirmationType string // "STANDARD" or "FAST" - which confirmation type this tx requires - Data []byte // Store raw event data - VoteTxHash string // Transaction hash of the vote on pchain + Confirmations uint64 // Number of block confirmations received + ConfirmationType string // "STANDARD" or "FAST" + Data []byte // Raw JSON-encoded event data + VoteTxHash string // Vote transaction hash on Push chain (empty until voted) } -// GasVoteTransaction tracks gas price votes for the chain this database belongs to. -// Since each chain has its own database, ChainID is not needed. -// Uses GORM's built-in CreatedAt/UpdatedAt for timestamp tracking. +// GasVoteTransaction tracks gas price votes sent to Push chain for an external chain. 
type GasVoteTransaction struct { gorm.Model - GasPrice uint64 `gorm:"not null"` // Gas price voted for (in wei) - VoteTxHash string `gorm:"index"` // On-chain vote transaction hash - Status string `gorm:"default:'success'"` - ErrorMsg string `gorm:"type:text"` // Error message if vote failed + GasPrice uint64 `gorm:"not null"` // Gas price voted for (wei for EVM chains, lamports for Solana chains) + VoteTxHash string `gorm:"index"` // Vote transaction hash on Push chain + Status string `gorm:"default:'success'"` // "success" or "failed" + ErrorMsg string `gorm:"type:text"` // Error message if vote failed } -// TSSEvent tracks TSS protocol events (KeyGen, KeyRefresh, Sign) from Push Chain. -type TSSEvent struct { +// PCEvent tracks Push Chain events (TSS protocol events: KeyGen, KeyRefresh, QuorumChange, Sign). +// Table name: "events" (PC_EVENTS) +type PCEvent struct { gorm.Model - EventID string `gorm:"uniqueIndex;not null"` // Unique identifier for the event - BlockNumber uint64 `gorm:"index;not null"` // Block number when event was detected - ProtocolType string // "keygen", "keyrefresh", or "sign" - Status string `gorm:"index;not null"` // "PENDING", "IN_PROGRESS", "SUCCESS" - ExpiryHeight uint64 `gorm:"index;not null"` // Block height when event expires - EventData []byte // Raw event data from chain - VoteTxHash string // Transaction hash of the vote on pchain - ErrorMsg string `gorm:"type:text"` // Error message if status is FAILED + EventID string `gorm:"uniqueIndex;not null"` // Unique event identifier + BlockHeight uint64 `gorm:"index;not null"` // Block height where event was detected + ExpiryBlockHeight uint64 `gorm:"index;not null"` // Block height when event expires + Type string // "KEYGEN", "KEYREFRESH", "QUORUM_CHANGE", or "SIGN" + Status string `gorm:"index;not null"` // "PENDING", "IN_PROGRESS", "BROADCASTED", "COMPLETED", "REVERTED" + EventData []byte // Raw JSON-encoded event data from chain + TxHash string // Transaction hash ( Voting Tx for 
"KEYGEN", "KEYREFRESH", "QUORUM_CHANGE", External Chain Broadcast Tx for "SIGN") + ErrorMsg string `gorm:"type:text"` // Error message if processing failed } -// ExternalChainSignature tracks signatures that need to be broadcasted to external chains. -// Created when a Sign protocol completes successfully. -// TODO: Finalize Structure -type ChainTSSTransaction struct { - gorm.Model - TSSEventID uint `gorm:"index;not null"` // Reference to TSSEvent - Status string `gorm:"index;not null"` // "PENDING" or "SUCCESS" (after broadcast) - Signature []byte `gorm:"not null"` // ECDSA signature (65 bytes: R(32) + S(32) + RecoveryID(1)) - MessageHash []byte `gorm:"not null"` // Message hash that was signed - BroadcastTxHash string `gorm:"index"` // Transaction hash after successful broadcast - ErrorMsg string `gorm:"type:text"` // Error message if broadcast failed +// TableName specifies the table name for PCEvent. +func (PCEvent) TableName() string { + return "events" } diff --git a/universalClient/tss/coordinator/coordinator.go b/universalClient/tss/coordinator/coordinator.go index a5bb2624..1751415a 100644 --- a/universalClient/tss/coordinator/coordinator.go +++ b/universalClient/tss/coordinator/coordinator.go @@ -2,7 +2,6 @@ package coordinator import ( "context" - "crypto/sha256" "encoding/json" "sort" "sync" @@ -13,10 +12,12 @@ import ( session "go-wrapper/go-dkls/sessions" + "github.com/pushchain/push-chain-node/universalClient/chains/common" "github.com/pushchain/push-chain-node/universalClient/pushcore" "github.com/pushchain/push-chain-node/universalClient/store" "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" "github.com/pushchain/push-chain-node/universalClient/tss/keyshare" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" "github.com/pushchain/push-chain-node/x/uvalidator/types" ) @@ -25,6 +26,7 @@ type Coordinator struct { eventStore *eventstore.Store pushCore *pushcore.Client keyshareManager *keyshare.Manager + 
txBuilderFactory common.OutboundTxBuilderFactory validatorAddress string coordinatorRange uint64 pollInterval time.Duration @@ -54,6 +56,7 @@ func NewCoordinator( eventStore *eventstore.Store, pushCore *pushcore.Client, keyshareManager *keyshare.Manager, + txBuilderFactory common.OutboundTxBuilderFactory, validatorAddress string, coordinatorRange uint64, pollInterval time.Duration, @@ -67,6 +70,7 @@ func NewCoordinator( eventStore: eventStore, pushCore: pushCore, keyshareManager: keyshareManager, + txBuilderFactory: txBuilderFactory, validatorAddress: validatorAddress, coordinatorRange: coordinatorRange, pollInterval: pollInterval, @@ -158,15 +162,15 @@ func (c *Coordinator) GetMultiAddrsFromPeerID(ctx context.Context, peerID string // GetLatestBlockNum gets the latest block number from pushCore. func (c *Coordinator) GetLatestBlockNum() (uint64, error) { - return c.pushCore.GetLatestBlockNum() + return c.pushCore.GetLatestBlock() } // IsPeerCoordinator checks if the given peerID is the coordinator for the current block. // Uses cached allValidators for performance. func (c *Coordinator) IsPeerCoordinator(ctx context.Context, peerID string) (bool, error) { - currentBlock, err := c.pushCore.GetLatestBlockNum() + currentBlock, err := c.pushCore.GetLatestBlock() if err != nil { - return false, errors.Wrap(err, "failed to get latest block number") + return false, errors.Wrap(err, "failed to get latest block") } // Use cached validators @@ -211,9 +215,16 @@ func (c *Coordinator) IsPeerCoordinator(ctx context.Context, peerID string) (boo return coordValidatorAddr == validatorAddress, nil } -// GetCurrentTSSKeyId gets the current TSS key ID from pushCore. -func (c *Coordinator) GetCurrentTSSKeyId() (string, error) { - return c.pushCore.GetCurrentTSSKeyId() +// GetCurrentTSSKey gets the current TSS key ID and public key from pushCore. 
+func (c *Coordinator) GetCurrentTSSKey() (string, string, error) { + key, err := c.pushCore.GetCurrentKey() + if err != nil { + return "", "", err + } + if key == nil { + return "", "", nil // No key exists + } + return key.KeyId, key.TssPubkey, nil } // GetEligibleUV returns eligible validators for the given protocol type. @@ -294,7 +305,7 @@ func (c *Coordinator) pollLoop(ctx context.Context) { // updateValidators fetches and caches all validators. func (c *Coordinator) updateValidators() { - allValidators, err := c.pushCore.GetUniversalValidators() + allValidators, err := c.pushCore.GetAllUniversalValidators() if err != nil { c.logger.Warn().Err(err).Msg("failed to update validators cache") return @@ -309,9 +320,9 @@ func (c *Coordinator) updateValidators() { // processPendingEvents checks if this node is coordinator, and only then reads DB and processes events. func (c *Coordinator) processPendingEvents(ctx context.Context) error { - currentBlock, err := c.pushCore.GetLatestBlockNum() + currentBlock, err := c.pushCore.GetLatestBlock() if err != nil { - return errors.Wrap(err, "failed to get latest block number") + return errors.Wrap(err, "failed to get latest block") } // Use cached validators (updated at polling interval) @@ -371,13 +382,13 @@ func (c *Coordinator) processPendingEvents(ctx context.Context) error { for _, event := range events { c.logger.Info(). Str("event_id", event.EventID). - Str("protocol_type", event.ProtocolType). - Uint64("block_number", event.BlockNumber). + Str("type", event.Type). + Uint64("block_height", event.BlockHeight). 
Msg("processing event as coordinator") // Get participants based on protocol type (using cached allValidators) - participants := getParticipantsForProtocol(event.ProtocolType, allValidators) + participants := getParticipantsForProtocol(event.Type, allValidators) if participants == nil { - c.logger.Debug().Str("event_id", event.EventID).Str("protocol_type", event.ProtocolType).Msg("unknown protocol type") + c.logger.Debug().Str("event_id", event.EventID).Str("type", event.Type).Msg("unknown protocol type") continue } if len(participants) == 0 { @@ -398,7 +409,7 @@ func (c *Coordinator) processPendingEvents(ctx context.Context) error { // processEventAsCoordinator processes a TSS event as the coordinator. // Creates setup message based on event type and sends to all participants. -func (c *Coordinator) processEventAsCoordinator(ctx context.Context, event store.TSSEvent, participants []*types.UniversalValidator) error { +func (c *Coordinator) processEventAsCoordinator(ctx context.Context, event store.PCEvent, participants []*types.UniversalValidator) error { // Sort participants by party ID for consistency sortedParticipants := make([]*types.UniversalValidator, len(participants)) copy(sortedParticipants, participants) @@ -427,17 +438,18 @@ func (c *Coordinator) processEventAsCoordinator(ctx context.Context, event store // Create setup message based on event type var setupData []byte + var signMetadata *SignMetadata var err error - switch event.ProtocolType { - case "keygen", "keyrefresh": + switch event.Type { + case string(ProtocolKeygen), string(ProtocolKeyrefresh): // Keygen and keyrefresh use the same setup structure setupData, err = c.createKeygenSetup(threshold, partyIDs) - case "quorumchange": + case string(ProtocolQuorumChange): setupData, err = c.createQcSetup(ctx, threshold, partyIDs, sortedParticipants) - case "sign": - setupData, err = c.createSignSetup(ctx, event.EventData, partyIDs) + case string(ProtocolSign): + setupData, signMetadata, err = 
c.createSignSetup(ctx, event.EventData, partyIDs) default: - err = errors.Errorf("unknown protocol type: %s", event.ProtocolType) + err = errors.Errorf("unknown protocol type: %s", event.Type) } if err != nil { @@ -450,6 +462,7 @@ func (c *Coordinator) processEventAsCoordinator(ctx context.Context, event store EventID: event.EventID, Payload: setupData, Participants: partyIDs, + SignMetadata: signMetadata, // nil for non-sign events } setupMsgBytes, err := json.Marshal(setupMsg) if err != nil { @@ -613,19 +626,24 @@ func (c *Coordinator) createKeygenSetup(threshold int, partyIDs []string) ([]byt return setupData, nil } -// createSignSetup creates a sign setup message. -// Requires loading the keyshare to extract keyID and messageHash from event data. -func (c *Coordinator) createSignSetup(ctx context.Context, eventData []byte, partyIDs []string) ([]byte, error) { +// createSignSetup creates a sign setup message and returns the sign metadata. +// Uses the OutboundTxBuilder to build the actual transaction for the destination chain. +// Returns the setup data, sign metadata (for participant verification), and error. 
+func (c *Coordinator) createSignSetup(ctx context.Context, eventData []byte, partyIDs []string) ([]byte, *SignMetadata, error) { // Get current TSS keyId from pushCore - keyIDStr, err := c.pushCore.GetCurrentTSSKeyId() + key, err := c.pushCore.GetCurrentKey() if err != nil { - return nil, errors.Wrap(err, "failed to get current TSS keyId") + return nil, nil, errors.Wrap(err, "failed to get current TSS keyId") } + if key == nil { + return nil, nil, errors.New("no TSS key exists") + } + keyIDStr := key.KeyId // Load keyshare to ensure it exists (validation) keyshareBytes, err := c.keyshareManager.Get(keyIDStr) if err != nil { - return nil, errors.Wrapf(err, "failed to load keyshare for keyId %s", keyIDStr) + return nil, nil, errors.Wrapf(err, "failed to load keyshare for keyId %s", keyIDStr) } _ = keyshareBytes // Keyshare is loaded for validation, keyID is derived from string @@ -641,34 +659,68 @@ func (c *Coordinator) createSignSetup(ctx context.Context, eventData []byte, par participantIDs = append(participantIDs, []byte(partyID)...) 
} - // Extract message string from eventData and hash it - var message string - // Try to parse as JSON first (in case eventData is JSON with "message" field) - var eventDataJSON map[string]interface{} - if err := json.Unmarshal(eventData, &eventDataJSON); err == nil { - // Successfully parsed as JSON, try to get "message" field - if msg, ok := eventDataJSON["message"].(string); ok { - message = msg - } else { - return nil, errors.New("event data JSON does not contain 'message' string field") - } - } else { - // Not JSON, treat eventData as the message string directly - message = string(eventData) + // Build the transaction and get signing parameters + txResult, err := c.buildSignTransaction(ctx, eventData) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to build sign transaction") + } + + setupData, err := session.DklsSignSetupMsgNew(keyIDBytes, nil, txResult.SigningHash, participantIDs) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to create sign setup") + } + + // Create sign metadata with gas price and signing hash for participant verification + signMetadata := &SignMetadata{ + GasPrice: txResult.GasPrice, + SigningHash: txResult.SigningHash, + } + + return setupData, signMetadata, nil +} + +// buildSignTransaction builds the outbound transaction using the appropriate OutboundTxBuilder. 
+func (c *Coordinator) buildSignTransaction(ctx context.Context, eventData []byte) (*common.OutboundTxResult, error) { + if len(eventData) == 0 { + return nil, errors.New("event data is empty") + } + + var data uexecutortypes.OutboundCreatedEvent + if err := json.Unmarshal(eventData, &data); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal outbound event data") } - if message == "" { - return nil, errors.New("message is empty") + if data.TxID == "" { + return nil, errors.New("outbound event missing tx_id") } - // Hash the message to get messageHash (SHA256) - messageHash := sha256.Sum256([]byte(message)) + if data.DestinationChain == "" { + return nil, errors.New("outbound event missing destination_chain") + } + + if c.txBuilderFactory == nil { + return nil, errors.New("tx builder factory not configured") + } - setupData, err := session.DklsSignSetupMsgNew(keyIDBytes, nil, messageHash[:], participantIDs) + // Get gas price from pushcore oracle + gasPrice, err := c.pushCore.GetGasPrice(ctx, data.DestinationChain) if err != nil { - return nil, errors.Wrap(err, "failed to create sign setup") + return nil, errors.Wrapf(err, "failed to get gas price for chain %s", data.DestinationChain) } - return setupData, nil + + // Get the builder for the destination chain + builder, err := c.txBuilderFactory.CreateBuilder(data.DestinationChain) + if err != nil { + return nil, errors.Wrapf(err, "failed to create tx builder for chain %s", data.DestinationChain) + } + + // Build the transaction with the gas price from oracle + txResult, err := builder.BuildTransaction(ctx, &data, gasPrice) + if err != nil { + return nil, errors.Wrap(err, "failed to build transaction") + } + + return txResult, nil } // createQcSetup creates a quorumchange setup message. 
@@ -679,10 +731,14 @@ func (c *Coordinator) createSignSetup(ctx context.Context, eventData []byte, par // @dev - Although tss lib can also take pending leave participants in oldParticipantIndices, we don't use that since it needs to be considered that old participants are gone and will only result in errors. func (c *Coordinator) createQcSetup(ctx context.Context, threshold int, partyIDs []string, participants []*types.UniversalValidator) ([]byte, error) { // Get current TSS keyId from pushCore - keyIDStr, err := c.pushCore.GetCurrentTSSKeyId() + key, err := c.pushCore.GetCurrentKey() if err != nil { return nil, errors.Wrap(err, "failed to get current TSS keyId") } + if key == nil { + return nil, errors.New("no TSS key exists") + } + keyIDStr := key.KeyId // Load old keyshare to get the key we're changing oldKeyshareBytes, err := c.keyshareManager.Get(keyIDStr) @@ -741,13 +797,13 @@ func (c *Coordinator) createQcSetup(ctx context.Context, threshold int, partyIDs // For sign: returns all (Active + Pending Leave) validators. func getEligibleForProtocol(protocolType string, allValidators []*types.UniversalValidator) []*types.UniversalValidator { switch protocolType { - case "keygen", "quorumchange": + case string(ProtocolKeygen), string(ProtocolQuorumChange): // Active + Pending Join return getQuorumChangeParticipants(allValidators) - case "keyrefresh": + case string(ProtocolKeyrefresh): // Active + Pending Leave return getSignEligible(allValidators) - case "sign": + case string(ProtocolSign): // Active + Pending Leave return getSignEligible(allValidators) default: @@ -761,7 +817,7 @@ func getEligibleForProtocol(protocolType string, allValidators []*types.Universa // For other protocols: returns all eligible participants (same as getEligibleForProtocol). 
func getParticipantsForProtocol(protocolType string, allValidators []*types.UniversalValidator) []*types.UniversalValidator { // For sign, we need random subset; for others, same as eligible - if protocolType == "sign" { + if protocolType == string(ProtocolSign) { return getSignParticipants(allValidators) } // For other protocols, return all eligible (same logic) diff --git a/universalClient/tss/coordinator/coordinator_test.go b/universalClient/tss/coordinator/coordinator_test.go index 403cc2a4..c5f37443 100644 --- a/universalClient/tss/coordinator/coordinator_test.go +++ b/universalClient/tss/coordinator/coordinator_test.go @@ -3,6 +3,7 @@ package coordinator import ( "context" "errors" + "math/big" "sync" "testing" "time" @@ -13,10 +14,12 @@ import ( "gorm.io/driver/sqlite" "gorm.io/gorm" + "github.com/pushchain/push-chain-node/universalClient/chains/common" "github.com/pushchain/push-chain-node/universalClient/pushcore" "github.com/pushchain/push-chain-node/universalClient/store" "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" "github.com/pushchain/push-chain-node/universalClient/tss/keyshare" + utsstypes "github.com/pushchain/push-chain-node/x/utss/types" "github.com/pushchain/push-chain-node/x/uvalidator/types" ) @@ -27,6 +30,7 @@ type mockPushCoreClient struct { latestBlock uint64 validators []*types.UniversalValidator currentTSSKeyId string + currentTSSPubkey string getBlockNumErr error getValidatorsErr error getKeyIdErr error @@ -34,22 +38,24 @@ type mockPushCoreClient struct { func newMockPushCoreClient() *mockPushCoreClient { return &mockPushCoreClient{ - latestBlock: 100, - currentTSSKeyId: "test-key-id", - validators: []*types.UniversalValidator{}, + latestBlock: 100, + currentTSSKeyId: "test-key-id", + currentTSSPubkey: "test-pubkey", + validators: []*types.UniversalValidator{}, } } -func (m *mockPushCoreClient) GetLatestBlockNum() (uint64, error) { +func (m *mockPushCoreClient) GetLatestBlock() (uint64, error) { m.mu.RLock() 
defer m.mu.RUnlock() if m.getBlockNumErr != nil { return 0, m.getBlockNumErr } + // Create a mock block response return m.latestBlock, nil } -func (m *mockPushCoreClient) GetUniversalValidators() ([]*types.UniversalValidator, error) { +func (m *mockPushCoreClient) GetAllUniversalValidators() ([]*types.UniversalValidator, error) { m.mu.RLock() defer m.mu.RUnlock() if m.getValidatorsErr != nil { @@ -58,13 +64,30 @@ func (m *mockPushCoreClient) GetUniversalValidators() ([]*types.UniversalValidat return m.validators, nil } -func (m *mockPushCoreClient) GetCurrentTSSKeyId() (string, error) { +func (m *mockPushCoreClient) GetCurrentKey() (*utsstypes.TssKey, error) { m.mu.RLock() defer m.mu.RUnlock() if m.getKeyIdErr != nil { - return "", m.getKeyIdErr + return nil, m.getKeyIdErr } - return m.currentTSSKeyId, nil + if m.currentTSSKeyId == "" { + return nil, nil // No key exists + } + return &utsstypes.TssKey{ + KeyId: m.currentTSSKeyId, + TssPubkey: m.currentTSSPubkey, + }, nil +} + +func (m *mockPushCoreClient) GetCurrentTSSKey() (string, string, error) { + key, err := m.GetCurrentKey() + if err != nil { + return "", "", err + } + if key == nil { + return "", "", errors.New("no TSS key found") + } + return key.KeyId, key.TssPubkey, nil } func (m *mockPushCoreClient) Close() error { @@ -101,7 +124,7 @@ func (m *mockPushCoreClient) setGetBlockNumError(err error) { func setupTestCoordinator(t *testing.T) (*Coordinator, *mockPushCoreClient, *eventstore.Store) { db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{}) require.NoError(t, err) - require.NoError(t, db.AutoMigrate(&store.TSSEvent{})) + require.NoError(t, db.AutoMigrate(&store.PCEvent{})) evtStore := eventstore.NewStore(db, zerolog.Nop()) @@ -149,6 +172,7 @@ func setupTestCoordinator(t *testing.T) (*Coordinator, *mockPushCoreClient, *eve evtStore, testClient, keyshareMgr, + nil, // txBuilderFactory - nil for most tests "validator1", 100, // coordinatorRange 100*time.Millisecond, @@ -238,7 +262,7 @@ func 
TestGetEligibleUV(t *testing.T) { require.True(t, hasValidators, "validators should be set in setup") t.Run("keygen protocol", func(t *testing.T) { - eligible := coord.GetEligibleUV("keygen") + eligible := coord.GetEligibleUV("KEYGEN") // Should return Active + Pending Join: validator1, validator2, validator3 require.Len(t, eligible, 3) addresses := make(map[string]bool) @@ -253,7 +277,7 @@ func TestGetEligibleUV(t *testing.T) { }) t.Run("keyrefresh protocol", func(t *testing.T) { - eligible := coord.GetEligibleUV("keyrefresh") + eligible := coord.GetEligibleUV("KEYREFRESH") // Should return only Active: validator1, validator2 (not validator3 which is PendingJoin) assert.Len(t, eligible, 2) addresses := make(map[string]bool) @@ -268,7 +292,7 @@ func TestGetEligibleUV(t *testing.T) { }) t.Run("quorumchange protocol", func(t *testing.T) { - eligible := coord.GetEligibleUV("quorumchange") + eligible := coord.GetEligibleUV("QUORUM_CHANGE") // Should return Active + Pending Join: validator1, validator2, validator3 require.Len(t, eligible, 3) addresses := make(map[string]bool) @@ -283,7 +307,7 @@ func TestGetEligibleUV(t *testing.T) { }) t.Run("sign protocol", func(t *testing.T) { - eligible := coord.GetEligibleUV("sign") + eligible := coord.GetEligibleUV("SIGN") // Should return random subset of Active + Pending Leave // validator1 and validator2 are Active, validator3 is PendingJoin (not eligible) // So should return validator1 and validator2 (or subset if >2/3 threshold applies) @@ -301,7 +325,7 @@ func TestGetEligibleUV(t *testing.T) { coord.allValidators = nil coord.mu.Unlock() - eligible := coord.GetEligibleUV("keygen") + eligible := coord.GetEligibleUV("KEYGEN") assert.Nil(t, eligible) }) } @@ -482,3 +506,136 @@ func TestCoordinator_StartStop(t *testing.T) { coord.mu.RUnlock() assert.False(t, running, "coordinator should be stopped") } + +func TestGetSigningHash(t *testing.T) { + ctx := context.Background() + + // Note: "valid outbound event data" test requires 
integration with pushcore.GetGasPrice + // which cannot be easily mocked. The validation tests below cover error paths. + + t.Run("gas price fetch fails with minimal pushCore", func(t *testing.T) { + coord, _, _ := setupTestCoordinator(t) + + mockFactory := &mockTxBuilderFactory{ + builders: map[string]*mockTxBuilder{ + "ethereum": {signingHash: []byte("mock-signing-hash-32-bytes-long!")}, + }, + } + coord.txBuilderFactory = mockFactory + + eventData := []byte(`{ + "tx_id": "0x123abc", + "destination_chain": "ethereum", + "recipient": "0xrecipient", + "amount": "1000000", + "asset_addr": "0xtoken", + "sender": "0xsender", + "payload": "0x", + "gas_limit": "21000" + }`) + + // With minimal pushCore, gas price fetch will fail + _, err := coord.buildSignTransaction(ctx, eventData) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get gas price") + }) + + t.Run("missing tx_id", func(t *testing.T) { + coord, _, _ := setupTestCoordinator(t) + coord.txBuilderFactory = &mockTxBuilderFactory{} + + eventData := []byte(`{"destination_chain": "ethereum"}`) + + _, err := coord.buildSignTransaction(ctx, eventData) + require.Error(t, err) + assert.Contains(t, err.Error(), "tx_id") + }) + + t.Run("missing destination_chain", func(t *testing.T) { + coord, _, _ := setupTestCoordinator(t) + coord.txBuilderFactory = &mockTxBuilderFactory{} + + eventData := []byte(`{"tx_id": "0x123"}`) + + _, err := coord.buildSignTransaction(ctx, eventData) + require.Error(t, err) + assert.Contains(t, err.Error(), "destination_chain") + }) + + t.Run("nil factory", func(t *testing.T) { + coord, _, _ := setupTestCoordinator(t) + // Factory is nil by default in test setup + + eventData := []byte(`{"tx_id": "0x123", "destination_chain": "ethereum"}`) + + _, err := coord.buildSignTransaction(ctx, eventData) + require.Error(t, err) + assert.Contains(t, err.Error(), "factory not configured") + }) + + t.Run("invalid json", func(t *testing.T) { + coord, _, _ := setupTestCoordinator(t) + 
coord.txBuilderFactory = &mockTxBuilderFactory{} + + _, err := coord.buildSignTransaction(ctx, []byte("not json")) + require.Error(t, err) + assert.Contains(t, err.Error(), "unmarshal") + }) + + t.Run("empty event data", func(t *testing.T) { + coord, _, _ := setupTestCoordinator(t) + + _, err := coord.buildSignTransaction(ctx, []byte{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "empty") + }) +} + +// mockTxBuilder implements common.OutboundTxBuilder for testing +type mockTxBuilder struct { + signingHash []byte + err error +} + +func (m *mockTxBuilder) BuildTransaction(ctx context.Context, data *common.OutboundTxData, gasPrice *big.Int) (*common.OutboundTxResult, error) { + if m.err != nil { + return nil, m.err + } + return &common.OutboundTxResult{ + SigningHash: m.signingHash, + Nonce: 1, + GasPrice: gasPrice, + GasLimit: 21000, + ChainID: "ethereum", + RawTx: []byte("raw-tx-data"), + }, nil +} + +func (m *mockTxBuilder) AssembleSignedTransaction(unsignedTx []byte, signature []byte, recoveryID byte) ([]byte, error) { + return nil, nil +} + +func (m *mockTxBuilder) BroadcastTransaction(ctx context.Context, signedTx []byte) (string, error) { + return "", nil +} + +func (m *mockTxBuilder) GetChainID() string { + return "mock-chain" +} + +// mockTxBuilderFactory implements common.OutboundTxBuilderFactory for testing +type mockTxBuilderFactory struct { + builders map[string]*mockTxBuilder +} + +func (m *mockTxBuilderFactory) CreateBuilder(chainID string) (common.OutboundTxBuilder, error) { + if builder, ok := m.builders[chainID]; ok { + return builder, nil + } + return nil, errors.New("unsupported chain: " + chainID) +} + +func (m *mockTxBuilderFactory) SupportsChain(chainID string) bool { + _, ok := m.builders[chainID] + return ok +} diff --git a/universalClient/tss/coordinator/types.go b/universalClient/tss/coordinator/types.go index 8b754778..b0b64bf8 100644 --- a/universalClient/tss/coordinator/types.go +++ b/universalClient/tss/coordinator/types.go 
@@ -2,6 +2,7 @@ package coordinator import ( "context" + "math/big" ) // SendFunc is a function type for sending messages to participants. @@ -13,16 +14,32 @@ type SendFunc func(ctx context.Context, peerID string, data []byte) error type ProtocolType string const ( - ProtocolKeygen ProtocolType = "keygen" - ProtocolKeyrefresh ProtocolType = "keyrefresh" - ProtocolQuorumChange ProtocolType = "quorumchange" - ProtocolSign ProtocolType = "sign" + ProtocolKeygen ProtocolType = "KEYGEN" + ProtocolKeyrefresh ProtocolType = "KEYREFRESH" + ProtocolQuorumChange ProtocolType = "QUORUM_CHANGE" + ProtocolSign ProtocolType = "SIGN" ) +// SignMetadata contains the signing parameters from the coordinator. +// Participants independently build the transaction using these parameters +// and verify the resulting hash matches before signing. +type SignMetadata struct { + // GasPrice is the gas price chosen by coordinator from the on-chain oracle. + GasPrice *big.Int `json:"gas_price"` + + // SigningHash is the hash computed by the coordinator. + // Participants verify this matches their independently computed hash. + SigningHash []byte `json:"signing_hash"` +} + // Message represents a simple message with type, eventId, payload, and participants. type Message struct { Type string `json:"type"` // "setup", "ack", "begin", "step" EventID string `json:"eventId"` Payload []byte `json:"payload"` Participants []string `json:"participants"` // Array of PartyIDs (validator addresses) participating in this process + + // SignMetadata is included for SIGN protocol setup messages. + // Participants use this to verify the signing hash before proceeding. + SignMetadata *SignMetadata `json:"sign_metadata,omitempty"` } diff --git a/universalClient/tss/docs/ARCHITECTURE.md b/universalClient/tss/docs/ARCHITECTURE.md index 5d908af7..530e7bc8 100644 --- a/universalClient/tss/docs/ARCHITECTURE.md +++ b/universalClient/tss/docs/ARCHITECTURE.md @@ -148,7 +148,7 @@ Database access layer for TSS events. 
Provides methods for getting pending event - `GetPendingEvents()` - Gets events ready to be processed - `UpdateStatus()` - Updates event status -- `UpdateStatusAndBlockNumber()` - Updates status and block number +- `UpdateStatusAndBlockHeight()` - Updates status and block height - `ResetInProgressEventsToPending()` - Resets IN_PROGRESS events on startup - `GetEventsByStatus()` - Queries events by status diff --git a/universalClient/tss/eventstore/store.go b/universalClient/tss/eventstore/store.go index 229f802a..5b657fe1 100644 --- a/universalClient/tss/eventstore/store.go +++ b/universalClient/tss/eventstore/store.go @@ -8,11 +8,25 @@ import ( "github.com/pushchain/push-chain-node/universalClient/store" ) +// Event statuses for TSS operations const ( - StatusPending = "PENDING" + // StatusPending - Event is waiting to be processed (TSS signing not started) + StatusPending = "PENDING" + + // StatusInProgress - TSS signing is in progress StatusInProgress = "IN_PROGRESS" - StatusSuccess = "SUCCESS" - StatusExpired = "EXPIRED" + + // StatusBroadcasted - Transaction sent to external chain (for sign events) + StatusBroadcasted = "BROADCASTED" + + // StatusCompleted - Successfully completed (key events: vote sent, sign events: confirmed) + StatusCompleted = "COMPLETED" + + // StatusReverted - Event reverted + StatusReverted = "REVERTED" + + // StatusExpired - Event expired (for key events) + StatusExpired = "EXPIRED" ) // Store provides database access for TSS events. @@ -30,9 +44,9 @@ func NewStore(db *gorm.DB, logger zerolog.Logger) *Store { } // GetPendingEvents returns all pending events that are ready to be processed. -// Events are ready if they are at least `minBlockConfirmation` blocks behind the current block. -func (s *Store) GetPendingEvents(currentBlock uint64, minBlockConfirmation uint64) ([]store.TSSEvent, error) { - var events []store.TSSEvent +// Events are ready if they are at least `minBlockConfirmation` blocks behind the current block and not expired. 
+func (s *Store) GetPendingEvents(currentBlock uint64, minBlockConfirmation uint64) ([]store.PCEvent, error) { + var events []store.PCEvent // Only get events that are old enough (at least minBlockConfirmation blocks behind) minBlock := currentBlock - minBlockConfirmation @@ -40,31 +54,34 @@ func (s *Store) GetPendingEvents(currentBlock uint64, minBlockConfirmation uint6 minBlock = 0 } - if err := s.db.Where("status = ? AND block_number <= ?", StatusPending, minBlock). - Order("block_number ASC, created_at ASC"). + // Get pending events that are not expired + if err := s.db.Where("status = ? AND block_height <= ? AND expiry_block_height > ?", + StatusPending, minBlock, currentBlock). + Order("block_height ASC, created_at ASC"). Find(&events).Error; err != nil { return nil, errors.Wrap(err, "failed to query pending events") } - // Filter out expired events - var validEvents []store.TSSEvent - for _, event := range events { - if event.ExpiryHeight > 0 && currentBlock > event.ExpiryHeight { - // Mark as expired - if err := s.UpdateStatus(event.EventID, StatusExpired, ""); err != nil { - s.logger.Warn().Err(err).Str("event_id", event.EventID).Msg("failed to mark event as expired") - } - continue - } - validEvents = append(validEvents, event) + return events, nil +} + +// GetExpiredEvents returns all expired events (PENDING, IN_PROGRESS, or BROADCASTED) that have expired. +func (s *Store) GetExpiredEvents(currentBlock uint64) ([]store.PCEvent, error) { + var events []store.PCEvent + + if err := s.db.Where("status IN ? AND expiry_block_height <= ?", + []string{StatusPending, StatusInProgress, StatusBroadcasted}, currentBlock). + Order("block_height ASC, created_at ASC"). + Find(&events).Error; err != nil { + return nil, errors.Wrap(err, "failed to query expired events") } - return validEvents, nil + return events, nil } // GetEvent retrieves an event by ID. 
-func (s *Store) GetEvent(eventID string) (*store.TSSEvent, error) { - var event store.TSSEvent +func (s *Store) GetEvent(eventID string) (*store.PCEvent, error) { + var event store.PCEvent if err := s.db.Where("event_id = ?", eventID).First(&event).Error; err != nil { return nil, err } @@ -77,7 +94,7 @@ func (s *Store) UpdateStatus(eventID, status, errorMsg string) error { if errorMsg != "" { update["error_msg"] = errorMsg } - result := s.db.Model(&store.TSSEvent{}). + result := s.db.Model(&store.PCEvent{}). Where("event_id = ?", eventID). Updates(update) if result.Error != nil { @@ -89,13 +106,13 @@ func (s *Store) UpdateStatus(eventID, status, errorMsg string) error { return nil } -// UpdateStatusAndBlockNumber updates the status and block number of an event. -func (s *Store) UpdateStatusAndBlockNumber(eventID, status string, blockNumber uint64) error { +// UpdateStatusAndBlockHeight updates the status and block height of an event. +func (s *Store) UpdateStatusAndBlockHeight(eventID, status string, blockHeight uint64) error { update := map[string]any{ "status": status, - "block_number": blockNumber, + "block_height": blockHeight, } - result := s.db.Model(&store.TSSEvent{}). + result := s.db.Model(&store.PCEvent{}). Where("event_id = ?", eventID). Updates(update) if result.Error != nil { @@ -107,28 +124,15 @@ func (s *Store) UpdateStatusAndBlockNumber(eventID, status string, blockNumber u return nil } -// GetEventsByStatus returns all events with the given status. -func (s *Store) GetEventsByStatus(status string, limit int) ([]store.TSSEvent, error) { - var events []store.TSSEvent - query := s.db.Where("status = ?", status).Order("created_at DESC") - if limit > 0 { - query = query.Limit(limit) - } - if err := query.Find(&events).Error; err != nil { - return nil, errors.Wrapf(err, "failed to query events with status %s", status) - } - return events, nil -} - -// ClearExpiredAndSuccessfulEvents deletes both expired and successful events. 
-func (s *Store) ClearExpiredAndSuccessfulEvents() (int64, error) { - result := s.db.Where("status IN ?", []string{StatusExpired, StatusSuccess}).Delete(&store.TSSEvent{}) +// ClearTerminalEvents deletes completed, reverted, and expired events. +func (s *Store) ClearTerminalEvents() (int64, error) { + result := s.db.Where("status IN ?", []string{StatusCompleted, StatusReverted, StatusExpired}).Delete(&store.PCEvent{}) if result.Error != nil { - return 0, errors.Wrap(result.Error, "failed to clear expired and successful events") + return 0, errors.Wrap(result.Error, "failed to clear terminal events") } s.logger.Info(). Int64("deleted_count", result.RowsAffected). - Msg("cleared expired and successful events") + Msg("cleared terminal events") return result.RowsAffected, nil } @@ -136,7 +140,7 @@ func (s *Store) ClearExpiredAndSuccessfulEvents() (int64, error) { // This should be called on node startup to handle cases where the node crashed // while events were in progress, causing sessions to be lost from memory. func (s *Store) ResetInProgressEventsToPending() (int64, error) { - result := s.db.Model(&store.TSSEvent{}). + result := s.db.Model(&store.PCEvent{}). Where("status = ?", StatusInProgress). Update("status", StatusPending) if result.Error != nil { @@ -150,15 +154,29 @@ func (s *Store) ResetInProgressEventsToPending() (int64, error) { return result.RowsAffected, nil } -// CreateEvent stores a new TSSEvent. Returns error if event already exists. -func (s *Store) CreateEvent(event *store.TSSEvent) error { +// CreateEvent stores a new PCEvent. Returns error if event already exists. +func (s *Store) CreateEvent(event *store.PCEvent) error { if err := s.db.Create(event).Error; err != nil { return errors.Wrapf(err, "failed to create event %s", event.EventID) } s.logger.Info(). Str("event_id", event.EventID). - Str("protocol_type", event.ProtocolType). - Uint64("block_number", event.BlockNumber). + Str("type", event.Type). + Uint64("block_height", event.BlockHeight). 
Msg("stored new TSS event") return nil } + +// UpdateTxHash updates the TxHash field for an event (used after broadcasting). +func (s *Store) UpdateTxHash(eventID, txHash string) error { + result := s.db.Model(&store.PCEvent{}). + Where("event_id = ?", eventID). + Update("tx_hash", txHash) + if result.Error != nil { + return errors.Wrapf(result.Error, "failed to update tx_hash for event %s", eventID) + } + if result.RowsAffected == 0 { + return errors.Errorf("event %s not found", eventID) + } + return nil +} diff --git a/universalClient/tss/eventstore/store_test.go b/universalClient/tss/eventstore/store_test.go index 4907dc9d..619a4d79 100644 --- a/universalClient/tss/eventstore/store_test.go +++ b/universalClient/tss/eventstore/store_test.go @@ -19,7 +19,7 @@ func setupTestDB(t *testing.T) *gorm.DB { t.Fatalf("failed to open test database: %v", err) } - if err := db.AutoMigrate(&store.TSSEvent{}); err != nil { + if err := db.AutoMigrate(&store.PCEvent{}); err != nil { t.Fatalf("failed to migrate database: %v", err) } @@ -34,18 +34,18 @@ func setupTestStore(t *testing.T) *Store { } // createTestEvent creates a test TSS event in the database. 
-func createTestEvent(t *testing.T, s *Store, eventID string, blockNumber uint64, status string, expiryHeight uint64) { +func createTestEvent(t *testing.T, s *Store, eventID string, blockHeight uint64, status string, expiryHeight uint64) { eventData, _ := json.Marshal(map[string]interface{}{ "key_id": "test-key-1", }) - event := store.TSSEvent{ - EventID: eventID, - BlockNumber: blockNumber, - ProtocolType: "keygen", - Status: status, - ExpiryHeight: expiryHeight, - EventData: eventData, + event := store.PCEvent{ + EventID: eventID, + BlockHeight: blockHeight, + ExpiryBlockHeight: expiryHeight, + Type: "KEYGEN", + Status: status, + EventData: eventData, } if err := s.db.Create(&event).Error; err != nil { @@ -119,8 +119,8 @@ func TestGetPendingEvents(t *testing.T) { s := setupTestStore(t) createTestEvent(t, s, "pending-1", 80, StatusPending, 200) createTestEvent(t, s, "in-progress-1", 80, StatusInProgress, 200) - createTestEvent(t, s, "success-1", 80, StatusSuccess, 200) - createTestEvent(t, s, "expired-1", 80, StatusExpired, 200) + createTestEvent(t, s, "success-1", 80, StatusCompleted, 200) + createTestEvent(t, s, "reverted-1", 80, StatusReverted, 200) events, err := s.GetPendingEvents(100, 10) if err != nil { @@ -134,30 +134,19 @@ func TestGetPendingEvents(t *testing.T) { } }) - t.Run("filters expired events", func(t *testing.T) { + t.Run("excludes expired events", func(t *testing.T) { s := setupTestStore(t) - // Create expired event (expiry at 90, current block is 100) - createTestEvent(t, s, "expired-1", 80, StatusPending, 90) - createTestEvent(t, s, "valid-1", 80, StatusPending, 200) + // Create events with different expiry heights + createTestEvent(t, s, "expired-1", 80, StatusPending, 90) // expired (expiry 90 < current 100) + createTestEvent(t, s, "valid-1", 80, StatusPending, 200) // not expired (expiry 200 > current 100) + createTestEvent(t, s, "valid-2", 80, StatusPending, 101) // not expired (expiry 101 > current 100) events, err := 
s.GetPendingEvents(100, 10) if err != nil { t.Fatalf("GetPendingEvents() error = %v, want nil", err) } - if len(events) != 1 { - t.Errorf("GetPendingEvents() returned %d events, want 1", len(events)) - } - if events[0].EventID != "valid-1" { - t.Errorf("GetPendingEvents() event ID = %s, want valid-1", events[0].EventID) - } - - // Verify expired event was marked as expired - expiredEvent, err := s.GetEvent("expired-1") - if err != nil { - t.Fatalf("GetEvent() error = %v, want nil", err) - } - if expiredEvent.Status != StatusExpired { - t.Errorf("expired event status = %s, want %s", expiredEvent.Status, StatusExpired) + if len(events) != 2 { + t.Errorf("GetPendingEvents() returned %d events, want 2", len(events)) } }) @@ -220,8 +209,8 @@ func TestGetEvent(t *testing.T) { if event.EventID != "event-1" { t.Errorf("GetEvent() event ID = %s, want event-1", event.EventID) } - if event.BlockNumber != 100 { - t.Errorf("GetEvent() block number = %d, want 100", event.BlockNumber) + if event.BlockHeight != 100 { + t.Errorf("GetEvent() block height = %d, want 100", event.BlockHeight) } if event.Status != StatusPending { t.Errorf("GetEvent() status = %s, want %s", event.Status, StatusPending) @@ -289,7 +278,7 @@ func TestUpdateStatus(t *testing.T) { t.Run("update non-existent event", func(t *testing.T) { s := setupTestStore(t) - err := s.UpdateStatus("non-existent", StatusSuccess, "") + err := s.UpdateStatus("non-existent", StatusCompleted, "") if err == nil { t.Fatal("UpdateStatus() error = nil, want error") } @@ -309,117 +298,50 @@ func TestUpdateStatus(t *testing.T) { } // IN_PROGRESS -> SUCCESS - if err := s.UpdateStatus("event-1", StatusSuccess, ""); err != nil { + if err := s.UpdateStatus("event-1", StatusCompleted, ""); err != nil { t.Fatalf("UpdateStatus() error = %v", err) } event, _ = s.GetEvent("event-1") - if event.Status != StatusSuccess { - t.Errorf("UpdateStatus() status = %s, want %s", event.Status, StatusSuccess) - } - }) -} - -func TestGetEventsByStatus(t 
*testing.T) { - t.Run("get events by status", func(t *testing.T) { - s := setupTestStore(t) - createTestEvent(t, s, "pending-1", 100, StatusPending, 200) - createTestEvent(t, s, "pending-2", 101, StatusPending, 200) - createTestEvent(t, s, "success-1", 102, StatusSuccess, 200) - createTestEvent(t, s, "expired-1", 103, StatusExpired, 200) - - events, err := s.GetEventsByStatus(StatusPending, 0) - if err != nil { - t.Fatalf("GetEventsByStatus() error = %v, want nil", err) - } - if len(events) != 2 { - t.Errorf("GetEventsByStatus() returned %d events, want 2", len(events)) - } - // Should be ordered by created_at DESC - if events[0].EventID != "pending-2" { - t.Errorf("GetEventsByStatus() first event ID = %s, want pending-2", events[0].EventID) - } - if events[1].EventID != "pending-1" { - t.Errorf("GetEventsByStatus() second event ID = %s, want pending-1", events[1].EventID) - } - }) - - t.Run("get events with limit", func(t *testing.T) { - s := setupTestStore(t) - createTestEvent(t, s, "pending-1", 100, StatusPending, 200) - createTestEvent(t, s, "pending-2", 101, StatusPending, 200) - createTestEvent(t, s, "pending-3", 102, StatusPending, 200) - - events, err := s.GetEventsByStatus(StatusPending, 2) - if err != nil { - t.Fatalf("GetEventsByStatus() error = %v, want nil", err) - } - if len(events) != 2 { - t.Errorf("GetEventsByStatus() returned %d events, want 2", len(events)) - } - }) - - t.Run("no events with status", func(t *testing.T) { - s := setupTestStore(t) - createTestEvent(t, s, "pending-1", 100, StatusPending, 200) - - events, err := s.GetEventsByStatus(StatusSuccess, 0) - if err != nil { - t.Fatalf("GetEventsByStatus() error = %v, want nil", err) - } - if len(events) != 0 { - t.Errorf("GetEventsByStatus() returned %d events, want 0", len(events)) - } - }) - - t.Run("limit zero returns all", func(t *testing.T) { - s := setupTestStore(t) - createTestEvent(t, s, "pending-1", 100, StatusPending, 200) - createTestEvent(t, s, "pending-2", 101, StatusPending, 
200) - - events, err := s.GetEventsByStatus(StatusPending, 0) - if err != nil { - t.Fatalf("GetEventsByStatus() error = %v, want nil", err) - } - if len(events) != 2 { - t.Errorf("GetEventsByStatus() returned %d events, want 2", len(events)) + if event.Status != StatusCompleted { + t.Errorf("UpdateStatus() status = %s, want %s", event.Status, StatusCompleted) } }) } -func TestClearExpiredAndSuccessfulEvents(t *testing.T) { +func TestClearTerminalEvents(t *testing.T) { t.Run("clear both expired and successful events", func(t *testing.T) { s := setupTestStore(t) - createTestEvent(t, s, "success-1", 100, StatusSuccess, 200) - createTestEvent(t, s, "expired-1", 101, StatusExpired, 200) + createTestEvent(t, s, "success-1", 100, StatusCompleted, 200) + createTestEvent(t, s, "reverted-1", 101, StatusReverted, 200) createTestEvent(t, s, "pending-1", 102, StatusPending, 200) createTestEvent(t, s, "in-progress-1", 103, StatusInProgress, 200) - deleted, err := s.ClearExpiredAndSuccessfulEvents() + deleted, err := s.ClearTerminalEvents() if err != nil { - t.Fatalf("ClearExpiredAndSuccessfulEvents() error = %v, want nil", err) + t.Fatalf("ClearTerminalEvents() error = %v, want nil", err) } if deleted != 2 { - t.Errorf("ClearExpiredAndSuccessfulEvents() deleted %d events, want 2", deleted) + t.Errorf("ClearTerminalEvents() deleted %d events, want 2", deleted) } - // Verify both types are gone - success, _ := s.GetEventsByStatus(StatusSuccess, 0) - if len(success) != 0 { - t.Errorf("GetEventsByStatus(StatusSuccess) returned %d events, want 0", len(success)) + // Verify both types are gone by trying to get them + successEvent, err := s.GetEvent("success-1") + if err == nil && successEvent != nil { + t.Errorf("ClearTerminalEvents() did not delete completed event") } - expired, _ := s.GetEventsByStatus(StatusExpired, 0) - if len(expired) != 0 { - t.Errorf("GetEventsByStatus(StatusExpired) returned %d events, want 0", len(expired)) + revertedEvent, err := s.GetEvent("reverted-1") + 
if err == nil && revertedEvent != nil { + t.Errorf("ClearTerminalEvents() did not delete reverted event") } // Verify other events still exist - pending, _ := s.GetEventsByStatus(StatusPending, 0) - if len(pending) != 1 { - t.Errorf("GetEventsByStatus(StatusPending) returned %d events, want 1", len(pending)) + pendingEvent, err := s.GetEvent("pending-1") + if err != nil || pendingEvent == nil { + t.Errorf("ClearTerminalEvents() incorrectly deleted pending event") } - inProgress, _ := s.GetEventsByStatus(StatusInProgress, 0) - if len(inProgress) != 1 { - t.Errorf("GetEventsByStatus(StatusInProgress) returned %d events, want 1", len(inProgress)) + inProgressEvent, err := s.GetEvent("in-progress-1") + if err != nil || inProgressEvent == nil { + t.Errorf("ClearTerminalEvents() incorrectly deleted in-progress event") } }) } diff --git a/universalClient/tss/maintenance/maintenance.go b/universalClient/tss/maintenance/maintenance.go new file mode 100644 index 00000000..f7e0fff3 --- /dev/null +++ b/universalClient/tss/maintenance/maintenance.go @@ -0,0 +1,301 @@ +// Package maintenance handles TSS event maintenance tasks including expiry processing and database cleanup. +package maintenance + +import ( + "context" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/rs/zerolog" + + "github.com/pushchain/push-chain-node/universalClient/pushcore" + "github.com/pushchain/push-chain-node/universalClient/store" + "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" +) + +// OutboundVoter handles voting for outbound transaction results. +type OutboundVoter interface { + // VoteOutbound votes on an outbound transaction observation. + // isSuccess indicates whether the transaction succeeded. + // For success: txHash and blockHeight must be provided (blockHeight > 0). + // For revert: reason must be provided; txHash and blockHeight are optional (if txHash is provided, blockHeight must be > 0). 
+ VoteOutbound(ctx context.Context, txID string, isSuccess bool, txHash string, blockHeight uint64, reason string) (string, error) +} + +// Config contains configuration for the maintenance handler. +type Config struct { + // PollInterval is how often to check for expired events (default: 30s) + PollInterval time.Duration + + // CleanupInterval is how often to clean up terminal events (default: 1h) + CleanupInterval time.Duration +} + +// DefaultConfig returns sensible defaults. +func DefaultConfig() Config { + return Config{ + PollInterval: 30 * time.Second, + CleanupInterval: 1 * time.Hour, + } +} + +// TODO: Handle BROADCASTED events completion via chain event listeners. +// Instead of polling for transaction confirmations, chain event listeners should: +// 1. Listen to gateway contract events on each chain +// 2. When a gateway event is received with enough confirmations for that chain, +// mark the corresponding BROADCASTED event as COMPLETED +// 3. Vote for outbound success/revert based on the gateway event result +// This will be implemented in the chain-specific event listeners. + +// Handler handles TSS event maintenance tasks including expiry processing and database cleanup. +type Handler struct { + eventStore *eventstore.Store + pushCore *pushcore.Client + voter OutboundVoter + config Config + logger zerolog.Logger + + mu sync.RWMutex + running bool + stopCh chan struct{} +} + +// NewHandler creates a new maintenance handler. 
+func NewHandler( + eventStore *eventstore.Store, + pushCore *pushcore.Client, + voter OutboundVoter, + config Config, + logger zerolog.Logger, +) *Handler { + if config.PollInterval == 0 || config.CleanupInterval == 0 { + defaultConfig := DefaultConfig() + if config.PollInterval == 0 { + config.PollInterval = defaultConfig.PollInterval + } + if config.CleanupInterval == 0 { + config.CleanupInterval = defaultConfig.CleanupInterval + } + } + return &Handler{ + eventStore: eventStore, + pushCore: pushCore, + voter: voter, + config: config, + logger: logger.With().Str("component", "tss_maintenance").Logger(), + stopCh: make(chan struct{}), + } +} + +// Start begins the maintenance handler. +func (h *Handler) Start(ctx context.Context) error { + h.mu.Lock() + if h.running { + h.mu.Unlock() + return errors.New("maintenance handler already running") + } + h.running = true + h.mu.Unlock() + + h.logger.Info(). + Dur("poll_interval", h.config.PollInterval). + Dur("cleanup_interval", h.config.CleanupInterval). + Msg("starting TSS maintenance handler") + + go h.runLoop(ctx) + return nil +} + +// Stop stops the maintenance handler. 
+func (h *Handler) Stop() { + h.mu.Lock() + defer h.mu.Unlock() + + if !h.running { + return + } + + close(h.stopCh) + h.running = false + h.logger.Info().Msg("TSS maintenance handler stopped") +} + +func (h *Handler) runLoop(ctx context.Context) { + expiryTicker := time.NewTicker(h.config.PollInterval) + defer expiryTicker.Stop() + + cleanupTicker := time.NewTicker(h.config.CleanupInterval) + defer cleanupTicker.Stop() + + // Run immediately on start + h.checkExpired(ctx) + h.clearTerminalEvents(ctx) + + for { + select { + case <-ctx.Done(): + return + case <-h.stopCh: + return + case <-expiryTicker.C: + h.checkExpired(ctx) + case <-cleanupTicker.C: + h.clearTerminalEvents(ctx) + } + } +} + +func (h *Handler) checkExpired(ctx context.Context) { + // Handle expired events + if err := h.handleExpiredEvents(ctx); err != nil { + h.logger.Error().Err(err).Msg("error handling expired events") + } +} + +// clearTerminalEvents clears expired, reverted, and completed events from the database. +func (h *Handler) clearTerminalEvents(ctx context.Context) { + deletedCount, err := h.eventStore.ClearTerminalEvents() + if err != nil { + h.logger.Error().Err(err).Msg("error clearing terminal events") + return + } + + if deletedCount > 0 { + h.logger.Info(). + Int64("deleted_count", deletedCount). + Msg("cleared terminal events (expired, reverted, completed) from database") + } +} + +// handleExpiredEvents finds and processes expired events. 
+func (h *Handler) handleExpiredEvents(ctx context.Context) error { + currentBlock, err := h.pushCore.GetLatestBlock() + if err != nil { + return errors.Wrap(err, "failed to get current block") + } + + // Get all expired events (PENDING, IN_PROGRESS, or BROADCASTED) + events, err := h.eventStore.GetExpiredEvents(currentBlock) + if err != nil { + return errors.Wrap(err, "failed to get expired events") + } + + if len(events) == 0 { + return nil + } + + h.logger.Info().Int("count", len(events)).Msg("processing expired events") + + for _, event := range events { + if err := h.processExpiredEvent(ctx, &event); err != nil { + h.logger.Error(). + Err(err). + Str("event_id", event.EventID). + Str("type", event.Type). + Str("status", event.Status). + Msg("failed to process expired event") + } + } + + return nil +} + +func (h *Handler) processExpiredEvent(ctx context.Context, event *store.PCEvent) error { + h.logger.Info(). + Str("event_id", event.EventID). + Str("type", event.Type). + Str("status", event.Status). + Uint64("expiry_block", event.ExpiryBlockHeight). + Msg("processing expired event") + + switch event.Type { + case "KEYGEN", "KEYREFRESH", "QUORUM_CHANGE": + // For key events, mark as EXPIRED + if err := h.eventStore.UpdateStatus(event.EventID, eventstore.StatusExpired, "expired"); err != nil { + return errors.Wrap(err, "failed to mark key event as expired") + } + h.logger.Info(). + Str("event_id", event.EventID). + Str("status", event.Status). 
+ Msg("key event marked as expired") + + case "SIGN": + // For sign events, vote for revert on Push chain and mark as REVERTED + // For outbound events, txID is the eventID + txID := event.EventID + + // Determine reason based on current status + var reason string + var txHash string + var blockHeight uint64 + + switch event.Status { + case eventstore.StatusPending: + reason = "expired before signing completed" + // No txHash or blockHeight for pending events + case eventstore.StatusInProgress: + reason = "expired during TSS signing" + // No txHash or blockHeight for in-progress events + case eventstore.StatusBroadcasted: + reason = "expired after broadcast, no confirmations received" + // If broadcasted, we might have a txHash + if event.TxHash != "" { + // Parse CAIP format to get raw hash (chain expects simple hash, not CAIP) + var err error + _, txHash, err = parseCaipTxHash(event.TxHash) + if err != nil { + h.logger.Warn().Err(err).Str("event_id", event.EventID).Msg("failed to parse txHash, voting without it") + txHash = "" + } + } + default: + reason = "expired" + } + + if h.voter != nil { + voteTxHash, err := h.voter.VoteOutbound(ctx, txID, false, txHash, blockHeight, reason) + if err != nil { + h.logger.Error().Err(err).Str("event_id", event.EventID).Msg("failed to vote for revert") + // Still mark as reverted locally + } else { + h.logger.Info(). + Str("event_id", event.EventID). + Str("vote_tx_hash", voteTxHash). + Str("original_status", event.Status). + Msg("voted for outbound revert (expired)") + } + } + + if err := h.eventStore.UpdateStatus(event.EventID, eventstore.StatusReverted, reason); err != nil { + return errors.Wrap(err, "failed to mark sign event as reverted") + } + h.logger.Info(). + Str("event_id", event.EventID). + Str("original_status", event.Status). 
+ Msg("sign event marked as reverted (expired)") + + default: + h.logger.Warn().Str("event_id", event.EventID).Str("type", event.Type).Msg("unknown event type for expiry handling") + } + + return nil +} + +// parseCaipTxHash parses a CAIP format tx hash: {chainId}:{txHash} +func parseCaipTxHash(caipTxHash string) (chainID, txHash string, err error) { + // Find the last colon (chainID can contain colons, e.g., "eip155:11155111") + lastColon := -1 + for i := len(caipTxHash) - 1; i >= 0; i-- { + if caipTxHash[i] == ':' { + lastColon = i + break + } + } + + if lastColon == -1 || lastColon == 0 || lastColon == len(caipTxHash)-1 { + return "", "", errors.Errorf("invalid CAIP tx hash format: %s", caipTxHash) + } + + return caipTxHash[:lastColon], caipTxHash[lastColon+1:], nil +} diff --git a/universalClient/tss/sessionmanager/sessionmanager.go b/universalClient/tss/sessionmanager/sessionmanager.go index 0feb9841..83cef323 100644 --- a/universalClient/tss/sessionmanager/sessionmanager.go +++ b/universalClient/tss/sessionmanager/sessionmanager.go @@ -1,10 +1,12 @@ package sessionmanager import ( + "bytes" "context" "crypto/sha256" "encoding/hex" "encoding/json" + "math/big" "strconv" "sync" "time" @@ -12,17 +14,23 @@ import ( "github.com/pkg/errors" "github.com/rs/zerolog" + "github.com/pushchain/push-chain-node/universalClient/chains/common" + "github.com/pushchain/push-chain-node/universalClient/pushcore" "github.com/pushchain/push-chain-node/universalClient/store" "github.com/pushchain/push-chain-node/universalClient/tss/coordinator" "github.com/pushchain/push-chain-node/universalClient/tss/dkls" "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" "github.com/pushchain/push-chain-node/universalClient/tss/keyshare" "github.com/pushchain/push-chain-node/universalClient/tss/vote" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" ) // SendFunc is a function type for sending messages to participants. 
type SendFunc func(ctx context.Context, peerID string, data []byte) error +// retryBlockDelay is the number of blocks to delay before retrying a failed event +const retryBlockDelay = 10 + // sessionState holds all state for a single session. type sessionState struct { session dkls.Session @@ -38,6 +46,8 @@ type SessionManager struct { eventStore *eventstore.Store coordinator *coordinator.Coordinator keyshareManager *keyshare.Manager + pushCore *pushcore.Client // For validating gas prices + txBuilderFactory common.OutboundTxBuilderFactory // For building tx to verify hash send SendFunc partyID string // Our validator address (pushvaloper format) logger zerolog.Logger @@ -54,6 +64,8 @@ func NewSessionManager( eventStore *eventstore.Store, coord *coordinator.Coordinator, keyshareManager *keyshare.Manager, + pushCore *pushcore.Client, + txBuilderFactory common.OutboundTxBuilderFactory, send SendFunc, partyID string, sessionExpiryTime time.Duration, @@ -64,6 +76,8 @@ func NewSessionManager( eventStore: eventStore, coordinator: coord, keyshareManager: keyshareManager, + pushCore: pushCore, + txBuilderFactory: txBuilderFactory, send: send, partyID: partyID, sessionExpiryTime: sessionExpiryTime, @@ -134,6 +148,13 @@ func (sm *SessionManager) handleSetupMessage(ctx context.Context, senderPeerID s } sm.mu.Unlock() + // 4.5. For SIGN events, verify the signing hash independently + if event.Type == string(coordinator.ProtocolSign) { + if err := sm.verifySignMetadata(ctx, event, msg.SignMetadata); err != nil { + return errors.Wrap(err, "sign metadata verification failed") + } + } + // 5. 
Create session based on protocol type session, err := sm.createSession(ctx, event, msg) if err != nil { @@ -144,7 +165,7 @@ func (sm *SessionManager) handleSetupMessage(ctx context.Context, senderPeerID s sm.mu.Lock() sm.sessions[msg.EventID] = &sessionState{ session: session, - protocolType: event.ProtocolType, + protocolType: event.Type, coordinator: senderPeerID, expiryTime: time.Now().Add(sm.sessionExpiryTime), participants: msg.Participants, @@ -158,7 +179,7 @@ func (sm *SessionManager) handleSetupMessage(ctx context.Context, senderPeerID s sm.logger.Info(). Str("event_id", msg.EventID). - Str("protocol", event.ProtocolType). + Str("protocol", event.Type). Msg("created session from setup message") // 8. Send ACK to coordinator @@ -368,7 +389,7 @@ func (sm *SessionManager) handleSessionFinished(ctx context.Context, eventID str // Handle based on protocol type switch state.protocolType { - case "keygen": + case string(coordinator.ProtocolKeygen): // Save keyshare using SHA256 hash of eventID if err := sm.keyshareManager.Store(result.Keyshare, storageID); err != nil { return errors.Wrapf(err, "failed to store keyshare for event %s", eventID) @@ -382,7 +403,7 @@ func (sm *SessionManager) handleSessionFinished(ctx context.Context, eventID str Str("keyshare_hash", hex.EncodeToString(keyshareHash[:])). Msg("saved keyshare from keygen") - case "keyrefresh": + case string(coordinator.ProtocolKeyrefresh): // Save new keyshare using SHA256 hash of eventID if err := sm.keyshareManager.Store(result.Keyshare, storageID); err != nil { return errors.Wrapf(err, "failed to store keyshare for event %s", eventID) @@ -396,7 +417,7 @@ func (sm *SessionManager) handleSessionFinished(ctx context.Context, eventID str Str("keyshare_hash", hex.EncodeToString(keyshareHash[:])). 
Msg("saved new keyshare from keyrefresh") - case "quorumchange": + case string(coordinator.ProtocolQuorumChange): // Quorumchange produces a new keyshare // Save new keyshare using SHA256 hash of eventID if err := sm.keyshareManager.Store(result.Keyshare, storageID); err != nil { @@ -412,42 +433,58 @@ func (sm *SessionManager) handleSessionFinished(ctx context.Context, eventID str Int("participant_count", len(result.Participants)). Msg("saved new keyshare from quorumchange with updated participants") - case "sign": - // TODO: Save signature to database for outbound Tx Processing + case string(coordinator.ProtocolSign): sm.logger.Info(). Str("event_id", eventID). Str("signature", hex.EncodeToString(result.Signature)). Str("public_key", hex.EncodeToString(result.PublicKey)). - Msg("signature generated and verified from sign session") + Msg("signature generated from sign session") + + // Get event data for broadcasting + event, err := sm.eventStore.GetEvent(eventID) + if err != nil { + return errors.Wrapf(err, "failed to get event %s for broadcasting", eventID) + } + + // All nodes broadcast for redundancy - duplicates are handled gracefully + if err := sm.handleSigningComplete(ctx, eventID, event.EventData, result.Signature); err != nil { + // handleSigningComplete only returns errors for critical failures (e.g., GetTxHash, UpdateTxHash, UpdateStatus) + // Broadcast errors are logged but don't cause function to return error + sm.logger.Error().Err(err).Str("event_id", eventID).Msg("failed to complete signing process") + return errors.Wrapf(err, "failed to complete signing process for event %s", eventID) + } default: return errors.Errorf("unknown protocol type: %s", state.protocolType) } - // Vote on TSS key process (keygen/keyrefresh/quorumchange only) - if sm.voteHandler != nil && (state.protocolType == "keygen" || state.protocolType == "keyrefresh" || state.protocolType == "quorumchange") { - pubKeyHex := hex.EncodeToString(result.PublicKey) - - paEventIDInt, err 
:= strconv.ParseUint(eventID, 10, 64) - if err != nil { - return errors.Wrapf(err, "failed to parse process id from %s", eventID) - } - voteTxHash, err := sm.voteHandler.VoteTssKeyProcess(ctx, pubKeyHex, storageID, paEventIDInt) - if err != nil { - sm.logger.Warn().Err(err).Str("event_id", eventID).Msg("TSS vote failed - marking PENDING") + // Vote and mark completed for key events (keygen/keyrefresh/quorumchange) + // SIGN events are handled separately in handleSigningComplete + if state.protocolType != string(coordinator.ProtocolSign) { + if sm.voteHandler != nil { + pubKeyHex := hex.EncodeToString(result.PublicKey) - if err := sm.eventStore.UpdateStatus(eventID, eventstore.StatusPending, err.Error()); err != nil { - return errors.Wrapf(err, "failed to update event status to PENDING") + paEventIDInt, err := strconv.ParseUint(eventID, 10, 64) + if err != nil { + return errors.Wrapf(err, "failed to parse process id from %s", eventID) } - return nil // Event will be retried + voteTxHash, err := sm.voteHandler.VoteTssKeyProcess(ctx, pubKeyHex, storageID, paEventIDInt) + if err != nil { + // Vote failed after TSS signing - do NOT retry, let it expire naturally + // This prevents double signing since TSS signing is already complete + sm.logger.Error(). + Err(err). + Str("event_id", eventID). 
+ Msg("TSS vote failed after signing - event will expire naturally (no retry to prevent double signing)") + // Leave event in IN_PROGRESS status - it will expire and be handled by maintenance handler + return errors.Wrapf(err, "failed to vote for key process after TSS signing") + } + sm.logger.Info().Str("vote_tx_hash", voteTxHash).Str("event_id", eventID).Msg("TSS vote succeeded") } - sm.logger.Info().Str("vote_tx_hash", voteTxHash).Str("event_id", eventID).Msg("TSS vote succeeded") - } - - // Update event status to SUCCESS (only reached if vote succeeded or not required) - if err := sm.eventStore.UpdateStatus(eventID, eventstore.StatusSuccess, ""); err != nil { - return errors.Wrapf(err, "failed to update event status") + if err := sm.eventStore.UpdateStatus(eventID, eventstore.StatusCompleted, ""); err != nil { + return errors.Wrapf(err, "failed to update event status") + } } sm.logger.Info().Str("event_id", eventID).Msg("session finished successfully") @@ -456,11 +493,11 @@ func (sm *SessionManager) handleSessionFinished(ctx context.Context, eventID str } // createSession creates a new DKLS session based on event type. 
-func (sm *SessionManager) createSession(ctx context.Context, event *store.TSSEvent, msg *coordinator.Message) (dkls.Session, error) { +func (sm *SessionManager) createSession(ctx context.Context, event *store.PCEvent, msg *coordinator.Message) (dkls.Session, error) { threshold := coordinator.CalculateThreshold(len(msg.Participants)) - switch event.ProtocolType { - case "keygen": + switch event.Type { + case string(coordinator.ProtocolKeygen): return dkls.NewKeygenSession( msg.Payload, // setupData msg.EventID, // sessionID @@ -469,9 +506,9 @@ func (sm *SessionManager) createSession(ctx context.Context, event *store.TSSEve threshold, ) - case "keyrefresh": + case string(coordinator.ProtocolKeyrefresh): // Get current keyID - keyID, err := sm.coordinator.GetCurrentTSSKeyId() + keyID, _, err := sm.coordinator.GetCurrentTSSKey() if err != nil { return nil, errors.Wrap(err, "failed to get current TSS keyId") } @@ -491,9 +528,9 @@ func (sm *SessionManager) createSession(ctx context.Context, event *store.TSSEve oldKeyshare, ) - case "quorumchange": + case string(coordinator.ProtocolQuorumChange): // Get current keyID - keyID, err := sm.coordinator.GetCurrentTSSKeyId() + keyID, _, err := sm.coordinator.GetCurrentTSSKey() if err != nil { return nil, errors.Wrap(err, "failed to get current TSS keyId for quorumchange") } @@ -525,9 +562,9 @@ func (sm *SessionManager) createSession(ctx context.Context, event *store.TSSEve oldKeyshare, ) - case "sign": + case string(coordinator.ProtocolSign): // Get current keyID - keyID, err := sm.coordinator.GetCurrentTSSKeyId() + keyID, _, err := sm.coordinator.GetCurrentTSSKey() if err != nil { return nil, errors.Wrap(err, "failed to get current TSS keyId") } @@ -555,16 +592,16 @@ func (sm *SessionManager) createSession(ctx context.Context, event *store.TSSEve ) default: - return nil, errors.Errorf("unknown protocol type: %s", event.ProtocolType) + return nil, errors.Errorf("unknown protocol type: %s", event.Type) } } // 
validateParticipants validates that participants match protocol requirements. // For keygen/keyrefresh: participants must match exactly with eligible participants (same elements). // For sign: participants must be a valid >2/3 subset of eligible participants. -func (sm *SessionManager) validateParticipants(participants []string, event *store.TSSEvent) error { +func (sm *SessionManager) validateParticipants(participants []string, event *store.PCEvent) error { // Get eligible validators for this protocol - eligible := sm.coordinator.GetEligibleUV(string(event.ProtocolType)) + eligible := sm.coordinator.GetEligibleUV(string(event.Type)) if len(eligible) == 0 { return errors.New("no eligible validators for protocol") } @@ -584,26 +621,26 @@ func (sm *SessionManager) validateParticipants(participants []string, event *sto participantSet := make(map[string]bool) for _, partyID := range participants { if !eligibleSet[partyID] { - return errors.Errorf("participant %s is not eligible for protocol %s", partyID, event.ProtocolType) + return errors.Errorf("participant %s is not eligible for protocol %s", partyID, event.Type) } participantSet[partyID] = true } // Protocol-specific validation - switch event.ProtocolType { - case "keygen", "keyrefresh", "quorumchange": + switch event.Type { + case string(coordinator.ProtocolKeygen), string(coordinator.ProtocolKeyrefresh), string(coordinator.ProtocolQuorumChange): // For keygen, keyrefresh, and quorumchange: participants must match exactly with eligible participants if len(participants) != len(eligibleList) { - return errors.Errorf("participants count %d does not match eligible count %d for %s", len(participants), len(eligibleList), event.ProtocolType) + return errors.Errorf("participants count %d does not match eligible count %d for %s", len(participants), len(eligibleList), event.Type) } // Check all eligible are in participants for _, eligibleID := range eligibleList { if !participantSet[eligibleID] { - return 
errors.Errorf("eligible participant %s is missing from participants list for %s", eligibleID, event.ProtocolType) + return errors.Errorf("eligible participant %s is missing from participants list for %s", eligibleID, event.Type) } } - case "sign": + case string(coordinator.ProtocolSign): // For sign: participants must be exactly equal to threshold (no more, no less) threshold := coordinator.CalculateThreshold(len(eligibleList)) if len(participants) != threshold { @@ -612,7 +649,7 @@ func (sm *SessionManager) validateParticipants(participants []string, event *sto // All participants must be from eligible set (already validated above) default: - return errors.Errorf("unknown protocol type: %s", event.ProtocolType) + return errors.Errorf("unknown protocol type: %s", event.Type) } return nil @@ -698,9 +735,9 @@ func (sm *SessionManager) checkExpiredSessions(ctx context.Context, blockDelay u // Clean up session sm.cleanSession(eventID, state) - // Update event: mark as pending and set new block number (current + delay) - newBlockNumber := currentBlock + blockDelay - if err := sm.eventStore.UpdateStatusAndBlockNumber(eventID, eventstore.StatusPending, newBlockNumber); err != nil { + // Update event: mark as pending and set new block height (current + delay) + newBlockHeight := currentBlock + blockDelay + if err := sm.eventStore.UpdateStatusAndBlockHeight(eventID, eventstore.StatusPending, newBlockHeight); err != nil { sm.logger.Warn(). Err(err). Str("event_id", eventID). @@ -708,9 +745,201 @@ func (sm *SessionManager) checkExpiredSessions(ctx context.Context, blockDelay u } else { sm.logger.Info(). Str("event_id", eventID). - Uint64("new_block_number", newBlockNumber). + Uint64("new_block_height", newBlockHeight). 
Msg("expired session removed, event marked as pending for retry") } } } } + +// GasPriceTolerancePercent defines the acceptable deviation from oracle gas price (e.g., 10 = 10%) +const GasPriceTolerancePercent = 10 + +// verifySignMetadata validates the coordinator's signing request by: +// 1. Verifying the gas price is within acceptable range of on-chain oracle +// 2. Building the transaction independently using the same gas price +// 3. Comparing the resulting hash with coordinator's hash - must match exactly +func (sm *SessionManager) verifySignMetadata(ctx context.Context, event *store.PCEvent, meta *coordinator.SignMetadata) error { + if meta == nil { + return errors.New("sign metadata is required for SIGN events") + } + + if meta.GasPrice == nil { + return errors.New("gas price is missing in metadata") + } + + if len(meta.SigningHash) == 0 { + return errors.New("signing hash is missing in metadata") + } + + // Parse the event data to get outbound transaction details + var outboundData uexecutortypes.OutboundCreatedEvent + if err := json.Unmarshal(event.EventData, &outboundData); err != nil { + return errors.Wrap(err, "failed to parse outbound event data") + } + + // 1. Validate gas price is reasonable (within tolerance of oracle price) + if err := sm.validateGasPrice(ctx, outboundData.DestinationChain, meta.GasPrice); err != nil { + return errors.Wrap(err, "gas price validation failed") + } + + // 2. 
Build the transaction independently using the same gas price + if sm.txBuilderFactory == nil { + sm.logger.Warn().Msg("txBuilderFactory not configured, skipping hash verification") + return nil + } + + builder, err := sm.txBuilderFactory.CreateBuilder(outboundData.DestinationChain) + if err != nil { + return errors.Wrapf(err, "failed to create tx builder for chain %s", outboundData.DestinationChain) + } + + // Build transaction with the coordinator's gas price + txResult, err := builder.BuildTransaction(ctx, &outboundData, meta.GasPrice) + if err != nil { + return errors.Wrap(err, "failed to build transaction for verification") + } + + // 3. Compare hashes - must match exactly + if !bytes.Equal(txResult.SigningHash, meta.SigningHash) { + sm.logger.Error(). + Str("our_hash", hex.EncodeToString(txResult.SigningHash)). + Str("coordinator_hash", hex.EncodeToString(meta.SigningHash)). + Str("event_id", event.EventID). + Msg("signing hash mismatch - rejecting signing request") + return errors.New("signing hash mismatch: our computed hash does not match coordinator's hash") + } + + sm.logger.Debug(). + Str("event_id", event.EventID). + Str("gas_price", meta.GasPrice.String()). + Str("signing_hash", hex.EncodeToString(meta.SigningHash)). + Msg("sign metadata verified - hash matches") + + return nil +} + +// validateGasPrice checks that the provided gas price is within acceptable bounds of the oracle price. 
+func (sm *SessionManager) validateGasPrice(ctx context.Context, chainID string, gasPrice *big.Int) error { + if sm.pushCore == nil { + sm.logger.Warn().Msg("pushCore not configured, skipping gas price validation") + return nil + } + + if gasPrice == nil { + return errors.New("gas price is nil") + } + + // Get the current oracle gas price + oraclePrice, err := sm.pushCore.GetGasPrice(ctx, chainID) + if err != nil { + return errors.Wrap(err, "failed to get oracle gas price") + } + + // Check if gas price is within tolerance + // Allow coordinator's price to be within ±GasPriceTolerancePercent of oracle price + tolerance := new(big.Int).Div(oraclePrice, big.NewInt(100/GasPriceTolerancePercent)) + minPrice := new(big.Int).Sub(oraclePrice, tolerance) + maxPrice := new(big.Int).Add(oraclePrice, tolerance) + + if gasPrice.Cmp(minPrice) < 0 { + return errors.Errorf("gas price %s is too low (min: %s, oracle: %s)", gasPrice.String(), minPrice.String(), oraclePrice.String()) + } + if gasPrice.Cmp(maxPrice) > 0 { + return errors.Errorf("gas price %s is too high (max: %s, oracle: %s)", gasPrice.String(), maxPrice.String(), oraclePrice.String()) + } + + return nil +} + +// handleSigningComplete assembles and broadcasts the signed transaction. +// All nodes call this for redundancy - duplicate broadcasts are handled gracefully by the chain. 
+func (sm *SessionManager) handleSigningComplete(ctx context.Context, eventID string, eventData []byte, signature []byte) error { + // Parse event data to get outbound details + var outboundData uexecutortypes.OutboundCreatedEvent + if err := json.Unmarshal(eventData, &outboundData); err != nil { + return errors.Wrap(err, "failed to parse outbound event data") + } + + if sm.txBuilderFactory == nil { + return errors.New("tx builder factory not configured") + } + + // Get builder for destination chain + builder, err := sm.txBuilderFactory.CreateBuilder(outboundData.DestinationChain) + if err != nil { + return errors.Wrapf(err, "failed to create tx builder for chain %s", outboundData.DestinationChain) + } + + // Get gas price from oracle (same as was used during signing) + gasPrice, err := sm.pushCore.GetGasPrice(ctx, outboundData.DestinationChain) + if err != nil { + return errors.Wrapf(err, "failed to get gas price for chain %s", outboundData.DestinationChain) + } + + // Build the transaction (same deterministic result as coordinator) + txResult, err := builder.BuildTransaction(ctx, &outboundData, gasPrice) + if err != nil { + return errors.Wrap(err, "failed to build transaction") + } + + sm.logger.Info(). + Str("event_id", eventID). + Str("destination_chain", outboundData.DestinationChain). + Int("signature_len", len(signature)). 
+ Msg("assembling and broadcasting signed transaction") + + // Extract recovery ID from signature (if present, last byte) + var recoveryID byte = 0 + sigBytes := signature + if len(signature) == 65 { + recoveryID = signature[64] + sigBytes = signature[:64] + } + + // Assemble signed transaction + signedTx, err := builder.AssembleSignedTransaction(txResult.RawTx, sigBytes, recoveryID) + if err != nil { + return errors.Wrap(err, "failed to assemble signed transaction") + } + + // Calculate txHash from signed transaction (can be done before broadcasting) + txHash, err := builder.GetTxHash(signedTx) + if err != nil { + return errors.Wrap(err, "failed to get tx hash from signed transaction") + } + + // Format tx hash in CAIP format: {chainId}:{txHash} + caipTxHash := outboundData.DestinationChain + ":" + txHash + + // Always store the txHash (calculated from signed tx, independent of broadcast) + if err := sm.eventStore.UpdateTxHash(eventID, caipTxHash); err != nil { + sm.logger.Error().Err(err).Str("event_id", eventID).Msg("failed to update tx hash") + } + + // Broadcast to destination chain (errors are logged but don't prevent marking as BROADCASTED) + _, broadcastErr := builder.BroadcastTransaction(ctx, signedTx) + if broadcastErr != nil { + sm.logger.Warn(). + Err(broadcastErr). + Str("event_id", eventID). + Str("tx_hash", txHash). + Str("caip_tx_hash", caipTxHash). + Msg("broadcast failed - txHash stored, will expire automatically if not confirmed") + } else { + sm.logger.Info(). + Str("event_id", eventID). + Str("tx_hash", txHash). + Str("caip_tx_hash", caipTxHash). + Str("destination_chain", outboundData.DestinationChain). 
+ Msg("transaction broadcasted successfully") + } + + // Mark as BROADCASTED since we have the txHash (will expire automatically if not confirmed) + if err := sm.eventStore.UpdateStatus(eventID, eventstore.StatusBroadcasted, ""); err != nil { + sm.logger.Error().Err(err).Str("event_id", eventID).Msg("failed to update event status to BROADCASTED") + return errors.Wrap(err, "failed to update event status to BROADCASTED") + } + + return nil +} diff --git a/universalClient/tss/sessionmanager/sessionmanager_test.go b/universalClient/tss/sessionmanager/sessionmanager_test.go index 9fb6262b..65176617 100644 --- a/universalClient/tss/sessionmanager/sessionmanager_test.go +++ b/universalClient/tss/sessionmanager/sessionmanager_test.go @@ -70,7 +70,7 @@ func (m *mockSession) Close() { func setupTestSessionManager(t *testing.T) (*SessionManager, *coordinator.Coordinator, *eventstore.Store, *keyshare.Manager, *pushcore.Client, *gorm.DB) { db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{}) require.NoError(t, err) - require.NoError(t, db.AutoMigrate(&store.TSSEvent{})) + require.NoError(t, db.AutoMigrate(&store.PCEvent{})) evtStore := eventstore.NewStore(db, zerolog.Nop()) keyshareMgr, err := keyshare.NewManager(t.TempDir(), "test-password") @@ -115,6 +115,7 @@ func setupTestSessionManager(t *testing.T) (*SessionManager, *coordinator.Coordi evtStore, testClient, keyshareMgr, + nil, // txBuilderFactory - nil for tests "validator1", 100, // coordinatorRange 100*time.Millisecond, @@ -136,6 +137,8 @@ func setupTestSessionManager(t *testing.T) (*SessionManager, *coordinator.Coordi evtStore, coord, keyshareMgr, + nil, // pushCore - nil for testing + nil, // txBuilderFactory - nil for testing sendFn, "validator1", 3*time.Minute, // sessionExpiryTime @@ -173,11 +176,11 @@ func TestHandleSetupMessage_Validation(t *testing.T) { ctx := context.Background() // Create a test event by inserting it directly into the database - event := store.TSSEvent{ - EventID: "event1", - 
ProtocolType: "keygen", - Status: eventstore.StatusPending, - BlockNumber: 100, + event := store.PCEvent{ + EventID: "event1", + BlockHeight: 100, + Type: "KEYGEN", + Status: eventstore.StatusPending, } require.NoError(t, testDB.Create(&event).Error) @@ -248,7 +251,7 @@ func TestHandleStepMessage_Validation(t *testing.T) { sm.mu.Lock() sm.sessions["event1"] = &sessionState{ session: mockSess, - protocolType: "keygen", + protocolType: "KEYGEN", coordinator: "coordinator1", expiryTime: time.Now().Add(5 * time.Minute), participants: []string{"validator2", "validator3"}, @@ -274,11 +277,11 @@ func TestSessionManager_Integration(t *testing.T) { ctx := context.Background() // Create a keygen event by inserting it directly into the database - event := store.TSSEvent{ - EventID: "keygen-event", - ProtocolType: "keygen", - Status: eventstore.StatusPending, - BlockNumber: 100, + event := store.PCEvent{ + EventID: "keygen-event", + BlockHeight: 100, + Type: "KEYGEN", + Status: eventstore.StatusPending, } require.NoError(t, testDB.Create(&event).Error) diff --git a/universalClient/tss/tss.go b/universalClient/tss/tss.go index 622542ec..68cf5876 100644 --- a/universalClient/tss/tss.go +++ b/universalClient/tss/tss.go @@ -15,6 +15,7 @@ import ( "github.com/pkg/errors" "github.com/rs/zerolog" + "github.com/pushchain/push-chain-node/universalClient/chains/common" "github.com/pushchain/push-chain-node/universalClient/db" "github.com/pushchain/push-chain-node/universalClient/pushcore" "github.com/pushchain/push-chain-node/universalClient/tss/coordinator" @@ -45,6 +46,9 @@ type Config struct { DialTimeout time.Duration IOTimeout time.Duration + // Outbound transaction builder factory (required for sign operations) + TxBuilderFactory common.OutboundTxBuilderFactory + // Session expiry checker configuration SessionExpiryTime time.Duration // How long a session can be inactive before expiring (default: 5m) SessionExpiryCheckInterval time.Duration // How often to check for expired 
sessions (default: 30s) @@ -92,6 +96,7 @@ type Node struct { keyshareManager *keyshare.Manager database *db.DB pushCore *pushcore.Client + txBuilderFactory common.OutboundTxBuilderFactory logger zerolog.Logger eventStore *eventstore.Store coordinator *coordinator.Coordinator @@ -220,6 +225,7 @@ func NewNode(ctx context.Context, cfg Config) (*Node, error) { keyshareManager: mgr, database: database, pushCore: cfg.PushCore, + txBuilderFactory: cfg.TxBuilderFactory, logger: logger, eventStore: evtStore, sessionManager: nil, // Will be initialized in Start() @@ -285,6 +291,7 @@ func (n *Node) Start(ctx context.Context) error { n.eventStore, n.pushCore, n.keyshareManager, + n.txBuilderFactory, // OutboundTxBuilderFactory for building transactions n.validatorAddress, n.coordinatorRange, n.coordinatorPollInterval, @@ -302,6 +309,8 @@ func (n *Node) Start(ctx context.Context) error { n.eventStore, n.coordinator, n.keyshareManager, + n.pushCore, // For gas price verification + n.txBuilderFactory, // For building tx to verify signing hash func(ctx context.Context, peerID string, data []byte) error { return n.Send(ctx, peerID, data) }, diff --git a/universalClient/tss/vote/handler.go b/universalClient/tss/vote/handler.go index 59efd3e5..69d85f39 100644 --- a/universalClient/tss/vote/handler.go +++ b/universalClient/tss/vote/handler.go @@ -8,6 +8,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/rs/zerolog" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" utsstypes "github.com/pushchain/push-chain-node/x/utss/types" ) @@ -33,63 +34,63 @@ func NewHandler(txSigner TxSigner, log zerolog.Logger, granter string) *Handler } } -// VoteTssKeyProcess votes on a completed TSS key process. -// Returns vote tx hash on success, error on failure. -func (h *Handler) VoteTssKeyProcess(ctx context.Context, tssPubKey string, keyID string, processId uint64) (string, error) { - h.log.Info(). - Str("tss_pubkey", tssPubKey). - Str("key_id", keyID). 
- Msg("starting TSS key process vote") +const ( + // Default gas limit for vote transactions + defaultGasLimit = uint64(500000000) + // Default fee amount for vote transactions + defaultFeeAmount = "1000000000000000000upc" + // Default timeout for vote transactions + defaultVoteTimeout = 30 * time.Second +) - // Validate inputs +// validateHandler checks that the handler is properly configured +func (h *Handler) validateHandler() error { if h.txSigner == nil { - return "", fmt.Errorf("txSigner is nil - cannot sign transactions") + return fmt.Errorf("txSigner is nil - cannot sign transactions") } - if h.granter == "" { - return "", fmt.Errorf("granter address is empty - AuthZ not properly configured") + return fmt.Errorf("granter address is empty - AuthZ not properly configured") } + return nil +} - // Create MsgVoteTssKeyProcess - msg := &utsstypes.MsgVoteTssKeyProcess{ - Signer: h.granter, // The granter (operator) is the signer - TssPubkey: tssPubKey, - KeyId: keyID, - ProcessId: processId, +// prepareTxParams prepares gas limit and fee amount for transactions +func (h *Handler) prepareTxParams() (uint64, sdk.Coins, error) { + feeAmount, err := sdk.ParseCoinsNormalized(defaultFeeAmount) + if err != nil { + return 0, nil, fmt.Errorf("failed to parse fee amount: %w", err) } + return defaultGasLimit, feeAmount, nil +} - h.log.Debug(). - Str("msg_signer", msg.Signer). - Str("tss_pubkey", msg.TssPubkey). - Str("key_id", msg.KeyId). - Msg("created MsgVoteTssKeyProcess message") - - // Wrap message for AuthZ execution - msgs := []sdk.Msg{msg} - - // Configure gas and fees - using same values as other vote handlers - gasLimit := uint64(500000000) - feeAmount, err := sdk.ParseCoinsNormalized("1000000000000000000upc") +// broadcastVoteTx handles the common transaction broadcasting logic +// Returns the transaction hash on success, error on failure. 
+func (h *Handler) broadcastVoteTx( + ctx context.Context, + msgs []sdk.Msg, + memo string, + logFields map[string]interface{}, + errorPrefix string, +) (string, error) { + gasLimit, feeAmount, err := h.prepareTxParams() if err != nil { - return "", fmt.Errorf("failed to parse fee amount: %w", err) + return "", err } - memo := fmt.Sprintf("Vote on TSS key process: %s", keyID) - h.log.Debug(). Uint64("gas_limit", gasLimit). Str("fee_amount", feeAmount.String()). Str("memo", memo). + Fields(logFields). Msg("prepared transaction parameters, calling SignAndBroadcastAuthZTx") - // Create timeout context for the AuthZ transaction (30 second timeout) - voteCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + // Create timeout context for the AuthZ transaction + voteCtx, cancel := context.WithTimeout(ctx, defaultVoteTimeout) defer cancel() // Sign and broadcast the AuthZ transaction - h.log.Info(). - Str("key_id", keyID). - Msg("calling SignAndBroadcastAuthZTx") + logEvent := h.log.Info().Fields(logFields) + logEvent.Msg("calling SignAndBroadcastAuthZTx") txResp, err := h.txSigner.SignAndBroadcastAuthZTx( voteCtx, @@ -100,40 +101,192 @@ func (h *Handler) VoteTssKeyProcess(ctx context.Context, tssPubKey string, keyID ) h.log.Debug(). - Str("key_id", keyID). + Fields(logFields). Bool("success", err == nil). Msg("SignAndBroadcastAuthZTx completed") if err != nil { h.log.Error(). - Str("key_id", keyID). + Fields(logFields). Err(err). Msg("SignAndBroadcastAuthZTx failed") - return "", fmt.Errorf("failed to broadcast TSS vote transaction: %w", err) + return "", fmt.Errorf("failed to broadcast %s transaction: %w", errorPrefix, err) } h.log.Debug(). - Str("key_id", keyID). + Fields(logFields). Str("response_tx_hash", txResp.TxHash). Uint32("response_code", txResp.Code). Msg("received transaction response, checking status") if txResp.Code != 0 { h.log.Error(). - Str("key_id", keyID). + Fields(logFields). Str("response_tx_hash", txResp.TxHash). 
Uint32("response_code", txResp.Code). Str("raw_log", txResp.RawLog). - Msg("TSS vote transaction was rejected by blockchain") - return "", fmt.Errorf("TSS vote transaction failed with code %d: %s", txResp.Code, txResp.RawLog) + Str("error_prefix", errorPrefix). + Msg("vote transaction was rejected by blockchain") + return "", fmt.Errorf("%s transaction failed with code %d: %s", errorPrefix, txResp.Code, txResp.RawLog) } + return txResp.TxHash, nil +} + +// VoteTssKeyProcess votes on a completed TSS key process. +// Returns vote tx hash on success, error on failure. +func (h *Handler) VoteTssKeyProcess(ctx context.Context, tssPubKey string, keyID string, processId uint64) (string, error) { h.log.Info(). - Str("tx_hash", txResp.TxHash). + Str("tss_pubkey", tssPubKey). + Str("key_id", keyID). + Msg("starting TSS key process vote") + + // Validate handler configuration + if err := h.validateHandler(); err != nil { + return "", err + } + + // Create MsgVoteTssKeyProcess + msg := &utsstypes.MsgVoteTssKeyProcess{ + Signer: h.granter, // The granter (operator) is the signer + TssPubkey: tssPubKey, + KeyId: keyID, + ProcessId: processId, + } + + h.log.Debug(). + Str("msg_signer", msg.Signer). + Str("tss_pubkey", msg.TssPubkey). + Str("key_id", msg.KeyId). + Msg("created MsgVoteTssKeyProcess message") + + // Wrap message for AuthZ execution + msgs := []sdk.Msg{msg} + memo := fmt.Sprintf("Vote on TSS key process: %s", keyID) + + logFields := map[string]interface{}{ + "key_id": keyID, + } + + txHash, err := h.broadcastVoteTx(ctx, msgs, memo, logFields, "TSS vote") + if err != nil { + return "", err + } + + h.log.Info(). + Str("tx_hash", txHash). Str("key_id", keyID). Str("tss_pubkey", tssPubKey). - Int64("gas_used", txResp.GasUsed). Msg("successfully voted on TSS key process") - return txResp.TxHash, nil + return txHash, nil +} + +// VoteOutbound votes on an outbound transaction observation. +// txID is the outbound tx ID (abi.encode(utxId, outboundId)). 
+// isSuccess indicates whether the transaction succeeded. +// For success: txHash and blockHeight must be provided (blockHeight > 0). +// For revert: reason must be provided; txHash and blockHeight are optional (if txHash is provided, blockHeight must be > 0). +func (h *Handler) VoteOutbound(ctx context.Context, txID string, isSuccess bool, txHash string, blockHeight uint64, reason string) (string, error) { + if isSuccess { + h.log.Info(). + Str("tx_id", txID). + Str("tx_hash", txHash). + Uint64("block_height", blockHeight). + Msg("starting outbound success vote") + } else { + h.log.Info(). + Str("tx_id", txID). + Str("reason", reason). + Str("tx_hash", txHash). + Uint64("block_height", blockHeight). + Msg("starting outbound revert vote") + } + + // Validate handler configuration + if err := h.validateHandler(); err != nil { + return "", err + } + + // Validate specific inputs + if txID == "" { + return "", fmt.Errorf("txID cannot be empty") + } + + if isSuccess { + if txHash == "" { + return "", fmt.Errorf("txHash cannot be empty for success vote") + } + if blockHeight == 0 { + return "", fmt.Errorf("blockHeight must be > 0 for success vote") + } + } else { + if reason == "" { + return "", fmt.Errorf("reason cannot be empty for revert vote") + } + // If txHash is provided, blockHeight must be > 0 + if txHash != "" && blockHeight == 0 { + return "", fmt.Errorf("blockHeight must be > 0 when txHash is provided") + } + } + + // Create OutboundObservation + observedTx := uexecutortypes.OutboundObservation{ + Success: isSuccess, + BlockHeight: blockHeight, + TxHash: txHash, + ErrorMsg: reason, + } + + // Create MsgVoteOutbound + msg := &uexecutortypes.MsgVoteOutbound{ + Signer: h.granter, + TxId: txID, + ObservedTx: &observedTx, + } + + h.log.Debug(). + Str("msg_signer", msg.Signer). + Str("tx_id", msg.TxId). + Bool("success", observedTx.Success). + Str("tx_hash", observedTx.TxHash). + Uint64("block_height", observedTx.BlockHeight). 
+ Str("error_msg", observedTx.ErrorMsg). + Msg("created MsgVoteOutbound message") + + // Wrap message for AuthZ execution + msgs := []sdk.Msg{msg} + + var memo string + if isSuccess { + memo = fmt.Sprintf("Vote outbound success: %s", txID) + } else { + memo = fmt.Sprintf("Vote outbound revert: %s - %s", txID, reason) + } + + logFields := map[string]interface{}{ + "tx_id": txID, + "is_success": isSuccess, + } + + voteTxHash, err := h.broadcastVoteTx(ctx, msgs, memo, logFields, "outbound vote") + if err != nil { + return "", err + } + + if isSuccess { + h.log.Info(). + Str("tx_hash", voteTxHash). + Str("tx_id", txID). + Str("external_tx_hash", txHash). + Msg("successfully voted on outbound success") + } else { + h.log.Info(). + Str("tx_hash", voteTxHash). + Str("tx_id", txID). + Str("reason", reason). + Msg("successfully voted on outbound revert") + } + + return voteTxHash, nil } diff --git a/universalClient/tss/vote/handler_test.go b/universalClient/tss/vote/handler_test.go new file mode 100644 index 00000000..ac01caa3 --- /dev/null +++ b/universalClient/tss/vote/handler_test.go @@ -0,0 +1,574 @@ +package vote + +import ( + "context" + "errors" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" + utsstypes "github.com/pushchain/push-chain-node/x/utss/types" +) + +// MockTxSigner is a mock implementation of TxSigner +type MockTxSigner struct { + mock.Mock +} + +func (m *MockTxSigner) SignAndBroadcastAuthZTx( + ctx context.Context, + msgs []sdk.Msg, + memo string, + gasLimit uint64, + feeAmount sdk.Coins, +) (*sdk.TxResponse, error) { + args := m.Called(ctx, msgs, memo, gasLimit, feeAmount) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*sdk.TxResponse), args.Error(1) +} + +func TestNewHandler(t *testing.T) { + 
mockSigner := &MockTxSigner{} + logger := zerolog.Nop() + granter := "push1test123" + + handler := NewHandler(mockSigner, logger, granter) + + assert.NotNil(t, handler) + assert.Equal(t, mockSigner, handler.txSigner) + assert.Equal(t, granter, handler.granter) +} + +func TestHandler_validateHandler(t *testing.T) { + tests := []struct { + name string + txSigner TxSigner + granter string + wantError bool + errorMsg string + }{ + { + name: "valid handler", + txSigner: &MockTxSigner{}, + granter: "push1test123", + wantError: false, + }, + { + name: "nil txSigner", + txSigner: nil, + granter: "push1test123", + wantError: true, + errorMsg: "txSigner is nil", + }, + { + name: "empty granter", + txSigner: &MockTxSigner{}, + granter: "", + wantError: true, + errorMsg: "granter address is empty", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := &Handler{ + txSigner: tt.txSigner, + granter: tt.granter, + log: zerolog.Nop(), + } + + err := handler.validateHandler() + if tt.wantError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestHandler_prepareTxParams(t *testing.T) { + handler := &Handler{ + log: zerolog.Nop(), + } + + gasLimit, feeAmount, err := handler.prepareTxParams() + + require.NoError(t, err) + assert.Equal(t, defaultGasLimit, gasLimit) + assert.NotNil(t, feeAmount) + assert.Equal(t, "1000000000000000000upc", feeAmount.String()) +} + +func TestHandler_VoteTssKeyProcess_Success(t *testing.T) { + mockSigner := &MockTxSigner{} + logger := zerolog.Nop() + granter := "push1test123" + handler := NewHandler(mockSigner, logger, granter) + + tssPubKey := "0x1234567890abcdef" + keyID := "key-123" + processId := uint64(42) + expectedTxHash := "0xabcdef123456" + + // Setup mock response + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(&sdk.TxResponse{ + TxHash: expectedTxHash, + Code: 0, + GasUsed: 100000, + }, nil) + + txHash, err := handler.VoteTssKeyProcess(context.Background(), tssPubKey, keyID, processId) + + require.NoError(t, err) + assert.Equal(t, expectedTxHash, txHash) + mockSigner.AssertExpectations(t) +} + +func TestHandler_VoteTssKeyProcess_ValidationErrors(t *testing.T) { + tests := []struct { + name string + txSigner TxSigner + granter string + wantError bool + errorMsg string + }{ + { + name: "nil txSigner", + txSigner: nil, + granter: "push1test123", + wantError: true, + errorMsg: "txSigner is nil", + }, + { + name: "empty granter", + txSigner: &MockTxSigner{}, + granter: "", + wantError: true, + errorMsg: "granter address is empty", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := NewHandler(tt.txSigner, zerolog.Nop(), tt.granter) + + _, err := handler.VoteTssKeyProcess(context.Background(), "0x123", "key-123", 1) + + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + }) + } +} + +func TestHandler_VoteTssKeyProcess_BroadcastError(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + broadcastErr := errors.New("network error") + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil, broadcastErr) + + _, err := handler.VoteTssKeyProcess(context.Background(), "0x123", "key-123", 1) + + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to broadcast TSS vote transaction") + assert.Contains(t, err.Error(), "network error") +} + +func TestHandler_VoteTssKeyProcess_TransactionRejected(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(&sdk.TxResponse{ + TxHash: "0xrejected", + Code: 5, + RawLog: "insufficient funds", + }, nil) + + _, err := handler.VoteTssKeyProcess(context.Background(), "0x123", "key-123", 1) + + require.Error(t, err) + assert.Contains(t, err.Error(), "TSS vote transaction failed with code 5") + assert.Contains(t, err.Error(), "insufficient funds") +} + +func TestHandler_VoteOutbound_Success(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + txID := "0xtx123" + txHash := "0xexternal123" + blockHeight := uint64(1000) + expectedVoteTxHash := "0xvote123" + + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&sdk.TxResponse{ + TxHash: expectedVoteTxHash, + Code: 0, + GasUsed: 200000, + }, nil) + + voteTxHash, err := handler.VoteOutbound(context.Background(), txID, true, txHash, blockHeight, "") + + require.NoError(t, err) + assert.Equal(t, expectedVoteTxHash, voteTxHash) + + // Verify the message was created correctly + calls := mockSigner.Calls + require.Len(t, calls, 1) + msgs := calls[0].Arguments[1].([]sdk.Msg) + require.Len(t, msgs, 1) + msg, ok := msgs[0].(*uexecutortypes.MsgVoteOutbound) + require.True(t, ok) + assert.Equal(t, txID, msg.TxId) + assert.True(t, msg.ObservedTx.Success) + assert.Equal(t, txHash, msg.ObservedTx.TxHash) + assert.Equal(t, blockHeight, msg.ObservedTx.BlockHeight) +} + +func TestHandler_VoteOutbound_Revert(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + txID := "0xtx123" + reason := "transaction reverted" + expectedVoteTxHash := "0xvote456" + + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(&sdk.TxResponse{ + TxHash: expectedVoteTxHash, + Code: 0, + GasUsed: 200000, + }, nil) + + voteTxHash, err := handler.VoteOutbound(context.Background(), txID, false, "", 0, reason) + + require.NoError(t, err) + assert.Equal(t, expectedVoteTxHash, voteTxHash) + + // Verify the message was created correctly + calls := mockSigner.Calls + require.Len(t, calls, 1) + msgs := calls[0].Arguments[1].([]sdk.Msg) + require.Len(t, msgs, 1) + msg, ok := msgs[0].(*uexecutortypes.MsgVoteOutbound) + require.True(t, ok) + assert.Equal(t, txID, msg.TxId) + assert.False(t, msg.ObservedTx.Success) + assert.Equal(t, reason, msg.ObservedTx.ErrorMsg) +} + +func TestHandler_VoteOutbound_RevertWithTxHash(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + txID := "0xtx123" + txHash := "0xexternal456" + blockHeight := uint64(2000) + reason := "transaction reverted on chain" + expectedVoteTxHash := "0xvote789" + + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(&sdk.TxResponse{ + TxHash: expectedVoteTxHash, + Code: 0, + GasUsed: 200000, + }, nil) + + voteTxHash, err := handler.VoteOutbound(context.Background(), txID, false, txHash, blockHeight, reason) + + require.NoError(t, err) + assert.Equal(t, expectedVoteTxHash, voteTxHash) + + // Verify the message was created correctly + calls := mockSigner.Calls + require.Len(t, calls, 1) + msgs := calls[0].Arguments[1].([]sdk.Msg) + require.Len(t, msgs, 1) + msg, ok := msgs[0].(*uexecutortypes.MsgVoteOutbound) + require.True(t, ok) + assert.False(t, msg.ObservedTx.Success) + assert.Equal(t, txHash, msg.ObservedTx.TxHash) + assert.Equal(t, blockHeight, msg.ObservedTx.BlockHeight) + assert.Equal(t, reason, msg.ObservedTx.ErrorMsg) +} + +func TestHandler_VoteOutbound_ValidationErrors(t *testing.T) { + tests := []struct { + name string + txSigner TxSigner + granter string + txID string + isSuccess bool + txHash string + blockHeight uint64 + reason string + wantError bool + errorMsg string + }{ + { + name: "nil txSigner", + txSigner: nil, + granter: "push1test123", + txID: "0xtx123", + isSuccess: true, + txHash: "0xhash", + blockHeight: 1000, + wantError: true, + errorMsg: "txSigner is nil", + }, + { + name: "empty granter", + txSigner: &MockTxSigner{}, + granter: "", + txID: "0xtx123", + isSuccess: true, + txHash: "0xhash", + blockHeight: 1000, + wantError: true, + errorMsg: "granter address is empty", + }, + { + name: "empty txID", + txSigner: &MockTxSigner{}, + granter: "push1test123", + txID: "", + isSuccess: true, + txHash: "0xhash", + blockHeight: 1000, + wantError: true, + errorMsg: "txID cannot be empty", + }, + { + name: "success vote - empty txHash", + txSigner: &MockTxSigner{}, + granter: "push1test123", + txID: "0xtx123", + isSuccess: true, + txHash: "", + blockHeight: 1000, + wantError: true, + errorMsg: "txHash cannot be empty for success vote", + }, + { + name: "success vote - zero blockHeight", + txSigner: &MockTxSigner{}, + granter: "push1test123", + txID: 
"0xtx123", + isSuccess: true, + txHash: "0xhash", + blockHeight: 0, + wantError: true, + errorMsg: "blockHeight must be > 0 for success vote", + }, + { + name: "revert vote - empty reason", + txSigner: &MockTxSigner{}, + granter: "push1test123", + txID: "0xtx123", + isSuccess: false, + txHash: "", + blockHeight: 0, + reason: "", + wantError: true, + errorMsg: "reason cannot be empty for revert vote", + }, + { + name: "revert vote - txHash provided but zero blockHeight", + txSigner: &MockTxSigner{}, + granter: "push1test123", + txID: "0xtx123", + isSuccess: false, + txHash: "0xhash", + blockHeight: 0, + reason: "some reason", + wantError: true, + errorMsg: "blockHeight must be > 0 when txHash is provided", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := NewHandler(tt.txSigner, zerolog.Nop(), tt.granter) + + _, err := handler.VoteOutbound(context.Background(), tt.txID, tt.isSuccess, tt.txHash, tt.blockHeight, tt.reason) + + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + }) + } +} + +func TestHandler_VoteOutbound_BroadcastError(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + broadcastErr := errors.New("broadcast failed") + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil, broadcastErr) + + _, err := handler.VoteOutbound(context.Background(), "0xtx123", true, "0xhash", 1000, "") + + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to broadcast outbound vote transaction") + assert.Contains(t, err.Error(), "broadcast failed") +} + +func TestHandler_VoteOutbound_TransactionRejected(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(&sdk.TxResponse{ + TxHash: "0xrejected", + Code: 10, + RawLog: "invalid signature", + }, nil) + + _, err := handler.VoteOutbound(context.Background(), "0xtx123", true, "0xhash", 1000, "") + + require.Error(t, err) + assert.Contains(t, err.Error(), "outbound vote transaction failed with code 10") + assert.Contains(t, err.Error(), "invalid signature") +} + +func TestHandler_broadcastVoteTx_ContextTimeout(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + // Create a context that's already cancelled + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + msgs := []sdk.Msg{&utsstypes.MsgVoteTssKeyProcess{}} + logFields := map[string]interface{}{"test": "value"} + + // Mock should not be called because context is cancelled + // But we'll set it up anyway to verify timeout behavior + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil, context.Canceled) + + _, err := handler.broadcastVoteTx(ctx, msgs, "test memo", logFields, "test") + + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to broadcast test transaction") +} + +func TestHandler_broadcastVoteTx_VerifyParameters(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + msgs := []sdk.Msg{&utsstypes.MsgVoteTssKeyProcess{ + Signer: "push1test123", + TssPubkey: "0x123", + KeyId: "key-123", + ProcessId: 1, + }} + memo := "test memo" + logFields := map[string]interface{}{"key_id": "key-123"} + + expectedTxHash := "0xsuccess" + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, msgs, memo, defaultGasLimit, mock.MatchedBy(func(feeAmount sdk.Coins) bool { + return feeAmount.String() == "1000000000000000000upc" + })). 
+ Return(&sdk.TxResponse{ + TxHash: expectedTxHash, + Code: 0, + }, nil) + + txHash, err := handler.broadcastVoteTx(context.Background(), msgs, memo, logFields, "test") + + require.NoError(t, err) + assert.Equal(t, expectedTxHash, txHash) + mockSigner.AssertExpectations(t) +} + +func TestHandler_VoteTssKeyProcess_MessageCreation(t *testing.T) { + mockSigner := &MockTxSigner{} + granter := "push1operator123" + handler := NewHandler(mockSigner, zerolog.Nop(), granter) + + tssPubKey := "0xabcdef123456" + keyID := "key-456" + processId := uint64(99) + + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&sdk.TxResponse{ + TxHash: "0xhash", + Code: 0, + }, nil). + Run(func(args mock.Arguments) { + msgs := args.Get(1).([]sdk.Msg) + require.Len(t, msgs, 1) + msg, ok := msgs[0].(*utsstypes.MsgVoteTssKeyProcess) + require.True(t, ok) + assert.Equal(t, granter, msg.Signer) + assert.Equal(t, tssPubKey, msg.TssPubkey) + assert.Equal(t, keyID, msg.KeyId) + assert.Equal(t, processId, msg.ProcessId) + }) + + _, err := handler.VoteTssKeyProcess(context.Background(), tssPubKey, keyID, processId) + require.NoError(t, err) + mockSigner.AssertExpectations(t) +} + +func TestHandler_VoteOutbound_MemoFormat(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + txID := "0xtx789" + + // Test success memo + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, "Vote outbound success: 0xtx789", mock.Anything, mock.Anything). 
+ Return(&sdk.TxResponse{TxHash: "0xhash1", Code: 0}, nil) + + _, err := handler.VoteOutbound(context.Background(), txID, true, "0xhash", 1000, "") + require.NoError(t, err) + + // Reset mock + mockSigner.ExpectedCalls = nil + mockSigner.Calls = nil + + // Test revert memo + reason := "expired" + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, "Vote outbound revert: 0xtx789 - expired", mock.Anything, mock.Anything). + Return(&sdk.TxResponse{TxHash: "0xhash2", Code: 0}, nil) + + _, err = handler.VoteOutbound(context.Background(), txID, false, "", 0, reason) + require.NoError(t, err) + mockSigner.AssertExpectations(t) +} + +func TestHandler_broadcastVoteTx_ContextCancellation(t *testing.T) { + mockSigner := &MockTxSigner{} + handler := NewHandler(mockSigner, zerolog.Nop(), "push1test123") + + msgs := []sdk.Msg{&utsstypes.MsgVoteTssKeyProcess{}} + logFields := map[string]interface{}{} + + // Create a context that's already cancelled + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + // Mock should return context cancelled error + mockSigner.On("SignAndBroadcastAuthZTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil, context.Canceled) + + _, err := handler.broadcastVoteTx(ctx, msgs, "test", logFields, "test") + + // Should return error due to cancelled context + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to broadcast test transaction") +} diff --git a/utils/conversion.go b/utils/conversion.go index 880a4ad3..c6693bde 100644 --- a/utils/conversion.go +++ b/utils/conversion.go @@ -2,6 +2,7 @@ package utils import ( "encoding/hex" + "fmt" "math/big" "strings" ) @@ -14,3 +15,26 @@ func StringToBigInt(s string) *big.Int { bi, _ := new(big.Int).SetString(s, 10) return bi } + +// Returns evm chainId, e.g. 
push_42101-1 -> 42101 +func ExtractEvmChainID(chainID string) (string, error) { + parts := strings.Split(chainID, "_") + if len(parts) != 2 { + return "", fmt.Errorf("invalid chain-id format: %s", chainID) + } + + idPart := parts[1] + idParts := strings.Split(idPart, "-") + if len(idParts) < 1 { + return "", fmt.Errorf("invalid chain-id format: %s", chainID) + } + + evmChainID := idParts[0] + + // Ensure numeric + if _, ok := new(big.Int).SetString(evmChainID, 10); !ok { + return "", fmt.Errorf("invalid EVM chain id in tendermint chain-id: %s", chainID) + } + + return evmChainID, nil +} diff --git a/x/uexecutor/keeper/create_outbound.go b/x/uexecutor/keeper/create_outbound.go new file mode 100644 index 00000000..fe5bb59a --- /dev/null +++ b/x/uexecutor/keeper/create_outbound.go @@ -0,0 +1,215 @@ +package keeper + +import ( + "context" + "fmt" + "strings" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + evmtypes "github.com/cosmos/evm/x/vm/types" + "github.com/pushchain/push-chain-node/x/uexecutor/types" + uregistrytypes "github.com/pushchain/push-chain-node/x/uregistry/types" +) + +func (k Keeper) BuildOutboundsFromReceipt( + ctx context.Context, + utxId string, + receipt *evmtypes.MsgEthereumTxResponse, +) ([]*types.OutboundTx, error) { + + outbounds := []*types.OutboundTx{} + universalGatewayPC := strings.ToLower(uregistrytypes.SYSTEM_CONTRACTS["UNIVERSAL_GATEWAY_PC"].Address) + + for _, lg := range receipt.Logs { + if lg.Removed { + continue + } + + if strings.ToLower(lg.Address) != universalGatewayPC { + continue + } + + if len(lg.Topics) == 0 { + continue + } + + if strings.ToLower(lg.Topics[0]) != strings.ToLower(types.UniversalTxOutboundEventSig) { + continue + } + + event, err := types.DecodeUniversalTxOutboundFromLog(lg) + if err != nil { + return nil, fmt.Errorf("failed to decode UniversalTxWithdraw: %w", err) + } + + // Get the external asset addr + tokenCfg, err := k.uregistryKeeper.GetTokenConfigByPRC20( + ctx, 
event.ChainId, + event.Token, // PRC20 address + ) + if err != nil { + return nil, err + } + + outbound := &types.OutboundTx{ + DestinationChain: event.ChainId, + Recipient: event.Target, + Amount: event.Amount.String(), + ExternalAssetAddr: tokenCfg.Address, + Prc20AssetAddr: event.Token, + Sender: event.Sender, + Payload: event.Payload, + GasLimit: event.GasLimit.String(), + TxType: event.TxType, + PcTx: &types.OriginatingPcTx{ + TxHash: receipt.Hash, + LogIndex: fmt.Sprintf("%d", lg.Index), + }, + RevertInstructions: &types.RevertInstructions{ + FundRecipient: event.RevertRecipient, + }, + OutboundStatus: types.Status_PENDING, + Id: event.TxID, + } + + outbounds = append(outbounds, outbound) + } + + return outbounds, nil +} + +func (k Keeper) CreateUniversalTxFromPCTx( + ctx context.Context, + pcTx types.PCTx, +) (*types.UniversalTx, error) { + + universalTxKey, err := k.BuildPcUniversalTxKey(ctx, pcTx) + if err != nil { + return nil, errors.Wrap(err, "failed to create UniversalTx key") + } + + found, err := k.HasUniversalTx(ctx, universalTxKey) + if err != nil { + return nil, errors.Wrap(err, "failed to check UniversalTx") + } + if found { + return nil, fmt.Errorf("universal tx already exists for pc tx %s", pcTx.TxHash) + } + + utx := types.UniversalTx{ + Id: universalTxKey, + InboundTx: nil, // no inbound + PcTx: []*types.PCTx{&pcTx}, // origin is PC + OutboundTx: nil, + UniversalStatus: types.UniversalTxStatus_PC_EXECUTED_SUCCESS, + } + + if err := k.CreateUniversalTx(ctx, universalTxKey, utx); err != nil { + return nil, err + } + + return &utx, nil +} + +// AttachOutboundsToExistingUniversalTx +// Used when UniversalTx already exists (e.g. inbound execution) +// It attaches outbounds extracted from receipt to the existing utx. 
+func (k Keeper) AttachOutboundsToExistingUniversalTx( + ctx sdk.Context, + receipt *evmtypes.MsgEthereumTxResponse, + utx types.UniversalTx, +) error { + outbounds, err := k.BuildOutboundsFromReceipt(ctx, utx.Id, receipt) + if err != nil { + return err + } + + return k.attachOutboundsToUtx(ctx, utx.Id, outbounds, "") +} + +// CreateUniversalTxFromReceiptIfOutbound +// Creates a UniversalTx ONLY if outbound events exist in the receipt. +// Safe to call from ExecutePayload, EVM hooks +func (k Keeper) CreateUniversalTxFromReceiptIfOutbound( + ctx sdk.Context, + receipt *evmtypes.MsgEthereumTxResponse, + pcTx types.PCTx, +) error { + universalTxKey, err := k.BuildPcUniversalTxKey(ctx, pcTx) + if err != nil { + return errors.Wrap(err, "failed to create UniversalTx key") + } + + outbounds, err := k.BuildOutboundsFromReceipt(ctx, universalTxKey, receipt) + if err != nil { + return err + } + + if len(outbounds) == 0 { + return nil + } + + utx, err := k.CreateUniversalTxFromPCTx(ctx, pcTx) + if err != nil { + return err + } + + return k.attachOutboundsToUtx(ctx, utx.Id, outbounds, "") +} + +func (k Keeper) attachOutboundsToUtx( + ctx sdk.Context, + utxId string, + outbounds []*types.OutboundTx, + revertMsg string, // revert msg if the outbound is for a inbound revert +) error { + + if len(outbounds) == 0 { + return nil + } + return k.UpdateUniversalTx(ctx, utxId, func(utx *types.UniversalTx) error { + + for _, outbound := range outbounds { + + utx.OutboundTx = append(utx.OutboundTx, outbound) + + // ABI-encode (utx_id, outbound_id) + txIDHex, err := types.EncodeOutboundTxIDHex(utxId, outbound.Id) + if err != nil { + return fmt.Errorf("failed to encode outbound txID: %w", err) + } + + var pcTxHash string + var logIndex string + + if outbound.PcTx != nil { + pcTxHash = outbound.PcTx.TxHash + logIndex = outbound.PcTx.LogIndex + } + + evt, err := types.NewOutboundCreatedEvent(types.OutboundCreatedEvent{ + UniversalTxId: utxId, + OutboundId: outbound.Id, + TxID: txIDHex, + 
DestinationChain: outbound.DestinationChain, + Recipient: outbound.Recipient, + Amount: outbound.Amount, + AssetAddr: outbound.ExternalAssetAddr, + Sender: outbound.Sender, + Payload: outbound.Payload, + GasLimit: outbound.GasLimit, + TxType: outbound.TxType.String(), + PcTxHash: pcTxHash, + LogIndex: logIndex, + RevertMsg: revertMsg, + }) + if err == nil { + ctx.EventManager().EmitEvent(evt) + } + } + + return nil + }) +} diff --git a/x/uexecutor/keeper/evm_hooks.go b/x/uexecutor/keeper/evm_hooks.go new file mode 100644 index 00000000..d65dc5fa --- /dev/null +++ b/x/uexecutor/keeper/evm_hooks.go @@ -0,0 +1,87 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + evmtypes "github.com/cosmos/evm/x/vm/types" + "github.com/ethereum/go-ethereum/common" + core "github.com/ethereum/go-ethereum/core" + ethtypes "github.com/ethereum/go-ethereum/core/types" + + "github.com/pushchain/push-chain-node/x/uexecutor/types" +) + +// EVMHooks implements the EVM post-processing hooks. +// This hook will be invoked after every EVM transaction execution +// and is responsible for detecting outbound events and creating UniversalTx if needed. +type EVMHooks struct { + k Keeper +} + +// NewEVMHooks creates a new instance of EVMHooks. +func NewEVMHooks(k Keeper) evmtypes.EvmHooks { + return EVMHooks{k: k} +} + +// PostTxProcessing is called by the EVM module after transaction execution. +// It inspects the receipt and creates UniversalTx + Outbound only if +// UniversalTxWithdraw event is detected. 
+func (h EVMHooks) PostTxProcessing( + ctx sdk.Context, + sender common.Address, + msg core.Message, + receipt *ethtypes.Receipt, +) error { + if receipt == nil || len(receipt.Logs) == 0 { + return nil + } + + protoReceipt := &evmtypes.MsgEthereumTxResponse{ + Hash: receipt.TxHash.Hex(), + GasUsed: receipt.GasUsed, + Logs: convertReceiptLogs(receipt.Logs), + } + + // Build pcTx representation + pcTx := types.PCTx{ + Sender: sender.Hex(), + TxHash: protoReceipt.Hash, + GasUsed: protoReceipt.GasUsed, + BlockHeight: uint64(ctx.BlockHeight()), + Status: "SUCCESS", + } + + // This will: + // - check if outbound exists + // - create universal tx if needed + // - attach outbounds + // - emit events + return h.k.CreateUniversalTxFromReceiptIfOutbound(ctx, protoReceipt, pcTx) +} + +func convertReceiptLogs(logs []*ethtypes.Log) []*evmtypes.Log { + out := make([]*evmtypes.Log, 0, len(logs)) + + for _, l := range logs { + out = append(out, &evmtypes.Log{ + Address: l.Address.Hex(), + Topics: convertTopics(l.Topics), + Data: l.Data, + BlockNumber: l.BlockNumber, + TxHash: l.TxHash.Hex(), + TxIndex: uint64(l.TxIndex), + BlockHash: l.BlockHash.Hex(), + Index: uint64(l.Index), + Removed: l.Removed, + }) + } + + return out +} + +func convertTopics(topics []common.Hash) []string { + out := make([]string, len(topics)) + for i, t := range topics { + out[i] = t.Hex() + } + return out +} diff --git a/x/uexecutor/keeper/execute_inbound.go b/x/uexecutor/keeper/execute_inbound.go index 9c949924..2bbf9f0c 100644 --- a/x/uexecutor/keeper/execute_inbound.go +++ b/x/uexecutor/keeper/execute_inbound.go @@ -9,16 +9,16 @@ import ( func (k Keeper) ExecuteInbound(ctx context.Context, utx types.UniversalTx) error { switch utx.InboundTx.TxType { - case types.InboundTxType_GAS: // fee abstraction + case types.TxType_GAS: // fee abstraction return k.ExecuteInboundGas(ctx, *utx.InboundTx) - case types.InboundTxType_FUNDS: // synthetic + case types.TxType_FUNDS: // synthetic return 
k.ExecuteInboundFunds(ctx, utx) - case types.InboundTxType_FUNDS_AND_PAYLOAD: // synthetic + payload + case types.TxType_FUNDS_AND_PAYLOAD: // synthetic + payload return k.ExecuteInboundFundsAndPayload(ctx, utx) - case types.InboundTxType_GAS_AND_PAYLOAD: // fee abstraction + payload + case types.TxType_GAS_AND_PAYLOAD: // fee abstraction + payload return k.ExecuteInboundGasAndPayload(ctx, utx) default: diff --git a/x/uexecutor/keeper/execute_inbound_funds.go b/x/uexecutor/keeper/execute_inbound_funds.go index ef828dd7..e99d5935 100644 --- a/x/uexecutor/keeper/execute_inbound_funds.go +++ b/x/uexecutor/keeper/execute_inbound_funds.go @@ -11,16 +11,18 @@ import ( func (k Keeper) ExecuteInboundFunds(ctx context.Context, utx types.UniversalTx) error { sdkCtx := sdk.UnwrapSDKContext(ctx) + inbound := utx.InboundTx + receipt, err := k.depositPRC20( sdkCtx, - utx.InboundTx.SourceChain, - utx.InboundTx.AssetAddr, - common.HexToAddress(utx.InboundTx.Recipient), // recipient is inbound recipient - utx.InboundTx.Amount, + inbound.SourceChain, + inbound.AssetAddr, + common.HexToAddress(inbound.Recipient), // recipient is inbound recipient + inbound.Amount, ) _, ueModuleAddressStr := k.GetUeModuleAddress(ctx) - universalTxKey := types.GetInboundUniversalTxKey(*utx.InboundTx) + universalTxKey := types.GetInboundUniversalTxKey(*inbound) updateErr := k.UpdateUniversalTx(ctx, universalTxKey, func(utx *types.UniversalTx) error { pcTx := types.PCTx{ TxHash: "", // no hash if depositPRC20 failed @@ -48,5 +50,24 @@ func (k Keeper) ExecuteInboundFunds(ctx context.Context, utx types.UniversalTx) return updateErr } + if err != nil { + revertOutbound := types.OutboundTx{ + DestinationChain: inbound.SourceChain, + Recipient: func() string { + if inbound.RevertInstructions != nil { + return inbound.RevertInstructions.FundRecipient + } + return inbound.Sender + }(), + Amount: inbound.Amount, + ExternalAssetAddr: inbound.AssetAddr, + Sender: inbound.Sender, + TxType: 
types.TxType_INBOUND_REVERT, + OutboundStatus: types.Status_PENDING, + Id: types.GetOutboundRevertId(inbound.TxHash), + } + _ = k.attachOutboundsToUtx(sdkCtx, utx.Id, []*types.OutboundTx{&revertOutbound}, err.Error()) + } + return nil } diff --git a/x/uexecutor/keeper/execute_inbound_funds_and_payload.go b/x/uexecutor/keeper/execute_inbound_funds_and_payload.go index 6b3e6520..d96c4089 100644 --- a/x/uexecutor/keeper/execute_inbound_funds_and_payload.go +++ b/x/uexecutor/keeper/execute_inbound_funds_and_payload.go @@ -16,6 +16,9 @@ func (k Keeper) ExecuteInboundFundsAndPayload(ctx context.Context, utx types.Uni _, ueModuleAddressStr := k.GetUeModuleAddress(ctx) universalTxKey := types.GetInboundUniversalTxKey(*utx.InboundTx) + shouldRevert := false + var revertReason string + // Build universalAccountId universalAccountId := types.UniversalAccountId{ ChainNamespace: strings.Split(utx.InboundTx.SourceChain, ":")[0], @@ -33,8 +36,12 @@ func (k Keeper) ExecuteInboundFundsAndPayload(ctx context.Context, utx types.Uni ueaAddr, isDeployed, err := k.CallFactoryToGetUEAAddressForOrigin(sdkCtx, ueModuleAccAddress, factoryAddress, &universalAccountId) if err != nil { execErr = fmt.Errorf("factory lookup failed: %w", err) + shouldRevert = true + revertReason = execErr.Error() } else if !isDeployed { execErr = fmt.Errorf("UEA is not deployed") + shouldRevert = true + revertReason = execErr.Error() } else { // --- Step 2: deposit PRC20 into UEA receipt, err = k.depositPRC20( @@ -46,6 +53,8 @@ func (k Keeper) ExecuteInboundFundsAndPayload(ctx context.Context, utx types.Uni ) if err != nil { execErr = fmt.Errorf("depositPRC20 failed: %w", err) + shouldRevert = true + revertReason = execErr.Error() } } @@ -75,6 +84,31 @@ func (k Keeper) ExecuteInboundFundsAndPayload(ctx context.Context, utx types.Uni // If deposit failed, stop here (don’t attempt payload execution) if execErr != nil { + if shouldRevert { + revertOutbound := &types.OutboundTx{ + DestinationChain: 
utx.InboundTx.SourceChain, + Recipient: func() string { + if utx.InboundTx.RevertInstructions != nil { + return utx.InboundTx.RevertInstructions.FundRecipient + } + return utx.InboundTx.Sender + }(), + Amount: utx.InboundTx.Amount, + ExternalAssetAddr: utx.InboundTx.AssetAddr, + Sender: utx.InboundTx.Sender, + TxType: types.TxType_INBOUND_REVERT, + OutboundStatus: types.Status_PENDING, + Id: types.GetOutboundRevertId(utx.InboundTx.TxHash), + } + + _ = k.attachOutboundsToUtx( + sdkCtx, + universalTxKey, + []*types.OutboundTx{revertOutbound}, + revertReason, + ) + } + return nil } @@ -112,6 +146,10 @@ func (k Keeper) ExecuteInboundFundsAndPayload(ctx context.Context, utx types.Uni payloadPcTx.TxHash = receipt.Hash payloadPcTx.GasUsed = receipt.GasUsed payloadPcTx.Status = "SUCCESS" + + if receipt != nil { + _ = k.AttachOutboundsToExistingUniversalTx(sdkCtx, receipt, utx) + } } updateErr = k.UpdateUniversalTx(ctx, universalTxKey, func(utx *types.UniversalTx) error { diff --git a/x/uexecutor/keeper/execute_inbound_gas.go b/x/uexecutor/keeper/execute_inbound_gas.go index a9c28fcc..f1388e8e 100644 --- a/x/uexecutor/keeper/execute_inbound_gas.go +++ b/x/uexecutor/keeper/execute_inbound_gas.go @@ -27,15 +27,22 @@ func (k Keeper) ExecuteInboundGas(ctx context.Context, inbound types.Inbound) er var execErr error var receipt *evmtypes.MsgEthereumTxResponse + shouldRevert := false + var revertReason string + // --- step 1: get token config tokenConfig, err := k.uregistryKeeper.GetTokenConfig(ctx, inbound.SourceChain, inbound.AssetAddr) if err != nil { execErr = fmt.Errorf("GetTokenConfig failed: %w", err) + shouldRevert = true + revertReason = execErr.Error() } else { // --- step 2: parse amount amount := new(big.Int) if amount, ok := amount.SetString(inbound.Amount, 10); !ok { execErr = fmt.Errorf("invalid amount: %s", inbound.Amount) + shouldRevert = true + revertReason = execErr.Error() } else { // --- step 3: resolve / deploy UEA prc20AddressHex := 
common.HexToAddress(tokenConfig.NativeRepresentation.ContractAddress) @@ -49,12 +56,16 @@ func (k Keeper) ExecuteInboundGas(ctx context.Context, inbound types.Inbound) er ueaAddr, isDeployed, fErr := k.CallFactoryToGetUEAAddressForOrigin(sdkCtx, ueModuleAccAddress, factoryAddress, &universalAccountId) if fErr != nil { execErr = fmt.Errorf("CallFactory failed: %w", fErr) + shouldRevert = true + revertReason = execErr.Error() } else { if !isDeployed { // Deploy new UEA and record a pcTx for it deployReceipt, dErr := k.DeployUEAV2(ctx, ueModuleAccAddress, &universalAccountId) if dErr != nil { execErr = fmt.Errorf("DeployUEA failed: %w", dErr) + shouldRevert = true + revertReason = execErr.Error() } else { // Parse deployed address from return data deployedAddr := common.BytesToAddress(deployReceipt.Ret) @@ -78,6 +89,10 @@ func (k Keeper) ExecuteInboundGas(ctx context.Context, inbound types.Inbound) er if execErr == nil { // --- step 4: deposit + swap receipt, execErr = k.CallPRC20DepositAutoSwap(sdkCtx, prc20AddressHex, ueaAddr, amount) + if execErr != nil { + shouldRevert = true + revertReason = execErr.Error() + } } } } @@ -108,6 +123,31 @@ func (k Keeper) ExecuteInboundGas(ctx context.Context, inbound types.Inbound) er return updateErr } + if execErr != nil && shouldRevert { + revertOutbound := &types.OutboundTx{ + DestinationChain: inbound.SourceChain, + Recipient: func() string { + if inbound.RevertInstructions != nil { + return inbound.RevertInstructions.FundRecipient + } + return inbound.Sender + }(), + Amount: inbound.Amount, + ExternalAssetAddr: inbound.AssetAddr, + Sender: inbound.Sender, + TxType: types.TxType_INBOUND_REVERT, + OutboundStatus: types.Status_PENDING, + Id: types.GetOutboundRevertId(inbound.TxHash), + } + + _ = k.attachOutboundsToUtx( + sdkCtx, + universalTxKey, + []*types.OutboundTx{revertOutbound}, + revertReason, + ) + } + // Never return execErr, only nil return nil } diff --git a/x/uexecutor/keeper/execute_inbound_gas_and_payload.go 
b/x/uexecutor/keeper/execute_inbound_gas_and_payload.go index 9290bf24..cb90282e 100644 --- a/x/uexecutor/keeper/execute_inbound_gas_and_payload.go +++ b/x/uexecutor/keeper/execute_inbound_gas_and_payload.go @@ -14,7 +14,7 @@ import ( func (k Keeper) ExecuteInboundGasAndPayload(ctx context.Context, utx types.UniversalTx) error { sdkCtx := sdk.UnwrapSDKContext(ctx) - _, ueModuleAddressStr := k.GetUeModuleAddress(ctx) + ueModuleAccAddress, ueModuleAddressStr := k.GetUeModuleAddress(ctx) universalTxKey := types.GetInboundUniversalTxKey(*utx.InboundTx) universalAccountId := types.UniversalAccountId{ @@ -24,26 +24,39 @@ func (k Keeper) ExecuteInboundGasAndPayload(ctx context.Context, utx types.Unive } factoryAddress := common.HexToAddress(types.FACTORY_PROXY_ADDRESS_HEX) - ueModuleAccAddress, _ := k.GetUeModuleAddress(ctx) var execErr error var receipt *evmtypes.MsgEthereumTxResponse var ueaAddr common.Address + shouldRevert := false + var revertReason string + // --- Step 1: token config tokenConfig, err := k.uregistryKeeper.GetTokenConfig(ctx, utx.InboundTx.SourceChain, utx.InboundTx.AssetAddr) if err != nil { execErr = fmt.Errorf("GetTokenConfig failed: %w", err) + shouldRevert = true + revertReason = execErr.Error() } else { // --- Step 2: parse amount amount := new(big.Int) if amount, ok := amount.SetString(utx.InboundTx.Amount, 10); !ok { execErr = fmt.Errorf("invalid amount: %s", utx.InboundTx.Amount) + shouldRevert = true + revertReason = execErr.Error() } else { - // --- Step 3: check factory for UEA - ueaAddrRes, isDeployed, fErr := k.CallFactoryToGetUEAAddressForOrigin(sdkCtx, ueModuleAccAddress, factoryAddress, &universalAccountId) + // --- Step 3: resolve / deploy UEA + ueaAddrRes, isDeployed, fErr := k.CallFactoryToGetUEAAddressForOrigin( + sdkCtx, + ueModuleAccAddress, + factoryAddress, + &universalAccountId, + ) if fErr != nil { execErr = fmt.Errorf("factory lookup failed: %w", fErr) + shouldRevert = true + revertReason = execErr.Error() } else { ueaAddr 
= ueaAddrRes @@ -51,12 +64,11 @@ func (k Keeper) ExecuteInboundGasAndPayload(ctx context.Context, utx types.Unive deployReceipt, dErr := k.DeployUEAV2(ctx, ueModuleAccAddress, &universalAccountId) if dErr != nil { execErr = fmt.Errorf("DeployUEAV2 failed: %w", dErr) + shouldRevert = true + revertReason = execErr.Error() } else { - // Parse deployed address from return data - deployedAddr := common.BytesToAddress(deployReceipt.Ret) - ueaAddr = deployedAddr + ueaAddr = common.BytesToAddress(deployReceipt.Ret) - // Store deployment pcTx deployPcTx := types.PCTx{ TxHash: deployReceipt.Hash, Sender: ueModuleAddressStr, @@ -73,8 +85,19 @@ func (k Keeper) ExecuteInboundGasAndPayload(ctx context.Context, utx types.Unive if execErr == nil { // --- Step 4: deposit + autoswap - prc20AddressHex := common.HexToAddress(tokenConfig.NativeRepresentation.ContractAddress) - receipt, execErr = k.CallPRC20DepositAutoSwap(sdkCtx, prc20AddressHex, ueaAddr, amount) + prc20AddressHex := common.HexToAddress( + tokenConfig.NativeRepresentation.ContractAddress, + ) + receipt, execErr = k.CallPRC20DepositAutoSwap( + sdkCtx, + prc20AddressHex, + ueaAddr, + amount, + ) + if execErr != nil { + shouldRevert = true + revertReason = execErr.Error() + } } } } @@ -93,6 +116,7 @@ func (k Keeper) ExecuteInboundGasAndPayload(ctx context.Context, utx types.Unive depositPcTx.GasUsed = receipt.GasUsed depositPcTx.Status = "SUCCESS" } + updateErr := k.UpdateUniversalTx(ctx, universalTxKey, func(utx *types.UniversalTx) error { utx.PcTx = append(utx.PcTx, &depositPcTx) if execErr != nil { @@ -104,17 +128,41 @@ func (k Keeper) ExecuteInboundGasAndPayload(ctx context.Context, utx types.Unive return updateErr } - // If deposit failed, don’t attempt payload execution - if execErr != nil { + // --- create revert ONLY for pre-deposit / deposit failures + if execErr != nil && shouldRevert { + revertOutbound := &types.OutboundTx{ + DestinationChain: utx.InboundTx.SourceChain, + Recipient: func() string { + if 
utx.InboundTx.RevertInstructions != nil { + return utx.InboundTx.RevertInstructions.FundRecipient + } + return utx.InboundTx.Sender + }(), + Amount: utx.InboundTx.Amount, + ExternalAssetAddr: utx.InboundTx.AssetAddr, + Sender: utx.InboundTx.Sender, + TxType: types.TxType_INBOUND_REVERT, + OutboundStatus: types.Status_PENDING, + Id: types.GetOutboundRevertId(utx.InboundTx.TxHash), + } + + _ = k.attachOutboundsToUtx( + sdkCtx, + universalTxKey, + []*types.OutboundTx{revertOutbound}, + revertReason, + ) + return nil } + // --- funds deposited successfully → continue with payload + ueModuleAddr, _ := k.GetUeModuleAddress(ctx) - // --- Step 5: compute and store payload hash + // --- Step 5: payload hash payloadHashErr := k.StoreVerifiedPayloadHash(sdkCtx, utx, ueaAddr, ueModuleAddr) if payloadHashErr != nil { - // Update UniversalTx with payload hash error and stop errorPcTx := types.PCTx{ Sender: ueModuleAddressStr, BlockHeight: uint64(sdkCtx.BlockHeight()), @@ -130,7 +178,13 @@ func (k Keeper) ExecuteInboundGasAndPayload(ctx context.Context, utx types.Unive } // --- Step 6: execute payload - receipt, err = k.ExecutePayloadV2(ctx, ueModuleAddr, &universalAccountId, utx.InboundTx.UniversalPayload, utx.InboundTx.VerificationData) + receipt, err = k.ExecutePayloadV2( + ctx, + ueModuleAddr, + &universalAccountId, + utx.InboundTx.UniversalPayload, + utx.InboundTx.VerificationData, + ) payloadPcTx := types.PCTx{ Sender: ueModuleAddressStr, @@ -143,6 +197,10 @@ func (k Keeper) ExecuteInboundGasAndPayload(ctx context.Context, utx types.Unive payloadPcTx.TxHash = receipt.Hash payloadPcTx.GasUsed = receipt.GasUsed payloadPcTx.Status = "SUCCESS" + + if receipt != nil { + _ = k.AttachOutboundsToExistingUniversalTx(sdkCtx, receipt, utx) + } } updateErr = k.UpdateUniversalTx(ctx, universalTxKey, func(utx *types.UniversalTx) error { @@ -158,6 +216,5 @@ func (k Keeper) ExecuteInboundGasAndPayload(ctx context.Context, utx types.Unive return updateErr } - // never return execErr or err 
return nil } diff --git a/x/uexecutor/keeper/msg_execute_payload.go b/x/uexecutor/keeper/msg_execute_payload.go index 473b760a..59144d54 100644 --- a/x/uexecutor/keeper/msg_execute_payload.go +++ b/x/uexecutor/keeper/msg_execute_payload.go @@ -59,10 +59,24 @@ func (k Keeper) ExecutePayload(ctx context.Context, evmFrom common.Address, univ return err } + // Step 4 + pcTx := types.PCTx{ + Sender: evmFrom.Hex(), + TxHash: receipt.Hash, + GasUsed: receipt.GasUsed, + BlockHeight: uint64(sdkCtx.BlockHeight()), + Status: "SUCCESS", + } + + // Step 5: create outbound + UTX only if needed + if err := k.CreateUniversalTxFromReceiptIfOutbound(sdkCtx, receipt, pcTx); err != nil { + return err + } + gasUnitsUsed := receipt.GasUsed gasUnitsUsedBig := new(big.Int).SetUint64(gasUnitsUsed) - // Step 4: Handle fee calculation and deduction + // Step 6: Handle fee calculation and deduction ueaAccAddr := sdk.AccAddress(ueaAddr.Bytes()) baseFee := k.feemarketKeeper.GetBaseFee(sdkCtx) diff --git a/x/uexecutor/keeper/msg_server.go b/x/uexecutor/keeper/msg_server.go index 1770dfc7..2d9b44f0 100755 --- a/x/uexecutor/keeper/msg_server.go +++ b/x/uexecutor/keeper/msg_server.go @@ -6,6 +6,7 @@ import ( "cosmossdk.io/errors" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" "github.com/pushchain/push-chain-node/utils" "github.com/pushchain/push-chain-node/x/uexecutor/types" @@ -168,3 +169,43 @@ func (ms msgServer) VoteGasPrice(ctx context.Context, msg *types.MsgVoteGasPrice } return &types.MsgVoteGasPriceResponse{}, nil } + +// VoteOutbound implements types.MsgServer. 
+func (ms msgServer) VoteOutbound(ctx context.Context, msg *types.MsgVoteOutbound) (*types.MsgVoteOutboundResponse, error) { + signerAccAddr, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return nil, fmt.Errorf("invalid signer address: %w", err) + } + + // Convert account to validator operator address + signerValAddr := sdk.ValAddress(signerAccAddr) + + // Lookup the linked universal validator for this signer + isBonded, err := ms.k.uvalidatorKeeper.IsBondedUniversalValidator(ctx, msg.Signer) + if err != nil { + return nil, errors.Wrapf(err, "failed to check bonded status for signer %s", msg.Signer) + } + if !isBonded { + return nil, fmt.Errorf("universal validator for signer %s is not bonded", msg.Signer) + } + + isTombstoned, err := ms.k.uvalidatorKeeper.IsTombstonedUniversalValidator(ctx, msg.Signer) + if err != nil { + return nil, errors.Wrapf(err, "failed to check tombstoned status for signer %s", msg.Signer) + } + if isTombstoned { + return nil, fmt.Errorf("universal validator for signer %s is tombstoned", msg.Signer) + } + + utxID, outboundID, err := types.DecodeOutboundTxIDHex(msg.TxId) + if err != nil { + return nil, errors.Wrap(sdkerrors.ErrInvalidRequest, "invalid tx_id: decode failed") + } + + err = ms.k.VoteOutbound(ctx, signerValAddr, utxID, outboundID, *msg.ObservedTx) + if err != nil { + return nil, err + } + + return &types.MsgVoteOutboundResponse{}, nil +} diff --git a/x/uexecutor/keeper/msg_vote_inbound.go b/x/uexecutor/keeper/msg_vote_inbound.go index 70c6ec2e..1bcc460d 100644 --- a/x/uexecutor/keeper/msg_vote_inbound.go +++ b/x/uexecutor/keeper/msg_vote_inbound.go @@ -46,6 +46,7 @@ func (k Keeper) VoteInbound(ctx context.Context, universalValidator sdk.ValAddre // Voting is finalized utx := types.UniversalTx{ + Id: universalTxKey, InboundTx: &inbound, PcTx: nil, OutboundTx: nil, diff --git a/x/uexecutor/keeper/msg_vote_outbound.go b/x/uexecutor/keeper/msg_vote_outbound.go new file mode 100644 index 00000000..7abbcdf2 --- 
/dev/null +++ b/x/uexecutor/keeper/msg_vote_outbound.go @@ -0,0 +1,88 @@ +package keeper + +import ( + "context" + "fmt" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/pushchain/push-chain-node/x/uexecutor/types" +) + +// VoteOutbound is for uvalidators for voting on observed outbound tx on external chain +func (k Keeper) VoteOutbound( + ctx context.Context, + universalValidator sdk.ValAddress, + utxId string, + outboundId string, + observedTx types.OutboundObservation, +) error { + sdkCtx := sdk.UnwrapSDKContext(ctx) + + // Step 1: Fetch UniversalTx + utx, found, err := k.GetUniversalTx(ctx, utxId) + if err != nil { + return err + } + if !found { + return errors.Wrap(err, "UniversalTx not found") + } + if utx.OutboundTx == nil { + return errors.Wrap(err, "No outbound tx found in the specified UniversalTx") + } + + // Step 2: Find outbound by id + var outbound types.OutboundTx + found = false + for _, ob := range utx.OutboundTx { + if ob.Id == outboundId { + outbound = *ob + found = true + break + } + } + if !found { + return errors.Wrap(err, "Outbound not found") + } + + // Prevent double-finalization + if outbound.OutboundStatus != types.Status_PENDING { + return fmt.Errorf("outbound with key %s is already finalized", outboundId) + } + + // Use temp context to prevent partial writes + tmpCtx, commit := sdkCtx.CacheContext() + + // Step 3: Vote on outbound ballot + isFinalized, _, err := k.VoteOnOutboundBallot( + tmpCtx, + universalValidator, + utxId, + outboundId, + observedTx, + ) + if err != nil { + return err + } + + commit() + + // Step 4: Exit if not finalized yet + if !isFinalized { + return nil + } + + // Step 5: Update outbound state to OBSERVED + outbound.OutboundStatus = types.Status_OBSERVED + outbound.ObservedTx = &observedTx + + // Persist the state inside UniversalTx + if err := k.UpdateOutbound(ctx, utxId, outbound); err != nil { + return err + } + + // Step 6: Finalize outbound (refund if failed) - Don't return 
error + _ = k.FinalizeOutbound(ctx, utxId, outbound) + + return nil +} diff --git a/x/uexecutor/keeper/outbound.go b/x/uexecutor/keeper/outbound.go new file mode 100644 index 00000000..f812652a --- /dev/null +++ b/x/uexecutor/keeper/outbound.go @@ -0,0 +1,98 @@ +package keeper + +import ( + "context" + "fmt" + "math/big" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/ethereum/go-ethereum/common" + "github.com/pushchain/push-chain-node/x/uexecutor/types" +) + +func (k Keeper) UpdateOutbound(ctx context.Context, utxId string, outbound types.OutboundTx) error { + return k.UpdateUniversalTx(ctx, utxId, func(utx *types.UniversalTx) error { + if utx.OutboundTx == nil { + return fmt.Errorf("outbound tx list is not initialized for utx %s", utxId) + } + + updated := false + for i, ob := range utx.OutboundTx { + if ob.Id == outbound.Id { + utx.OutboundTx[i] = &outbound + updated = true + break + } + } + + if !updated { + return fmt.Errorf( + "outbound with id %s not found in utx %s", + outbound.Id, + utxId, + ) + } + + return nil + }) +} + +func (k Keeper) FinalizeOutbound(ctx context.Context, utxId string, outbound types.OutboundTx) error { + // If not observed yet, do nothing + if outbound.OutboundStatus != types.Status_OBSERVED { + return nil + } + + obs := outbound.ObservedTx + if obs == nil || obs.Success { + return nil + } + + // Only refund for funds-related tx types + if outbound.TxType != types.TxType_FUNDS && + outbound.TxType != types.TxType_FUNDS_AND_PAYLOAD { + return nil + } + + sdkCtx := sdk.UnwrapSDKContext(ctx) + + // Decide refund recipient safely + recipient := outbound.Sender + if outbound.RevertInstructions != nil && + outbound.RevertInstructions.FundRecipient != "" { + recipient = outbound.RevertInstructions.FundRecipient + } + + // Mint tokens back + amount := new(big.Int) + amount, ok := amount.SetString(outbound.Amount, 10) + if !ok { + return fmt.Errorf("invalid amount: %s", outbound.Amount) + } + receipt, err := 
k.CallPRC20Deposit(sdkCtx, common.HexToAddress(outbound.Prc20AssetAddr), common.HexToAddress(recipient), amount) + + // Update outbound status + outbound.OutboundStatus = types.Status_REVERTED + + pcTx := types.PCTx{ + TxHash: "", // no hash if depositPRC20 failed + Sender: outbound.Sender, + GasUsed: 0, + BlockHeight: uint64(sdkCtx.BlockHeight()), + } + + if err != nil { + pcTx.Status = "FAILED" + pcTx.ErrorMsg = err.Error() + } else { + pcTx.TxHash = receipt.Hash + pcTx.GasUsed = receipt.GasUsed + pcTx.Status = "SUCCESS" + pcTx.ErrorMsg = "" + } + + outbound.PcRevertExecution = &pcTx + + // Store Reverted tx in Outbound + return k.UpdateOutbound(ctx, utxId, outbound) +} diff --git a/x/uexecutor/keeper/universal_tx.go b/x/uexecutor/keeper/universal_tx.go index 0d1ec929..d260c026 100644 --- a/x/uexecutor/keeper/universal_tx.go +++ b/x/uexecutor/keeper/universal_tx.go @@ -7,6 +7,8 @@ import ( // sdk "github.com/cosmos/cosmos-sdk/types" "cosmossdk.io/collections" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/pushchain/push-chain-node/utils" "github.com/pushchain/push-chain-node/x/uexecutor/types" ) @@ -87,3 +89,16 @@ func (k Keeper) GetUniversalTxStatus(ctx context.Context, key string) (types.Uni } return utx.UniversalStatus, true, nil } + +func (k Keeper) BuildPcUniversalTxKey(ctx context.Context, pc types.PCTx) (string, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + + evmChainID, err := utils.ExtractEvmChainID(sdkCtx.ChainID()) + if err != nil { + return "", err + } + + pcCaip := fmt.Sprintf("eip155:%s", evmChainID) + + return types.GetPcUniversalTxKey(pcCaip, pc), nil +} diff --git a/x/uexecutor/keeper/voting.go b/x/uexecutor/keeper/voting.go index 0fced64d..19674823 100644 --- a/x/uexecutor/keeper/voting.go +++ b/x/uexecutor/keeper/voting.go @@ -65,3 +65,53 @@ func (k Keeper) VoteOnInboundBallot( return isFinalized, isNew, nil } + +func (k Keeper) VoteOnOutboundBallot( + ctx context.Context, + universalValidator sdk.ValAddress, + utxId string, + 
outboundId string, + observedTx types.OutboundObservation, +) (isFinalized bool, + isNew bool, + err error) { + ballotKey, err := types.GetOutboundBallotKey(utxId, outboundId, observedTx) + if err != nil { + return false, false, err + } + + universalValidatorSet, err := k.uvalidatorKeeper.GetEligibleVoters(ctx) + if err != nil { + return false, false, err + } + + // number of validators + totalValidators := len(universalValidatorSet) + + // votesNeeded = ceil(2/3 * totalValidators) + // >2/3 quorum similar to tendermint + votesNeeded := (types.VotesThresholdNumerator*totalValidators)/types.VotesThresholdDenominator + 1 + + // Convert []sdk.ValAddress → []string + universalValidatorSetStrs := make([]string, len(universalValidatorSet)) + for i, v := range universalValidatorSet { + universalValidatorSetStrs[i] = v.IdentifyInfo.CoreValidatorAddress + } + + // Step 2: Call VoteOnBallot for this inbound synthetic + _, isFinalized, isNew, err = k.uvalidatorKeeper.VoteOnBallot( + ctx, + ballotKey, + uvalidatortypes.BallotObservationType_BALLOT_OBSERVATION_TYPE_OUTBOUND_TX, + universalValidator.String(), + uvalidatortypes.VoteResult_VOTE_RESULT_SUCCESS, + universalValidatorSetStrs, + int64(votesNeeded), + int64(types.DefaultExpiryAfterBlocks), + ) + if err != nil { + return false, false, err + } + + return isFinalized, isNew, nil +} diff --git a/x/uexecutor/mocks/mock_uregistrykeeper.go b/x/uexecutor/mocks/mock_uregistrykeeper.go index 8d66223c..fdb88db8 100644 --- a/x/uexecutor/mocks/mock_uregistrykeeper.go +++ b/x/uexecutor/mocks/mock_uregistrykeeper.go @@ -65,6 +65,21 @@ func (mr *MockUregistryKeeperMockRecorder) GetTokenConfig(ctx, chain, address in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTokenConfig", reflect.TypeOf((*MockUregistryKeeper)(nil).GetTokenConfig), ctx, chain, address) } +// GetTokenConfigByPRC20 mocks base method. 
+func (m *MockUregistryKeeper) GetTokenConfigByPRC20(ctx context.Context, chain, prc20Addr string) (uregistrytypes.TokenConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTokenConfigByPRC20", ctx, chain, prc20Addr) + ret0, _ := ret[0].(uregistrytypes.TokenConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTokenConfigByPRC20 indicates an expected call of GetTokenConfigByPRC20. +func (mr *MockUregistryKeeperMockRecorder) GetTokenConfigByPRC20(ctx, chain, prc20Addr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTokenConfigByPRC20", reflect.TypeOf((*MockUregistryKeeper)(nil).GetTokenConfigByPRC20), ctx, chain, prc20Addr) +} + // IsChainInboundEnabled mocks base method. func (m *MockUregistryKeeper) IsChainInboundEnabled(ctx context.Context, chain string) (bool, error) { m.ctrl.T.Helper() diff --git a/x/uexecutor/types/constants.go b/x/uexecutor/types/constants.go index 989fec97..35d94535 100644 --- a/x/uexecutor/types/constants.go +++ b/x/uexecutor/types/constants.go @@ -1,6 +1,9 @@ package types -import "github.com/ethereum/go-ethereum/common" +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) const ( FACTORY_PROXY_ADDRESS_HEX = "0x00000000000000000000000000000000000000eA" @@ -38,3 +41,7 @@ const ( // Default number of blocks after which ballot expires DefaultExpiryAfterBlocks = 10000 ) + +var UniversalTxOutboundEventSig = crypto.Keccak256Hash([]byte( + "UniversalTxOutbound(bytes32,address,string,address,bytes,uint256,address,uint256,uint256,bytes,uint256,address,uint8)", +)).Hex() diff --git a/x/uexecutor/types/events.go b/x/uexecutor/types/events.go new file mode 100644 index 00000000..e7caffd3 --- /dev/null +++ b/x/uexecutor/types/events.go @@ -0,0 +1,63 @@ +package types + +import ( + "encoding/json" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +const ( + EventTypeOutboundCreated = "outbound_created" +) + 
+// OutboundCreatedEvent represents an emitted outbound transaction. +type OutboundCreatedEvent struct { + UniversalTxId string `json:"utx_id"` + OutboundId string `json:"outbound_id"` + TxID string `json:"tx_id"` // txId: abi.encode(utx_id, outbound_id) + DestinationChain string `json:"destination_chain"` + Recipient string `json:"recipient"` + Amount string `json:"amount"` + AssetAddr string `json:"asset_addr"` + Sender string `json:"sender"` + Payload string `json:"payload"` + GasLimit string `json:"gas_limit"` + TxType string `json:"tx_type"` + PcTxHash string `json:"pc_tx_hash"` + LogIndex string `json:"log_index"` + RevertMsg string `json:"revert_msg"` +} + +// NewOutboundCreatedEvent creates a Cosmos SDK event for outbound creation. +func NewOutboundCreatedEvent(e OutboundCreatedEvent) (sdk.Event, error) { + if e.TxID == "" { + return sdk.Event{}, fmt.Errorf("tx_id must not be empty") + } + + bz, err := json.Marshal(e) + if err != nil { + return sdk.Event{}, fmt.Errorf("failed to marshal outbound event: %w", err) + } + + event := sdk.NewEvent( + EventTypeOutboundCreated, + sdk.NewAttribute("utx_id", e.UniversalTxId), + sdk.NewAttribute("outbound_id", e.OutboundId), + sdk.NewAttribute("tx_id", e.TxID), + sdk.NewAttribute("destination_chain", e.DestinationChain), + sdk.NewAttribute("recipient", e.Recipient), + sdk.NewAttribute("amount", e.Amount), + sdk.NewAttribute("asset_addr", e.AssetAddr), + sdk.NewAttribute("sender", e.Sender), + sdk.NewAttribute("payload", e.Payload), + sdk.NewAttribute("gas_limit", e.GasLimit), + sdk.NewAttribute("tx_type", e.TxType), + sdk.NewAttribute("pc_tx_hash", e.PcTxHash), + sdk.NewAttribute("log_index", e.LogIndex), + sdk.NewAttribute("revert_msg", e.RevertMsg), + sdk.NewAttribute("data", string(bz)), // full JSON payload for indexers + ) + + return event, nil +} diff --git a/x/uexecutor/types/expected_keepers.go b/x/uexecutor/types/expected_keepers.go index 1346328f..49bef48e 100644 --- a/x/uexecutor/types/expected_keepers.go 
+++ b/x/uexecutor/types/expected_keepers.go @@ -22,6 +22,11 @@ type UregistryKeeper interface { IsChainOutboundEnabled(ctx context.Context, chain string) (bool, error) IsChainInboundEnabled(ctx context.Context, chain string) (bool, error) GetTokenConfig(ctx context.Context, chain, address string) (uregistrytypes.TokenConfig, error) + GetTokenConfigByPRC20( + ctx context.Context, + chain string, + prc20Addr string, + ) (uregistrytypes.TokenConfig, error) } // EVMKeeper defines the expected interface for the EVM module. diff --git a/x/uexecutor/types/gateway_pc_event_decode.go b/x/uexecutor/types/gateway_pc_event_decode.go new file mode 100644 index 00000000..8a2da39b --- /dev/null +++ b/x/uexecutor/types/gateway_pc_event_decode.go @@ -0,0 +1,94 @@ +package types + +import ( + "encoding/hex" + "fmt" + "math/big" + + evmtypes "github.com/cosmos/evm/x/vm/types" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" +) + +type UniversalTxOutboundEvent struct { + TxID string // 0x... bytes32 + Sender string // 0x... address + ChainId string // destination chain (CAIP-2 string) + Token string // 0x... ERC20 or zero address for native + Target string // 0x-hex encoded bytes (non-EVM recipient) + Amount *big.Int // amount of Token to bridge + GasToken string // 0x... 
token used to pay gas fee + GasFee *big.Int // amount of GasToken paid to relayer + GasLimit *big.Int // gas limit for destination execution + Payload string // 0x-hex calldata + ProtocolFee *big.Int // fee kept by protocol + RevertRecipient string // where funds go on full revert + TxType TxType // ← single source of truth from proto +} + +func DecodeUniversalTxOutboundFromLog(log *evmtypes.Log) (*UniversalTxOutboundEvent, error) { + if len(log.Topics) == 0 || log.Topics[0] != UniversalTxOutboundEventSig { + return nil, fmt.Errorf("not a UniversalTxOutbound event") + } + if len(log.Topics) < 4 { + return nil, fmt.Errorf("insufficient topics") + } + + event := &UniversalTxOutboundEvent{ + TxID: log.Topics[1], + Sender: common.HexToAddress(log.Topics[2]).Hex(), + Token: common.HexToAddress(log.Topics[3]).Hex(), + } + + // ABI types + stringType, _ := abi.NewType("string", "", nil) + bytesType, _ := abi.NewType("bytes", "", nil) + uint256Type, _ := abi.NewType("uint256", "", nil) + addressType, _ := abi.NewType("address", "", nil) + uint8Type, _ := abi.NewType("uint8", "", nil) + + arguments := abi.Arguments{ + {Type: stringType}, // chainId + {Type: bytesType}, // target + {Type: uint256Type}, // amount + {Type: addressType}, // gasToken + {Type: uint256Type}, // gasFee + {Type: uint256Type}, // gasLimit + {Type: bytesType}, // payload + {Type: uint256Type}, // protocolFee + {Type: addressType}, // revertRecipient + {Type: uint8Type}, // txType + } + + values, err := arguments.Unpack(log.Data) + if err != nil { + return nil, fmt.Errorf("failed to unpack UniversalTxOutbound: %w", err) + } + + if len(values) != 10 { + return nil, fmt.Errorf("unexpected number of unpacked values: %d", len(values)) + } + + i := 0 + event.ChainId = values[i].(string) + i++ + event.Target = "0x" + hex.EncodeToString(values[i].([]byte)) + i++ + event.Amount = values[i].(*big.Int) + i++ + event.GasToken = values[i].(common.Address).Hex() + i++ + event.GasFee = values[i].(*big.Int) + i++ + 
event.GasLimit = values[i].(*big.Int) + i++ + event.Payload = "0x" + hex.EncodeToString(values[i].([]byte)) + i++ + event.ProtocolFee = values[i].(*big.Int) + i++ + event.RevertRecipient = values[i].(common.Address).Hex() + i++ + event.TxType = SolidityTxTypeToProto(values[i].(uint8)) + + return event, nil +} diff --git a/x/uexecutor/types/inbound.go b/x/uexecutor/types/inbound.go index b7aab960..2a641b1e 100644 --- a/x/uexecutor/types/inbound.go +++ b/x/uexecutor/types/inbound.go @@ -60,13 +60,13 @@ func (p Inbound) ValidateBasic() error { } // Validate tx_type enum - if _, ok := InboundTxType_name[int32(p.TxType)]; !ok || p.TxType == InboundTxType_UNSPECIFIED_TX { + if _, ok := TxType_name[int32(p.TxType)]; !ok || p.TxType == TxType_UNSPECIFIED_TX { return errors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid tx_type: %v", p.TxType) } // Validate payload only if tx_type requires it switch p.TxType { - case InboundTxType_FUNDS_AND_PAYLOAD, InboundTxType_GAS_AND_PAYLOAD: + case TxType_FUNDS_AND_PAYLOAD, TxType_GAS_AND_PAYLOAD: if p.UniversalPayload == nil { return errors.Wrap(sdkerrors.ErrInvalidRequest, "payload is required for payload tx types") } diff --git a/x/uexecutor/types/inbound_test.go b/x/uexecutor/types/inbound_test.go index e08a2ca4..0cf55b98 100644 --- a/x/uexecutor/types/inbound_test.go +++ b/x/uexecutor/types/inbound_test.go @@ -16,7 +16,7 @@ func TestInbound_ValidateBasic(t *testing.T) { Amount: "1000", AssetAddr: "0x000000000000000000000000000000000000cafe", LogIndex: "1", - TxType: types.InboundTxType_FUNDS, + TxType: types.TxType_FUNDS, } tests := []struct { @@ -134,7 +134,7 @@ func TestInbound_ValidateBasic(t *testing.T) { name: "unspecified tx_type", inbound: func() types.Inbound { ib := validInbound - ib.TxType = types.InboundTxType_UNSPECIFIED_TX + ib.TxType = types.TxType_UNSPECIFIED_TX return ib }(), expectError: true, diff --git a/x/uexecutor/types/keys.go b/x/uexecutor/types/keys.go index 7c4ce3e4..d2c7c880 100755 --- 
a/x/uexecutor/types/keys.go +++ b/x/uexecutor/types/keys.go @@ -53,3 +53,33 @@ func GetInboundBallotKey(inbound Inbound) (string, error) { } return hex.EncodeToString(bz), nil } + +func GetPcUniversalTxKey(pcCaip string, pc PCTx) string { + data := fmt.Sprintf("%s:%s", pcCaip, pc.TxHash) + hash := sha256.Sum256([]byte(data)) + return hex.EncodeToString(hash[:]) +} + +func GetOutboundBallotKey( + utxId string, + outboundIndex string, + observedTx OutboundObservation, +) (string, error) { + + bz, err := observedTx.Marshal() + if err != nil { + return "", err + } + + data := append([]byte(utxId+":"+outboundIndex+":"), bz...) + hash := sha256.Sum256(data) + + return hex.EncodeToString(hash[:]), nil +} + +// Outbound Id for a inbound revert tx +func GetOutboundRevertId(inboundTxHash string) string { + data := fmt.Sprintf("%s:REVERT", inboundTxHash) + hash := sha256.Sum256([]byte(data)) + return hex.EncodeToString(hash[:]) +} diff --git a/x/uexecutor/types/msg_vote_outbound.go b/x/uexecutor/types/msg_vote_outbound.go new file mode 100644 index 00000000..7a37c966 --- /dev/null +++ b/x/uexecutor/types/msg_vote_outbound.go @@ -0,0 +1,100 @@ +package types + +import ( + "strings" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +var ( + _ sdk.Msg = &MsgVoteOutbound{} +) + +// NewMsgVoteOutbound creates new instance of MsgVoteOutbound +func NewMsgVoteOutbound( + sender sdk.Address, + txID string, + observedTx OutboundObservation, +) *MsgVoteOutbound { + return &MsgVoteOutbound{ + Signer: sender.String(), + TxId: txID, + ObservedTx: &observedTx, + } +} + +// Route returns the name of the module +func (msg MsgVoteOutbound) Route() string { return ModuleName } + +// Type returns the action +func (msg MsgVoteOutbound) Type() string { return "msg_vote_outbound" } + +// GetSignBytes implements the LegacyMsg interface. 
+func (msg MsgVoteOutbound) GetSignBytes() []byte { + return sdk.MustSortJSON(AminoCdc.MustMarshalJSON(&msg)) +} + +// GetSigners returns the expected signers for a MsgVoteOutbound message. +func (msg *MsgVoteOutbound) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Signer) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgVoteOutbound) ValidateBasic() error { + // validate signer + if _, err := sdk.AccAddressFromBech32(msg.Signer); err != nil { + return errors.Wrap(err, "invalid signer address") + } + + // tx_id must be non-empty + if strings.TrimSpace(msg.TxId) == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "tx_id cannot be empty") + } + + // Decode tx_id into (utxID, outboundID) + utxID, outboundID, err := DecodeOutboundTxIDHex(msg.TxId) + if err != nil { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "invalid tx_id: decode failed") + } + + if strings.TrimSpace(utxID) == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "decoded utx_id cannot be empty") + } + if strings.TrimSpace(outboundID) == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "decoded outbound_id cannot be empty") + } + + // observed_tx must NOT be nil + if msg.ObservedTx == nil { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "observed_tx cannot be nil") + } + + // Validate observed_tx content + obs := msg.ObservedTx + + if obs.Success { + // Success requires tx_hash AND block_height > 0 + if strings.TrimSpace(obs.TxHash) == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, + "observed_tx.tx_hash required when success=true") + } + if obs.BlockHeight == 0 { + return errors.Wrap(sdkerrors.ErrInvalidRequest, + "observed_tx.block_height must be > 0 when success=true") + } + + } else { + // Failure case: + // tx_hash MAY be empty. + // BUT if tx_hash is present, block_height must be > 0. 
+ if strings.TrimSpace(obs.TxHash) != "" && obs.BlockHeight == 0 { + return errors.Wrap(sdkerrors.ErrInvalidRequest, + "observed_tx.block_height must be > 0 when tx_hash is provided") + } + } + + return nil +} diff --git a/x/uexecutor/types/outbound_tx.go b/x/uexecutor/types/outbound_tx.go index fb066192..a9be1d24 100644 --- a/x/uexecutor/types/outbound_tx.go +++ b/x/uexecutor/types/outbound_tx.go @@ -31,30 +31,128 @@ func (p OutboundTx) ValidateBasic() error { return errors.Wrap(sdkerrors.ErrInvalidRequest, "destination_chain must be in CAIP-2 format :") } - // Validate tx_hash (non-empty) - if strings.TrimSpace(p.TxHash) == "" { - return errors.Wrap(sdkerrors.ErrInvalidRequest, "tx_hash cannot be empty") - } - - // Validate recipient (non-empty, valid hex address) + // recipient must not be empty if strings.TrimSpace(p.Recipient) == "" { return errors.Wrap(sdkerrors.ErrInvalidAddress, "recipient cannot be empty") } - if !utils.IsValidAddress(p.Recipient, utils.HEX) { - return errors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid recipient address: %s", p.Recipient) + + // sender + if strings.TrimSpace(p.Sender) == "" { + return errors.Wrap(sdkerrors.ErrInvalidAddress, "sender cannot be empty") + } + if !utils.IsValidAddress(p.Sender, utils.HEX) { + return errors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid sender address: %s", p.Sender) } - // Validate amount as uint256 (non-empty, >0) - if strings.TrimSpace(p.Amount) == "" { - return errors.Wrap(sdkerrors.ErrInvalidRequest, "amount cannot be empty") + // tx type support + switch p.TxType { + case TxType_FUNDS, TxType_FUNDS_AND_PAYLOAD, TxType_PAYLOAD: + // supported + default: + return errors.Wrapf(sdkerrors.ErrInvalidRequest, "unsupported tx_type: %s", p.TxType.String()) } - if bi, ok := new(big.Int).SetString(p.Amount, 10); !ok || bi.Sign() <= 0 { - return errors.Wrap(sdkerrors.ErrInvalidRequest, "amount must be a valid positive uint256") + + // amount validation (only for funds-related txs) + if p.TxType == 
TxType_FUNDS || p.TxType == TxType_FUNDS_AND_PAYLOAD { + if strings.TrimSpace(p.Amount) == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "amount cannot be empty for funds tx") + } + if bi, ok := new(big.Int).SetString(p.Amount, 10); !ok || bi.Sign() <= 0 { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "amount must be a valid positive uint256") + } + } + + // payload validation (required for payload txs) + if p.TxType == TxType_PAYLOAD || p.TxType == TxType_FUNDS_AND_PAYLOAD { + if strings.TrimSpace(p.Payload) == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "payload cannot be empty for payload tx") + } + } + + // external_asset_addr required when amount is involved + if (p.TxType == TxType_FUNDS || p.TxType == TxType_FUNDS_AND_PAYLOAD) && strings.TrimSpace(p.ExternalAssetAddr) == "" { + return errors.Wrap(sdkerrors.ErrInvalidAddress, "external_asset_addr cannot be empty for funds tx") + } + + if strings.TrimSpace(p.Prc20AssetAddr) != "" { + if !utils.IsValidAddress(p.Prc20AssetAddr, utils.HEX) { + return errors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid prc20 address: %s", p.Prc20AssetAddr) + } + } + + // gas_limit (uint) + if strings.TrimSpace(p.GasLimit) != "" { + if _, ok := new(big.Int).SetString(p.GasLimit, 10); !ok { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "gas_limit must be a valid uint") + } + } - // Validate asset_addr (non-empty) - if strings.TrimSpace(p.AssetAddr) == "" { - return errors.Wrap(sdkerrors.ErrInvalidAddress, "asset_addr cannot be empty") + // pc_tx validation + if strings.TrimSpace(p.PcTx.TxHash) == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "pc_tx.tx_hash cannot be empty") + } + if strings.TrimSpace(p.PcTx.LogIndex) == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "pc_tx.log_index cannot be empty") + } + + // observed tx validation (if present) + if p.ObservedTx != nil { + if strings.TrimSpace(p.ObservedTx.TxHash) != "" { + if p.ObservedTx.BlockHeight == 0 { + return 
errors.Wrap(sdkerrors.ErrInvalidRequest, "observed_tx.block_height must be > 0") + } + } + } + + // index + if strings.TrimSpace(p.Id) == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "id cannot be empty") + } + + // status validation + // outbound_status validation + switch p.OutboundStatus { + case Status_UNSPECIFIED: + return errors.Wrap(sdkerrors.ErrInvalidRequest, "outbound_status cannot be UNSPECIFIED") + + case Status_PENDING: + // PENDING must NOT have observed tx data + if p.ObservedTx != nil && strings.TrimSpace(p.ObservedTx.TxHash) != "" { + return errors.Wrap( + sdkerrors.ErrInvalidRequest, + "observed_tx must be empty when outbound_status is PENDING", + ) + } + + case Status_OBSERVED: + // OBSERVED must have observed tx + if p.ObservedTx == nil { + return errors.Wrap( + sdkerrors.ErrInvalidRequest, + "observed_tx is required when outbound_status is OBSERVED", + ) + } + + if strings.TrimSpace(p.ObservedTx.TxHash) == "" { + return errors.Wrap( + sdkerrors.ErrInvalidRequest, + "observed_tx.tx_hash is required when outbound_status is OBSERVED", + ) + } + + if p.ObservedTx.BlockHeight == 0 { + return errors.Wrap( + sdkerrors.ErrInvalidRequest, + "observed_tx.block_height must be > 0 when outbound_status is OBSERVED", + ) + } + + default: + return errors.Wrapf( + sdkerrors.ErrInvalidRequest, + "invalid outbound_status: %d", + p.OutboundStatus, + ) } return nil diff --git a/x/uexecutor/types/outbound_tx_test.go b/x/uexecutor/types/outbound_tx_test.go index 09a0b85f..1f3d7a10 100644 --- a/x/uexecutor/types/outbound_tx_test.go +++ b/x/uexecutor/types/outbound_tx_test.go @@ -7,15 +7,27 @@ import ( "github.com/stretchr/testify/require" ) -func TestOutboundTx_ValidateBasic(t *testing.T) { - validOutbound := types.OutboundTx{ - DestinationChain: "eip155:11155111", - TxHash: "0x123abc", - Recipient: "0x000000000000000000000000000000000000beef", - Amount: "1000", - AssetAddr: "0x000000000000000000000000000000000000cafe", +func baseValidOutbound() 
types.OutboundTx { + return types.OutboundTx{ + DestinationChain: "eip155:11155111", + Recipient: "0x000000000000000000000000000000000000beef", + Sender: "0x000000000000000000000000000000000000dead", + Amount: "1000", + ExternalAssetAddr: "0x000000000000000000000000000000000000cafe", + Prc20AssetAddr: "0x000000000000000000000000000000000000bafe", + Payload: "0xabcdef", + GasLimit: "21000", + TxType: types.TxType_FUNDS_AND_PAYLOAD, + PcTx: &types.OriginatingPcTx{ + TxHash: "0xpc123", + LogIndex: "1", + }, + Id: "0", + OutboundStatus: types.Status_PENDING, } +} +func TestOutboundTx_ValidateBasic(t *testing.T) { tests := []struct { name string outbound types.OutboundTx @@ -23,64 +35,71 @@ func TestOutboundTx_ValidateBasic(t *testing.T) { errContains string }{ { - name: "valid outbound", - outbound: validOutbound, + name: "valid FUNDS tx", + outbound: func() types.OutboundTx { + ob := baseValidOutbound() + ob.TxType = types.TxType_FUNDS + ob.Payload = "" + return ob + }(), expectError: false, }, { - name: "empty destination chain", + name: "valid PAYLOAD tx", outbound: func() types.OutboundTx { - ob := validOutbound - ob.DestinationChain = "" + ob := baseValidOutbound() + ob.TxType = types.TxType_PAYLOAD + ob.Amount = "" + ob.ExternalAssetAddr = "" return ob }(), - expectError: true, - errContains: "destination_chain cannot be empty", + expectError: false, }, { - name: "invalid destination chain format", + name: "empty destination_chain", outbound: func() types.OutboundTx { - ob := validOutbound - ob.DestinationChain = "eip155" // missing ":" + ob := baseValidOutbound() + ob.DestinationChain = "" return ob }(), expectError: true, - errContains: "CAIP-2 format", + errContains: "destination_chain cannot be empty", }, { - name: "empty tx_hash", + name: "invalid CAIP-2 chain", outbound: func() types.OutboundTx { - ob := validOutbound - ob.TxHash = "" + ob := baseValidOutbound() + ob.DestinationChain = "eip155" return ob }(), expectError: true, - errContains: "tx_hash 
cannot be empty", + errContains: "CAIP-2", }, { - name: "empty recipient", + name: "empty sender", outbound: func() types.OutboundTx { - ob := validOutbound - ob.Recipient = "" + ob := baseValidOutbound() + ob.Sender = "" return ob }(), expectError: true, - errContains: "recipient cannot be empty", + errContains: "sender cannot be empty", }, { - name: "invalid recipient address", + name: "unsupported tx type", outbound: func() types.OutboundTx { - ob := validOutbound - ob.Recipient = "0xzzzzzzzz" + ob := baseValidOutbound() + ob.TxType = types.TxType_GAS return ob }(), expectError: true, - errContains: "invalid recipient address", + errContains: "unsupported tx_type", }, { - name: "empty amount", + name: "FUNDS tx missing amount", outbound: func() types.OutboundTx { - ob := validOutbound + ob := baseValidOutbound() + ob.TxType = types.TxType_FUNDS ob.Amount = "" return ob }(), @@ -88,25 +107,57 @@ func TestOutboundTx_ValidateBasic(t *testing.T) { errContains: "amount cannot be empty", }, { - name: "negative amount", + name: "PAYLOAD tx missing payload", outbound: func() types.OutboundTx { - ob := validOutbound - ob.Amount = "-100" + ob := baseValidOutbound() + ob.TxType = types.TxType_PAYLOAD + ob.Payload = "" return ob }(), expectError: true, - errContains: "amount must be a valid positive uint256", + errContains: "payload cannot be empty", }, { - name: "empty asset_addr", + name: "FUNDS tx missing asset_addr", outbound: func() types.OutboundTx { - ob := validOutbound - ob.AssetAddr = "" + ob := baseValidOutbound() + ob.TxType = types.TxType_FUNDS + ob.ExternalAssetAddr = "" return ob }(), expectError: true, errContains: "asset_addr cannot be empty", }, + { + name: "empty pc_tx hash", + outbound: func() types.OutboundTx { + ob := baseValidOutbound() + ob.PcTx.TxHash = "" + return ob + }(), + expectError: true, + errContains: "pc_tx.tx_hash cannot be empty", + }, + { + name: "empty pc_tx log_index", + outbound: func() types.OutboundTx { + ob := baseValidOutbound() 
+ ob.PcTx.LogIndex = "" + return ob + }(), + expectError: true, + errContains: "pc_tx.log_index cannot be empty", + }, + { + name: "empty index", + outbound: func() types.OutboundTx { + ob := baseValidOutbound() + ob.Id = "" + return ob + }(), + expectError: true, + errContains: "id cannot be empty", + }, } for _, tc := range tests { diff --git a/x/uexecutor/types/tx.pb.go b/x/uexecutor/types/tx.pb.go index 3c0b969c..0e1ead47 100644 --- a/x/uexecutor/types/tx.pb.go +++ b/x/uexecutor/types/tx.pb.go @@ -653,6 +653,105 @@ func (m *MsgVoteInboundResponse) XXX_DiscardUnknown() { var xxx_messageInfo_MsgVoteInboundResponse proto.InternalMessageInfo +// MsgVoteOutbound allows a universal validator to vote on an outbound tx observation. +type MsgVoteOutbound struct { + // signer is the Cosmos address initiating the tx (used for tx signing) + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + TxId string `protobuf:"bytes,2,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` + ObservedTx *OutboundObservation `protobuf:"bytes,3,opt,name=observed_tx,json=observedTx,proto3" json:"observed_tx,omitempty"` +} + +func (m *MsgVoteOutbound) Reset() { *m = MsgVoteOutbound{} } +func (m *MsgVoteOutbound) String() string { return proto.CompactTextString(m) } +func (*MsgVoteOutbound) ProtoMessage() {} +func (*MsgVoteOutbound) Descriptor() ([]byte, []int) { + return fileDescriptor_88d6216044506365, []int{12} +} +func (m *MsgVoteOutbound) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgVoteOutbound) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgVoteOutbound.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgVoteOutbound) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgVoteOutbound.Merge(m, src) +} +func (m *MsgVoteOutbound) XXX_Size() int { + 
return m.Size() +} +func (m *MsgVoteOutbound) XXX_DiscardUnknown() { + xxx_messageInfo_MsgVoteOutbound.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgVoteOutbound proto.InternalMessageInfo + +func (m *MsgVoteOutbound) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +func (m *MsgVoteOutbound) GetTxId() string { + if m != nil { + return m.TxId + } + return "" +} + +func (m *MsgVoteOutbound) GetObservedTx() *OutboundObservation { + if m != nil { + return m.ObservedTx + } + return nil +} + +// MsgVoteOutboundResponse defines the response for MsgVoteOutbound. +type MsgVoteOutboundResponse struct { +} + +func (m *MsgVoteOutboundResponse) Reset() { *m = MsgVoteOutboundResponse{} } +func (m *MsgVoteOutboundResponse) String() string { return proto.CompactTextString(m) } +func (*MsgVoteOutboundResponse) ProtoMessage() {} +func (*MsgVoteOutboundResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_88d6216044506365, []int{13} +} +func (m *MsgVoteOutboundResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgVoteOutboundResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgVoteOutboundResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgVoteOutboundResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgVoteOutboundResponse.Merge(m, src) +} +func (m *MsgVoteOutboundResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgVoteOutboundResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgVoteOutboundResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgVoteOutboundResponse proto.InternalMessageInfo + // MsgVoteGasPrice is broadcasted by Universal Validators to submit their observed gas prices type MsgVoteGasPrice struct { Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` @@ 
-665,7 +764,7 @@ func (m *MsgVoteGasPrice) Reset() { *m = MsgVoteGasPrice{} } func (m *MsgVoteGasPrice) String() string { return proto.CompactTextString(m) } func (*MsgVoteGasPrice) ProtoMessage() {} func (*MsgVoteGasPrice) Descriptor() ([]byte, []int) { - return fileDescriptor_88d6216044506365, []int{12} + return fileDescriptor_88d6216044506365, []int{14} } func (m *MsgVoteGasPrice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -730,7 +829,7 @@ func (m *MsgVoteGasPriceResponse) Reset() { *m = MsgVoteGasPriceResponse func (m *MsgVoteGasPriceResponse) String() string { return proto.CompactTextString(m) } func (*MsgVoteGasPriceResponse) ProtoMessage() {} func (*MsgVoteGasPriceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_88d6216044506365, []int{13} + return fileDescriptor_88d6216044506365, []int{15} } func (m *MsgVoteGasPriceResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -772,6 +871,8 @@ func init() { proto.RegisterType((*MsgMigrateUEAResponse)(nil), "uexecutor.v1.MsgMigrateUEAResponse") proto.RegisterType((*MsgVoteInbound)(nil), "uexecutor.v1.MsgVoteInbound") proto.RegisterType((*MsgVoteInboundResponse)(nil), "uexecutor.v1.MsgVoteInboundResponse") + proto.RegisterType((*MsgVoteOutbound)(nil), "uexecutor.v1.MsgVoteOutbound") + proto.RegisterType((*MsgVoteOutboundResponse)(nil), "uexecutor.v1.MsgVoteOutboundResponse") proto.RegisterType((*MsgVoteGasPrice)(nil), "uexecutor.v1.MsgVoteGasPrice") proto.RegisterType((*MsgVoteGasPriceResponse)(nil), "uexecutor.v1.MsgVoteGasPriceResponse") } @@ -779,63 +880,67 @@ func init() { func init() { proto.RegisterFile("uexecutor/v1/tx.proto", fileDescriptor_88d6216044506365) } var fileDescriptor_88d6216044506365 = []byte{ - // 884 bytes of a gzipped FileDescriptorProto + // 950 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0xbf, 0x6f, 0xdb, 0x46, - 0x14, 0x36, 0x6d, 0x45, 0x81, 0x9e, 0xd5, 0xc4, 0x62, 0xe4, 0x48, 0xa2, 
0x13, 0xc5, 0x61, 0xda, - 0xc6, 0x55, 0x6a, 0xb1, 0x51, 0x81, 0x0c, 0xda, 0xa4, 0xc4, 0x68, 0x0d, 0x43, 0x81, 0xca, 0x46, - 0x1d, 0xb2, 0x08, 0x27, 0xf2, 0x4a, 0x11, 0x15, 0x79, 0x04, 0xef, 0x28, 0x48, 0x5b, 0xd1, 0xb1, - 0x53, 0xa7, 0xfe, 0x0f, 0x45, 0x17, 0x0f, 0xfd, 0x03, 0x3a, 0x7a, 0x6b, 0xd0, 0xa2, 0x40, 0xa7, - 0xa2, 0xb0, 0x07, 0xff, 0x1b, 0x05, 0x8f, 0x3f, 0x44, 0x52, 0xb2, 0x02, 0x78, 0xf2, 0x22, 0xdc, - 0x7d, 0xdf, 0x7b, 0x4f, 0xef, 0xfb, 0xee, 0x17, 0x61, 0xd7, 0xc3, 0x33, 0xac, 0x79, 0x8c, 0xb8, - 0xca, 0xf4, 0xb9, 0xc2, 0x66, 0x4d, 0xc7, 0x25, 0x8c, 0x88, 0xc5, 0x18, 0x6e, 0x4e, 0x9f, 0x4b, - 0x25, 0x64, 0x99, 0x36, 0x51, 0xf8, 0x6f, 0x10, 0x20, 0x55, 0x34, 0x42, 0x2d, 0x42, 0x15, 0x8b, - 0x1a, 0x7e, 0xa2, 0x45, 0x8d, 0x90, 0xa8, 0xa6, 0x0b, 0xce, 0x1d, 0x4c, 0x43, 0xa6, 0x6c, 0x10, - 0x83, 0xf0, 0xa1, 0xe2, 0x8f, 0x42, 0xb4, 0x16, 0x14, 0x1a, 0x06, 0x44, 0x30, 0x09, 0x28, 0xf9, - 0x57, 0x01, 0xee, 0xf6, 0xa8, 0x31, 0x70, 0x74, 0xc4, 0x70, 0x1f, 0xb9, 0xc8, 0xa2, 0xe2, 0x0b, - 0x28, 0x20, 0x8f, 0x8d, 0x89, 0x6b, 0xb2, 0x79, 0x55, 0xd8, 0x17, 0x0e, 0x0a, 0xdd, 0xea, 0x9f, - 0xbf, 0x1d, 0x96, 0xc3, 0xc4, 0x8e, 0xae, 0xbb, 0x98, 0xd2, 0xaf, 0x99, 0x6b, 0xda, 0x86, 0xba, - 0x08, 0x15, 0x5b, 0x90, 0x77, 0x78, 0x85, 0xea, 0xe6, 0xbe, 0x70, 0xb0, 0xdd, 0x2a, 0x37, 0x93, - 0x0a, 0x9b, 0x41, 0xf5, 0x6e, 0xee, 0xec, 0xdf, 0x47, 0x1b, 0x6a, 0x18, 0xd9, 0xfe, 0xf4, 0x87, - 0xcb, 0xd3, 0xc6, 0xa2, 0xc6, 0x8f, 0x97, 0xa7, 0x8d, 0xda, 0x42, 0x5d, 0xa6, 0x33, 0xb9, 0x06, - 0x95, 0x0c, 0xa4, 0x62, 0xea, 0x10, 0x9b, 0x62, 0xf9, 0x6f, 0x01, 0x8a, 0x3d, 0x6a, 0xbc, 0xc2, - 0xce, 0x84, 0xcc, 0x07, 0x47, 0x1d, 0xf1, 0x33, 0xc8, 0x53, 0xd3, 0xb0, 0xb1, 0xfb, 0x5e, 0x09, - 0x61, 0x9c, 0xa8, 0x42, 0xd9, 0xb3, 0xcd, 0x29, 0x76, 0x29, 0x9a, 0x0c, 0x91, 0xa6, 0x11, 0xcf, - 0x66, 0x43, 0x53, 0x0f, 0xd5, 0xec, 0xa7, 0xd5, 0x0c, 0xa2, 0xc8, 0x4e, 0x10, 0x78, 0xac, 0xab, - 0xa2, 0xb7, 0x84, 0x89, 0x15, 0xb8, 0xcd, 0x66, 0xc3, 0x31, 0xa2, 0xe3, 0xea, 0x96, 0xdf, 0x86, - 0x9a, 0x67, 
0xb3, 0x2f, 0x11, 0x1d, 0xb7, 0x3f, 0xf6, 0x85, 0x87, 0xff, 0xec, 0xab, 0xbe, 0x9f, - 0x52, 0x1d, 0xcb, 0x90, 0x0f, 0xa0, 0x9c, 0x9c, 0x47, 0x7a, 0xc5, 0x1d, 0xd8, 0x1a, 0x1c, 0x75, - 0xb8, 0xb6, 0xa2, 0xea, 0x0f, 0xe5, 0x3f, 0x04, 0x28, 0xf4, 0xa8, 0xd1, 0x33, 0x6d, 0xd6, 0x7f, - 0x79, 0xd3, 0xe5, 0x3f, 0xc9, 0xc8, 0xbf, 0x97, 0x92, 0x1f, 0x68, 0x90, 0xef, 0x41, 0x29, 0x9e, - 0xc4, 0x0b, 0xfd, 0xfb, 0x26, 0x47, 0x8f, 0x78, 0x38, 0xee, 0xa3, 0xf9, 0x84, 0x20, 0xfd, 0x86, - 0xc8, 0x3d, 0x81, 0xd2, 0xa2, 0xa6, 0x13, 0xb4, 0xc6, 0x85, 0x6f, 0xb7, 0xea, 0x57, 0x14, 0x0c, - 0x05, 0xa8, 0x3b, 0x5e, 0x06, 0x11, 0x9f, 0x41, 0x69, 0x8a, 0x5d, 0xf3, 0x5b, 0x53, 0x43, 0xcc, - 0x24, 0xf6, 0x50, 0x47, 0x0c, 0x55, 0x73, 0xdc, 0xc5, 0x9d, 0x24, 0xf1, 0x0a, 0x31, 0xd4, 0x7e, - 0x96, 0xf1, 0x73, 0x2f, 0xe5, 0x67, 0xda, 0x2c, 0x79, 0x0f, 0x6a, 0x4b, 0x60, 0xec, 0xef, 0x2f, - 0x9b, 0xf0, 0x01, 0x77, 0xdd, 0x70, 0x11, 0xc3, 0x37, 0xe7, 0x24, 0x9d, 0x40, 0xc9, 0xe2, 0x3d, - 0xf9, 0x5e, 0xac, 0xf5, 0xb6, 0x17, 0x85, 0xc5, 0xde, 0x5a, 0x19, 0x44, 0x7c, 0x00, 0x05, 0xbf, - 0x55, 0xc4, 0x3c, 0x17, 0x87, 0x9e, 0x2e, 0x80, 0xf6, 0xd3, 0x8c, 0x99, 0x95, 0xcc, 0xe6, 0x8c, - 0x9c, 0x91, 0x2b, 0xb0, 0x9b, 0x02, 0x62, 0x13, 0x7f, 0x16, 0xe0, 0x4e, 0x8f, 0x1a, 0xdf, 0x10, - 0x86, 0x8f, 0xed, 0x11, 0xf1, 0xec, 0xeb, 0xec, 0x50, 0x05, 0x6e, 0x9b, 0x41, 0x72, 0x68, 0xdc, - 0x6e, 0x5a, 0x67, 0x58, 0x59, 0x8d, 0xa2, 0xda, 0x8f, 0x33, 0x7d, 0x97, 0x3c, 0xac, 0xa4, 0xbb, - 0x90, 0xab, 0x70, 0x3f, 0x8d, 0xc4, 0x2d, 0xff, 0x15, 0xbc, 0x04, 0x3e, 0xf5, 0x05, 0xa2, 0x7d, - 0xd7, 0xd4, 0xf0, 0x35, 0x7a, 0x6e, 0x40, 0x89, 0x8c, 0x28, 0x76, 0xa7, 0x58, 0x1f, 0x6a, 0x63, - 0x64, 0xda, 0xd1, 0xb2, 0x17, 0xd4, 0xbb, 0x11, 0xf1, 0xd2, 0xc7, 0x8f, 0x75, 0xb1, 0x0c, 0xb7, - 0x1c, 0xff, 0x6f, 0xf8, 0x2a, 0xe6, 0xd4, 0x60, 0x22, 0x3e, 0x86, 0xe2, 0x68, 0x42, 0xb4, 0xef, - 0x86, 0xb6, 0x67, 0x8d, 0xb0, 0xcb, 0x57, 0x27, 0xa7, 0x6e, 0x73, 0xec, 0x35, 0x87, 0xda, 0x9f, - 0x64, 0x74, 0xa6, 0x5f, 0x8c, 0xa4, 0x82, 0xf0, 
0xc5, 0x48, 0x42, 0x91, 0xe0, 0xd6, 0x59, 0x0e, - 0xb6, 0x7a, 0xd4, 0x10, 0xdf, 0x40, 0x31, 0xf5, 0xfc, 0x3d, 0xcc, 0xec, 0xa6, 0xf4, 0x83, 0x23, - 0x7d, 0xb4, 0x96, 0x8e, 0xef, 0xe7, 0x13, 0x28, 0x2c, 0xde, 0x22, 0x69, 0x29, 0x27, 0xe6, 0x24, - 0xf9, 0x6a, 0x2e, 0x2e, 0xd6, 0x85, 0x7c, 0x78, 0xad, 0x57, 0x96, 0xa2, 0x03, 0x42, 0x7a, 0x74, - 0x05, 0x11, 0xd7, 0x78, 0x0b, 0x77, 0x32, 0x77, 0xe6, 0x72, 0x4a, 0x3a, 0x40, 0x7a, 0xfa, 0x9e, - 0x80, 0xb8, 0xf6, 0x6b, 0x80, 0xc4, 0x7d, 0xb1, 0xb7, 0xa2, 0x95, 0x88, 0x94, 0x9e, 0xac, 0x21, - 0xe3, 0x7a, 0x5f, 0xc1, 0x76, 0xf2, 0xe8, 0x3c, 0x58, 0xca, 0x49, 0xb0, 0xd2, 0x87, 0xeb, 0xd8, - 0xb8, 0xe4, 0x1b, 0x28, 0xa6, 0xb6, 0xf6, 0xc3, 0x95, 0x59, 0x11, 0xbd, 0x62, 0x95, 0x57, 0xed, - 0x21, 0xe9, 0xd6, 0xf7, 0x97, 0xa7, 0x0d, 0xa1, 0xdb, 0x3f, 0x3b, 0xaf, 0x0b, 0xef, 0xce, 0xeb, - 0xc2, 0x7f, 0xe7, 0x75, 0xe1, 0xa7, 0x8b, 0xfa, 0xc6, 0xbb, 0x8b, 0xfa, 0xc6, 0x3f, 0x17, 0xf5, - 0x8d, 0xb7, 0x2f, 0x0c, 0x93, 0x8d, 0xbd, 0x51, 0x53, 0x23, 0x96, 0xe2, 0x78, 0x74, 0xcc, 0xcf, - 0x04, 0x1f, 0x1d, 0xf2, 0xe1, 0xa1, 0x4d, 0x74, 0xac, 0xcc, 0x94, 0xc5, 0x0e, 0xe6, 0x9f, 0x73, - 0xa3, 0x3c, 0xff, 0x3c, 0xfb, 0xfc, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xcf, 0x87, 0x73, - 0x3c, 0x0a, 0x00, 0x00, + 0x14, 0x36, 0x6d, 0x59, 0x81, 0x9e, 0xd4, 0xc4, 0xa2, 0xe5, 0x48, 0xa6, 0x13, 0xc5, 0x66, 0xda, + 0xc6, 0x75, 0x6a, 0xb1, 0x71, 0x81, 0x0c, 0xda, 0xac, 0xc4, 0x68, 0x0d, 0x43, 0x89, 0xca, 0xda, + 0x1d, 0xb2, 0x08, 0x27, 0xf1, 0x4a, 0x11, 0xb5, 0x78, 0x04, 0xef, 0x28, 0xd0, 0x5b, 0xd1, 0xb1, + 0x53, 0xa7, 0x8e, 0xdd, 0x8b, 0x2e, 0x1e, 0xfa, 0x07, 0x74, 0x6b, 0xb6, 0x06, 0x2d, 0x0a, 0x74, + 0x2a, 0x0a, 0x7b, 0xf0, 0xbf, 0x51, 0xf0, 0x48, 0x1e, 0x7f, 0x48, 0x76, 0x02, 0x4f, 0x5e, 0x84, + 0xe3, 0xf7, 0xbd, 0xf7, 0xf1, 0x7d, 0xef, 0xee, 0xf4, 0x08, 0x2b, 0x1e, 0xf6, 0xf1, 0xd0, 0x63, + 0xc4, 0xd5, 0x26, 0x4f, 0x34, 0xe6, 0xb7, 0x1c, 0x97, 0x30, 0x22, 0x57, 0x04, 0xdc, 0x9a, 0x3c, + 0x51, 0xaa, 0x68, 0x6c, 0xd9, 0x44, 0xe3, 0xbf, 0x61, 0x80, 
0x52, 0x1f, 0x12, 0x3a, 0x26, 0x54, + 0x1b, 0x53, 0x33, 0x48, 0x1c, 0x53, 0x33, 0x22, 0x1a, 0x59, 0xc1, 0x13, 0x07, 0xd3, 0x88, 0xa9, + 0x99, 0xc4, 0x24, 0x7c, 0xa9, 0x05, 0xab, 0x08, 0x5d, 0x0d, 0x85, 0xfa, 0x21, 0x11, 0x3e, 0x84, + 0x94, 0xfa, 0x8b, 0x04, 0x77, 0xba, 0xd4, 0x3c, 0x72, 0x0c, 0xc4, 0x70, 0x0f, 0xb9, 0x68, 0x4c, + 0xe5, 0xa7, 0x50, 0x42, 0x1e, 0x1b, 0x11, 0xd7, 0x62, 0x27, 0x0d, 0x69, 0x5d, 0xda, 0x2c, 0x75, + 0x1a, 0x7f, 0xfe, 0xba, 0x5d, 0x8b, 0x12, 0x77, 0x0d, 0xc3, 0xc5, 0x94, 0x7e, 0xc9, 0x5c, 0xcb, + 0x36, 0xf5, 0x24, 0x54, 0xde, 0x81, 0xa2, 0xc3, 0x15, 0x1a, 0xf3, 0xeb, 0xd2, 0x66, 0x79, 0xa7, + 0xd6, 0x4a, 0x3b, 0x6c, 0x85, 0xea, 0x9d, 0xc2, 0xeb, 0x7f, 0x1f, 0xcc, 0xe9, 0x51, 0x64, 0xfb, + 0xe3, 0xef, 0x2e, 0x4e, 0xb7, 0x12, 0x8d, 0xef, 0x2f, 0x4e, 0xb7, 0x56, 0x13, 0x77, 0xb9, 0xca, + 0xd4, 0x55, 0xa8, 0xe7, 0x20, 0x1d, 0x53, 0x87, 0xd8, 0x14, 0xab, 0x7f, 0x4b, 0x50, 0xe9, 0x52, + 0xf3, 0x39, 0x76, 0x8e, 0xc9, 0xc9, 0xd1, 0xde, 0xae, 0xfc, 0x09, 0x14, 0xa9, 0x65, 0xda, 0xd8, + 0x7d, 0xab, 0x85, 0x28, 0x4e, 0xd6, 0xa1, 0xe6, 0xd9, 0xd6, 0x04, 0xbb, 0x14, 0x1d, 0xf7, 0xd1, + 0x70, 0x48, 0x3c, 0x9b, 0xf5, 0x2d, 0x23, 0x72, 0xb3, 0x9e, 0x75, 0x73, 0x14, 0x47, 0xee, 0x86, + 0x81, 0xfb, 0x86, 0x2e, 0x7b, 0x53, 0x98, 0x5c, 0x87, 0x5b, 0xcc, 0xef, 0x8f, 0x10, 0x1d, 0x35, + 0x16, 0x82, 0x32, 0xf4, 0x22, 0xf3, 0x3f, 0x47, 0x74, 0xd4, 0xfe, 0x30, 0x30, 0x1e, 0xbd, 0x39, + 0x70, 0x7d, 0x37, 0xe3, 0x5a, 0xd8, 0x50, 0x37, 0xa1, 0x96, 0x7e, 0x8e, 0xfd, 0xca, 0x4b, 0xb0, + 0x70, 0xb4, 0xb7, 0xcb, 0xbd, 0x55, 0xf4, 0x60, 0xa9, 0xfe, 0x21, 0x41, 0xa9, 0x4b, 0xcd, 0xae, + 0x65, 0xb3, 0xde, 0xb3, 0x9b, 0x6e, 0xff, 0x61, 0xce, 0xfe, 0x72, 0xc6, 0x7e, 0xe8, 0x41, 0x5d, + 0x86, 0xaa, 0x78, 0x10, 0x1b, 0xfd, 0xdb, 0x3c, 0x47, 0xf7, 0x78, 0x38, 0xee, 0xa1, 0x93, 0x63, + 0x82, 0x8c, 0x1b, 0x62, 0xf7, 0x00, 0xaa, 0x89, 0xa6, 0x13, 0x96, 0xc6, 0x8d, 0x97, 0x77, 0x9a, + 0x97, 0x08, 0x46, 0x06, 0xf4, 0x25, 0x2f, 0x87, 0xc8, 0x8f, 0xa1, 0x3a, 0xc1, 0xae, 0xf5, 0xb5, + 
0x35, 0x44, 0xcc, 0x22, 0x76, 0xdf, 0x40, 0x0c, 0x35, 0x0a, 0xbc, 0x8b, 0x4b, 0x69, 0xe2, 0x39, + 0x62, 0xa8, 0xfd, 0x38, 0xd7, 0xcf, 0xb5, 0x4c, 0x3f, 0xb3, 0xcd, 0x52, 0xd7, 0x60, 0x75, 0x0a, + 0x14, 0xfd, 0xfd, 0x79, 0x1e, 0xde, 0xe3, 0x5d, 0x37, 0x5d, 0xc4, 0xf0, 0xcd, 0xb9, 0x49, 0x07, + 0x50, 0x1d, 0xf3, 0x9a, 0x82, 0x5e, 0x5c, 0xd9, 0xdb, 0x6e, 0x1c, 0x26, 0x7a, 0x3b, 0xce, 0x21, + 0xf2, 0x3d, 0x28, 0x05, 0xa5, 0x22, 0xe6, 0xb9, 0x38, 0xea, 0x69, 0x02, 0xb4, 0x1f, 0xe5, 0x9a, + 0x59, 0xcf, 0x1d, 0xce, 0xb8, 0x33, 0x6a, 0x1d, 0x56, 0x32, 0x80, 0x68, 0xe2, 0x8f, 0x12, 0xdc, + 0xee, 0x52, 0xf3, 0x2b, 0xc2, 0xf0, 0xbe, 0x3d, 0x20, 0x9e, 0x7d, 0x9d, 0x13, 0xaa, 0xc1, 0x2d, + 0x2b, 0x4c, 0x8e, 0x1a, 0xb7, 0x92, 0xf5, 0x19, 0x29, 0xeb, 0x71, 0x54, 0x7b, 0x23, 0x57, 0x77, + 0xd5, 0xc3, 0x5a, 0xb6, 0x0a, 0xb5, 0x01, 0x77, 0xb3, 0x88, 0x28, 0xf9, 0xf7, 0x70, 0x12, 0x04, + 0xd4, 0x4b, 0x8f, 0x5d, 0xb7, 0xe6, 0x65, 0x58, 0x64, 0x7e, 0xbc, 0xd5, 0x25, 0xbd, 0xc0, 0xfc, + 0x7d, 0x43, 0xee, 0x40, 0x99, 0x0c, 0x28, 0x76, 0x27, 0xd8, 0xe8, 0x33, 0x3f, 0xda, 0xb4, 0x8d, + 0xac, 0x99, 0xf8, 0x9d, 0x2f, 0x79, 0x20, 0xdf, 0x2c, 0x1d, 0xe2, 0xac, 0x43, 0xbf, 0xfd, 0x51, + 0xce, 0x5b, 0x76, 0x4a, 0xa4, 0xab, 0x8e, 0xa6, 0x44, 0x1a, 0x12, 0x26, 0xff, 0x4a, 0x4c, 0x7e, + 0x86, 0x68, 0xcf, 0xb5, 0x86, 0xf8, 0x1a, 0x26, 0xb7, 0xa0, 0x2a, 0xfc, 0x0c, 0x47, 0xc8, 0xb2, + 0x13, 0xc3, 0x77, 0x62, 0xe2, 0x59, 0x80, 0xef, 0x1b, 0x72, 0x0d, 0x16, 0x9d, 0xe0, 0x35, 0xdc, + 0x75, 0x41, 0x0f, 0x1f, 0xe4, 0x0d, 0xa8, 0x0c, 0x8e, 0xc9, 0xf0, 0x9b, 0xbe, 0xed, 0x8d, 0x07, + 0xd8, 0xe5, 0x47, 0xb0, 0xa0, 0x97, 0x39, 0xf6, 0x82, 0x43, 0xef, 0x60, 0x38, 0x76, 0x90, 0x32, + 0x1c, 0x43, 0xb1, 0xe1, 0x9d, 0x9f, 0x16, 0x61, 0xa1, 0x4b, 0x4d, 0xf9, 0x10, 0x2a, 0x99, 0x19, + 0x7f, 0x3f, 0x77, 0x65, 0xb2, 0x53, 0x55, 0xf9, 0xe0, 0x4a, 0x5a, 0x0c, 0xa1, 0x03, 0x28, 0x25, + 0x03, 0x57, 0x99, 0xca, 0x11, 0x9c, 0xa2, 0x5e, 0xce, 0x09, 0xb1, 0x0e, 0x14, 0xa3, 0xd9, 0x55, + 0x9f, 0x8a, 0x0e, 0x09, 0xe5, 0xc1, 
0x25, 0x84, 0xd0, 0x78, 0x05, 0xb7, 0x73, 0x83, 0x61, 0x3a, + 0x25, 0x1b, 0xa0, 0x3c, 0x7a, 0x4b, 0x80, 0xd0, 0x7e, 0x01, 0x90, 0xfa, 0x53, 0x5c, 0x9b, 0x51, + 0x4a, 0x4c, 0x2a, 0x0f, 0xaf, 0x20, 0x85, 0xde, 0x17, 0x50, 0x4e, 0xff, 0x3f, 0xdc, 0x9b, 0xca, + 0x49, 0xb1, 0xca, 0xfb, 0x57, 0xb1, 0x42, 0xf2, 0x10, 0x2a, 0x99, 0xfb, 0x7b, 0x7f, 0x66, 0x56, + 0x4c, 0xcf, 0xd8, 0xe5, 0x59, 0x97, 0x26, 0x56, 0x15, 0x17, 0x66, 0xb6, 0x6a, 0x4c, 0x5f, 0xa2, + 0x9a, 0x3f, 0x99, 0xca, 0xe2, 0xb7, 0x17, 0xa7, 0x5b, 0x52, 0xa7, 0xf7, 0xfa, 0xac, 0x29, 0xbd, + 0x39, 0x6b, 0x4a, 0xff, 0x9d, 0x35, 0xa5, 0x1f, 0xce, 0x9b, 0x73, 0x6f, 0xce, 0x9b, 0x73, 0xff, + 0x9c, 0x37, 0xe7, 0x5e, 0x3d, 0x35, 0x2d, 0x36, 0xf2, 0x06, 0xad, 0x21, 0x19, 0x6b, 0x8e, 0x47, + 0x47, 0xfc, 0xa6, 0xf1, 0xd5, 0x36, 0x5f, 0x6e, 0xdb, 0xc4, 0xc0, 0x9a, 0xaf, 0x25, 0xf7, 0x82, + 0x7f, 0x09, 0x0f, 0x8a, 0xfc, 0xcb, 0xf6, 0xd3, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x83, 0x92, + 0xd4, 0x5a, 0x77, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -864,6 +969,8 @@ type MsgClient interface { MigrateUEA(ctx context.Context, in *MsgMigrateUEA, opts ...grpc.CallOption) (*MsgMigrateUEAResponse, error) // VoteInbound defines a message for voting on synthetic assets bridging from external chain to PC VoteInbound(ctx context.Context, in *MsgVoteInbound, opts ...grpc.CallOption) (*MsgVoteInboundResponse, error) + // VoteOutbound defines a message for voting on a observed outbound tx on external chain + VoteOutbound(ctx context.Context, in *MsgVoteOutbound, opts ...grpc.CallOption) (*MsgVoteOutboundResponse, error) // VoteGasPrice defines a message for universal validators to vote on the gas price VoteGasPrice(ctx context.Context, in *MsgVoteGasPrice, opts ...grpc.CallOption) (*MsgVoteGasPriceResponse, error) } @@ -930,6 +1037,15 @@ func (c *msgClient) VoteInbound(ctx context.Context, in *MsgVoteInbound, opts .. 
return out, nil } +func (c *msgClient) VoteOutbound(ctx context.Context, in *MsgVoteOutbound, opts ...grpc.CallOption) (*MsgVoteOutboundResponse, error) { + out := new(MsgVoteOutboundResponse) + err := c.cc.Invoke(ctx, "/uexecutor.v1.Msg/VoteOutbound", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *msgClient) VoteGasPrice(ctx context.Context, in *MsgVoteGasPrice, opts ...grpc.CallOption) (*MsgVoteGasPriceResponse, error) { out := new(MsgVoteGasPriceResponse) err := c.cc.Invoke(ctx, "/uexecutor.v1.Msg/VoteGasPrice", in, out, opts...) @@ -955,6 +1071,8 @@ type MsgServer interface { MigrateUEA(context.Context, *MsgMigrateUEA) (*MsgMigrateUEAResponse, error) // VoteInbound defines a message for voting on synthetic assets bridging from external chain to PC VoteInbound(context.Context, *MsgVoteInbound) (*MsgVoteInboundResponse, error) + // VoteOutbound defines a message for voting on a observed outbound tx on external chain + VoteOutbound(context.Context, *MsgVoteOutbound) (*MsgVoteOutboundResponse, error) // VoteGasPrice defines a message for universal validators to vote on the gas price VoteGasPrice(context.Context, *MsgVoteGasPrice) (*MsgVoteGasPriceResponse, error) } @@ -981,6 +1099,9 @@ func (*UnimplementedMsgServer) MigrateUEA(ctx context.Context, req *MsgMigrateUE func (*UnimplementedMsgServer) VoteInbound(ctx context.Context, req *MsgVoteInbound) (*MsgVoteInboundResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VoteInbound not implemented") } +func (*UnimplementedMsgServer) VoteOutbound(ctx context.Context, req *MsgVoteOutbound) (*MsgVoteOutboundResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VoteOutbound not implemented") +} func (*UnimplementedMsgServer) VoteGasPrice(ctx context.Context, req *MsgVoteGasPrice) (*MsgVoteGasPriceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VoteGasPrice not implemented") } @@ -1097,6 +1218,24 @@ func 
_Msg_VoteInbound_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } +func _Msg_VoteOutbound_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgVoteOutbound) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).VoteOutbound(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/uexecutor.v1.Msg/VoteOutbound", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).VoteOutbound(ctx, req.(*MsgVoteOutbound)) + } + return interceptor(ctx, in, info, handler) +} + func _Msg_VoteGasPrice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgVoteGasPrice) if err := dec(in); err != nil { @@ -1143,6 +1282,10 @@ var _Msg_serviceDesc = grpc.ServiceDesc{ MethodName: "VoteInbound", Handler: _Msg_VoteInbound_Handler, }, + { + MethodName: "VoteOutbound", + Handler: _Msg_VoteOutbound_Handler, + }, { MethodName: "VoteGasPrice", Handler: _Msg_VoteGasPrice_Handler, @@ -1599,6 +1742,78 @@ func (m *MsgVoteInboundResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *MsgVoteOutbound) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgVoteOutbound) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgVoteOutbound) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObservedTx != nil { + { + size, err := m.ObservedTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.TxId) > 0 { + i -= len(m.TxId) + copy(dAtA[i:], m.TxId) + i = encodeVarintTx(dAtA, i, uint64(len(m.TxId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgVoteOutboundResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgVoteOutboundResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgVoteOutboundResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + func (m *MsgVoteGasPrice) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1862,6 +2077,36 @@ func (m *MsgVoteInboundResponse) Size() (n int) { return n } +func (m *MsgVoteOutbound) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.TxId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.ObservedTx != nil { + l = m.ObservedTx.Size() + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgVoteOutboundResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + func (m *MsgVoteGasPrice) Size() (n int) { if m == nil { return 0 @@ -3139,6 +3384,206 @@ func (m *MsgVoteInboundResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *MsgVoteOutbound) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgVoteOutbound: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgVoteOutbound: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObservedTx == nil { + m.ObservedTx = &OutboundObservation{} + } + if err := m.ObservedTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgVoteOutboundResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgVoteOutboundResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgVoteOutboundResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MsgVoteGasPrice) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/uexecutor/types/tx_id.go 
b/x/uexecutor/types/tx_id.go new file mode 100644 index 00000000..784198ae --- /dev/null +++ b/x/uexecutor/types/tx_id.go @@ -0,0 +1,74 @@ +package types + +import ( + "encoding/hex" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/pkg/errors" +) + +var ( + stringType, _ = abi.NewType("string", "", nil) + + // ABI arguments layout: (string utxID, string outboundID) + txIDArgs = abi.Arguments{ + {Type: stringType}, + {Type: stringType}, + } +) + +// EncodeOutboundTxIDHex returns hex string of ABI(utxID, outboundID) +func EncodeOutboundTxIDHex(utxID, outboundID string) (string, error) { + bz, err := encodeOutboundTxID(utxID, outboundID) + if err != nil { + return "", err + } + return "0x" + hex.EncodeToString(bz), nil +} + +// DecodeOutboundTxIDHex decodes a hex string into (utxID, outboundID) +func DecodeOutboundTxIDHex(txIDHex string) (string, string, error) { + bz, err := hexStringToBytes(txIDHex) + if err != nil { + return "", "", err + } + return decodeOutboundTxID(bz) +} + +// Low-level encoding (bytes) +func encodeOutboundTxID(utxID, outboundID string) ([]byte, error) { + return txIDArgs.Pack(utxID, outboundID) +} + +// Low-level decoding (bytes → strings) +func decodeOutboundTxID(bz []byte) (string, string, error) { + values, err := txIDArgs.Unpack(bz) + if err != nil { + return "", "", errors.Wrap(err, "ABI decode failed") + } + + utxID := values[0].(string) + outID := values[1].(string) + + return utxID, outID, nil +} + +// Converts "0x…" or "…" into bytes +func hexStringToBytes(input string) ([]byte, error) { + if input == "" { + return nil, errors.New("empty tx_id") + } + + // Normalize 0x prefix + if strings.HasPrefix(input, "0x") || strings.HasPrefix(input, "0X") { + input = input[2:] + } + + bz, err := hex.DecodeString(input) + if err != nil { + return nil, errors.Wrap(err, "invalid hex in tx_id") + } + + return bz, nil +} diff --git a/x/uexecutor/types/tx_type.go b/x/uexecutor/types/tx_type.go new file mode 100644 index 
00000000..a4bef026 --- /dev/null +++ b/x/uexecutor/types/tx_type.go @@ -0,0 +1,37 @@ +package types + +// Solidity TX_TYPE (uint8) → Cosmos TxType +func SolidityTxTypeToProto(txTypeUint8 uint8) TxType { + switch txTypeUint8 { + case 0: + return TxType_GAS + case 1: + return TxType_GAS_AND_PAYLOAD + case 2: + return TxType_FUNDS + case 3: + return TxType_FUNDS_AND_PAYLOAD + case 4: + return TxType_PAYLOAD + case 5: + return TxType_INBOUND_REVERT + default: + return TxType_UNSPECIFIED_TX + } +} + +// Cosmos TxType → Solidity uint8 (for emitting events from core module if ever needed) +func ProtoTxTypeToSolidity(txType TxType) uint8 { + switch txType { + case TxType_GAS: + return 0 + case TxType_GAS_AND_PAYLOAD: + return 1 + case TxType_FUNDS: + return 2 + case TxType_FUNDS_AND_PAYLOAD: + return 3 + default: + return 0 // fallback + } +} diff --git a/x/uexecutor/types/types.pb.go b/x/uexecutor/types/types.pb.go index cdc11d75..da3e03ff 100644 --- a/x/uexecutor/types/types.pb.go +++ b/x/uexecutor/types/types.pb.go @@ -104,19 +104,22 @@ type Status int32 const ( Status_UNSPECIFIED Status = 0 Status_PENDING Status = 1 - Status_FINALIZED Status = 2 + Status_OBSERVED Status = 2 + Status_REVERTED Status = 3 ) var Status_name = map[int32]string{ 0: "UNSPECIFIED", 1: "PENDING", - 2: "FINALIZED", + 2: "OBSERVED", + 3: "REVERTED", } var Status_value = map[string]int32{ "UNSPECIFIED": 0, "PENDING": 1, - "FINALIZED": 2, + "OBSERVED": 2, + "REVERTED": 3, } func (x Status) String() string { @@ -127,37 +130,43 @@ func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor_fab6d3ca71d1e2a5, []int{2} } -type InboundTxType int32 +type TxType int32 const ( - InboundTxType_UNSPECIFIED_TX InboundTxType = 0 - InboundTxType_GAS InboundTxType = 1 - InboundTxType_FUNDS InboundTxType = 2 - InboundTxType_FUNDS_AND_PAYLOAD InboundTxType = 3 - InboundTxType_GAS_AND_PAYLOAD InboundTxType = 4 + TxType_UNSPECIFIED_TX TxType = 0 + TxType_GAS TxType = 1 + TxType_GAS_AND_PAYLOAD TxType = 2 + 
TxType_FUNDS TxType = 3 + TxType_FUNDS_AND_PAYLOAD TxType = 4 + TxType_PAYLOAD TxType = 5 + TxType_INBOUND_REVERT TxType = 6 ) -var InboundTxType_name = map[int32]string{ +var TxType_name = map[int32]string{ 0: "UNSPECIFIED_TX", 1: "GAS", - 2: "FUNDS", - 3: "FUNDS_AND_PAYLOAD", - 4: "GAS_AND_PAYLOAD", + 2: "GAS_AND_PAYLOAD", + 3: "FUNDS", + 4: "FUNDS_AND_PAYLOAD", + 5: "PAYLOAD", + 6: "INBOUND_REVERT", } -var InboundTxType_value = map[string]int32{ +var TxType_value = map[string]int32{ "UNSPECIFIED_TX": 0, "GAS": 1, - "FUNDS": 2, - "FUNDS_AND_PAYLOAD": 3, - "GAS_AND_PAYLOAD": 4, + "GAS_AND_PAYLOAD": 2, + "FUNDS": 3, + "FUNDS_AND_PAYLOAD": 4, + "PAYLOAD": 5, + "INBOUND_REVERT": 6, } -func (x InboundTxType) String() string { - return proto.EnumName(InboundTxType_name, int32(x)) +func (x TxType) String() string { + return proto.EnumName(TxType_name, int32(x)) } -func (InboundTxType) EnumDescriptor() ([]byte, []int) { +func (TxType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_fab6d3ca71d1e2a5, []int{3} } @@ -433,22 +442,22 @@ func (m *UniversalAccountId) GetOwner() string { return "" } -type InboundStatus struct { - Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=uexecutor.v1.Status" json:"status,omitempty"` +type RevertInstructions struct { + FundRecipient string `protobuf:"bytes,1,opt,name=fund_recipient,json=fundRecipient,proto3" json:"fund_recipient,omitempty"` } -func (m *InboundStatus) Reset() { *m = InboundStatus{} } -func (m *InboundStatus) String() string { return proto.CompactTextString(m) } -func (*InboundStatus) ProtoMessage() {} -func (*InboundStatus) Descriptor() ([]byte, []int) { +func (m *RevertInstructions) Reset() { *m = RevertInstructions{} } +func (m *RevertInstructions) String() string { return proto.CompactTextString(m) } +func (*RevertInstructions) ProtoMessage() {} +func (*RevertInstructions) Descriptor() ([]byte, []int) { return fileDescriptor_fab6d3ca71d1e2a5, []int{4} } -func (m *InboundStatus) XXX_Unmarshal(b 
[]byte) error { +func (m *RevertInstructions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *InboundStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RevertInstructions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_InboundStatus.Marshal(b, m, deterministic) + return xxx_messageInfo_RevertInstructions.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -458,36 +467,37 @@ func (m *InboundStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (m *InboundStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_InboundStatus.Merge(m, src) +func (m *RevertInstructions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RevertInstructions.Merge(m, src) } -func (m *InboundStatus) XXX_Size() int { +func (m *RevertInstructions) XXX_Size() int { return m.Size() } -func (m *InboundStatus) XXX_DiscardUnknown() { - xxx_messageInfo_InboundStatus.DiscardUnknown(m) +func (m *RevertInstructions) XXX_DiscardUnknown() { + xxx_messageInfo_RevertInstructions.DiscardUnknown(m) } -var xxx_messageInfo_InboundStatus proto.InternalMessageInfo +var xxx_messageInfo_RevertInstructions proto.InternalMessageInfo -func (m *InboundStatus) GetStatus() Status { +func (m *RevertInstructions) GetFundRecipient() string { if m != nil { - return m.Status + return m.FundRecipient } - return Status_UNSPECIFIED + return "" } type Inbound struct { - SourceChain string `protobuf:"bytes,1,opt,name=source_chain,json=sourceChain,proto3" json:"source_chain,omitempty"` - TxHash string `protobuf:"bytes,2,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` - Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` - Recipient string `protobuf:"bytes,4,opt,name=recipient,proto3" json:"recipient,omitempty"` - Amount string `protobuf:"bytes,5,opt,name=amount,proto3" json:"amount,omitempty"` - AssetAddr 
string `protobuf:"bytes,6,opt,name=asset_addr,json=assetAddr,proto3" json:"asset_addr,omitempty"` - LogIndex string `protobuf:"bytes,7,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` - TxType InboundTxType `protobuf:"varint,8,opt,name=tx_type,json=txType,proto3,enum=uexecutor.v1.InboundTxType" json:"tx_type,omitempty"` - UniversalPayload *UniversalPayload `protobuf:"bytes,9,opt,name=universal_payload,json=universalPayload,proto3" json:"universal_payload,omitempty"` - VerificationData string `protobuf:"bytes,10,opt,name=verification_data,json=verificationData,proto3" json:"verification_data,omitempty"` + SourceChain string `protobuf:"bytes,1,opt,name=source_chain,json=sourceChain,proto3" json:"source_chain,omitempty"` + TxHash string `protobuf:"bytes,2,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` + Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` + Recipient string `protobuf:"bytes,4,opt,name=recipient,proto3" json:"recipient,omitempty"` + Amount string `protobuf:"bytes,5,opt,name=amount,proto3" json:"amount,omitempty"` + AssetAddr string `protobuf:"bytes,6,opt,name=asset_addr,json=assetAddr,proto3" json:"asset_addr,omitempty"` + LogIndex string `protobuf:"bytes,7,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` + TxType TxType `protobuf:"varint,8,opt,name=tx_type,json=txType,proto3,enum=uexecutor.v1.TxType" json:"tx_type,omitempty"` + UniversalPayload *UniversalPayload `protobuf:"bytes,9,opt,name=universal_payload,json=universalPayload,proto3" json:"universal_payload,omitempty"` + VerificationData string `protobuf:"bytes,10,opt,name=verification_data,json=verificationData,proto3" json:"verification_data,omitempty"` + RevertInstructions *RevertInstructions `protobuf:"bytes,11,opt,name=revert_instructions,json=revertInstructions,proto3" json:"revert_instructions,omitempty"` } func (m *Inbound) Reset() { *m = Inbound{} } @@ -571,11 +581,11 @@ func (m *Inbound) GetLogIndex() 
string { return "" } -func (m *Inbound) GetTxType() InboundTxType { +func (m *Inbound) GetTxType() TxType { if m != nil { return m.TxType } - return InboundTxType_UNSPECIFIED_TX + return TxType_UNSPECIFIED_TX } func (m *Inbound) GetUniversalPayload() *UniversalPayload { @@ -592,6 +602,13 @@ func (m *Inbound) GetVerificationData() string { return "" } +func (m *Inbound) GetRevertInstructions() *RevertInstructions { + if m != nil { + return m.RevertInstructions + } + return nil +} + type PCTx struct { TxHash string `protobuf:"bytes,1,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` Sender string `protobuf:"bytes,2,opt,name=sender,proto3" json:"sender,omitempty"` @@ -675,18 +692,148 @@ func (m *PCTx) GetErrorMsg() string { return "" } +type OutboundObservation struct { + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + TxHash string `protobuf:"bytes,3,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` + ErrorMsg string `protobuf:"bytes,4,opt,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"` +} + +func (m *OutboundObservation) Reset() { *m = OutboundObservation{} } +func (m *OutboundObservation) String() string { return proto.CompactTextString(m) } +func (*OutboundObservation) ProtoMessage() {} +func (*OutboundObservation) Descriptor() ([]byte, []int) { + return fileDescriptor_fab6d3ca71d1e2a5, []int{7} +} +func (m *OutboundObservation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OutboundObservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OutboundObservation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OutboundObservation) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_OutboundObservation.Merge(m, src) +} +func (m *OutboundObservation) XXX_Size() int { + return m.Size() +} +func (m *OutboundObservation) XXX_DiscardUnknown() { + xxx_messageInfo_OutboundObservation.DiscardUnknown(m) +} + +var xxx_messageInfo_OutboundObservation proto.InternalMessageInfo + +func (m *OutboundObservation) GetSuccess() bool { + if m != nil { + return m.Success + } + return false +} + +func (m *OutboundObservation) GetBlockHeight() uint64 { + if m != nil { + return m.BlockHeight + } + return 0 +} + +func (m *OutboundObservation) GetTxHash() string { + if m != nil { + return m.TxHash + } + return "" +} + +func (m *OutboundObservation) GetErrorMsg() string { + if m != nil { + return m.ErrorMsg + } + return "" +} + +type OriginatingPcTx struct { + TxHash string `protobuf:"bytes,1,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` + LogIndex string `protobuf:"bytes,2,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` +} + +func (m *OriginatingPcTx) Reset() { *m = OriginatingPcTx{} } +func (m *OriginatingPcTx) String() string { return proto.CompactTextString(m) } +func (*OriginatingPcTx) ProtoMessage() {} +func (*OriginatingPcTx) Descriptor() ([]byte, []int) { + return fileDescriptor_fab6d3ca71d1e2a5, []int{8} +} +func (m *OriginatingPcTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OriginatingPcTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OriginatingPcTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OriginatingPcTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_OriginatingPcTx.Merge(m, src) +} +func (m *OriginatingPcTx) XXX_Size() int { + return m.Size() +} +func (m *OriginatingPcTx) XXX_DiscardUnknown() { + xxx_messageInfo_OriginatingPcTx.DiscardUnknown(m) +} + +var 
xxx_messageInfo_OriginatingPcTx proto.InternalMessageInfo + +func (m *OriginatingPcTx) GetTxHash() string { + if m != nil { + return m.TxHash + } + return "" +} + +func (m *OriginatingPcTx) GetLogIndex() string { + if m != nil { + return m.LogIndex + } + return "" +} + type OutboundTx struct { - DestinationChain string `protobuf:"bytes,1,opt,name=destination_chain,json=destinationChain,proto3" json:"destination_chain,omitempty"` - TxHash string `protobuf:"bytes,2,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` - Recipient string `protobuf:"bytes,3,opt,name=recipient,proto3" json:"recipient,omitempty"` - Amount string `protobuf:"bytes,4,opt,name=amount,proto3" json:"amount,omitempty"` - AssetAddr string `protobuf:"bytes,5,opt,name=asset_addr,json=assetAddr,proto3" json:"asset_addr,omitempty"` + DestinationChain string `protobuf:"bytes,1,opt,name=destination_chain,json=destinationChain,proto3" json:"destination_chain,omitempty"` + Recipient string `protobuf:"bytes,2,opt,name=recipient,proto3" json:"recipient,omitempty"` + Amount string `protobuf:"bytes,3,opt,name=amount,proto3" json:"amount,omitempty"` + ExternalAssetAddr string `protobuf:"bytes,4,opt,name=external_asset_addr,json=externalAssetAddr,proto3" json:"external_asset_addr,omitempty"` + Prc20AssetAddr string `protobuf:"bytes,5,opt,name=prc20_asset_addr,json=prc20AssetAddr,proto3" json:"prc20_asset_addr,omitempty"` + Sender string `protobuf:"bytes,6,opt,name=sender,proto3" json:"sender,omitempty"` + Payload string `protobuf:"bytes,7,opt,name=payload,proto3" json:"payload,omitempty"` + GasLimit string `protobuf:"bytes,8,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` + TxType TxType `protobuf:"varint,9,opt,name=tx_type,json=txType,proto3,enum=uexecutor.v1.TxType" json:"tx_type,omitempty"` + PcTx *OriginatingPcTx `protobuf:"bytes,10,opt,name=pc_tx,json=pcTx,proto3" json:"pc_tx,omitempty"` + ObservedTx *OutboundObservation 
`protobuf:"bytes,11,opt,name=observed_tx,json=observedTx,proto3" json:"observed_tx,omitempty"` + Id string `protobuf:"bytes,12,opt,name=id,proto3" json:"id,omitempty"` + OutboundStatus Status `protobuf:"varint,13,opt,name=outbound_status,json=outboundStatus,proto3,enum=uexecutor.v1.Status" json:"outbound_status,omitempty"` + RevertInstructions *RevertInstructions `protobuf:"bytes,14,opt,name=revert_instructions,json=revertInstructions,proto3" json:"revert_instructions,omitempty"` + PcRevertExecution *PCTx `protobuf:"bytes,15,opt,name=pc_revert_execution,json=pcRevertExecution,proto3" json:"pc_revert_execution,omitempty"` } func (m *OutboundTx) Reset() { *m = OutboundTx{} } func (*OutboundTx) ProtoMessage() {} func (*OutboundTx) Descriptor() ([]byte, []int) { - return fileDescriptor_fab6d3ca71d1e2a5, []int{7} + return fileDescriptor_fab6d3ca71d1e2a5, []int{9} } func (m *OutboundTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -722,45 +869,116 @@ func (m *OutboundTx) GetDestinationChain() string { return "" } -func (m *OutboundTx) GetTxHash() string { +func (m *OutboundTx) GetRecipient() string { if m != nil { - return m.TxHash + return m.Recipient } return "" } -func (m *OutboundTx) GetRecipient() string { +func (m *OutboundTx) GetAmount() string { if m != nil { - return m.Recipient + return m.Amount } return "" } -func (m *OutboundTx) GetAmount() string { +func (m *OutboundTx) GetExternalAssetAddr() string { if m != nil { - return m.Amount + return m.ExternalAssetAddr } return "" } -func (m *OutboundTx) GetAssetAddr() string { +func (m *OutboundTx) GetPrc20AssetAddr() string { if m != nil { - return m.AssetAddr + return m.Prc20AssetAddr + } + return "" +} + +func (m *OutboundTx) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +func (m *OutboundTx) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +func (m *OutboundTx) GetGasLimit() string { + if m != nil { + return m.GasLimit + } + return "" +} + 
+func (m *OutboundTx) GetTxType() TxType { + if m != nil { + return m.TxType + } + return TxType_UNSPECIFIED_TX +} + +func (m *OutboundTx) GetPcTx() *OriginatingPcTx { + if m != nil { + return m.PcTx + } + return nil +} + +func (m *OutboundTx) GetObservedTx() *OutboundObservation { + if m != nil { + return m.ObservedTx + } + return nil +} + +func (m *OutboundTx) GetId() string { + if m != nil { + return m.Id } return "" } +func (m *OutboundTx) GetOutboundStatus() Status { + if m != nil { + return m.OutboundStatus + } + return Status_UNSPECIFIED +} + +func (m *OutboundTx) GetRevertInstructions() *RevertInstructions { + if m != nil { + return m.RevertInstructions + } + return nil +} + +func (m *OutboundTx) GetPcRevertExecution() *PCTx { + if m != nil { + return m.PcRevertExecution + } + return nil +} + type UniversalTx struct { - InboundTx *Inbound `protobuf:"bytes,1,opt,name=inbound_tx,json=inboundTx,proto3" json:"inbound_tx,omitempty"` - PcTx []*PCTx `protobuf:"bytes,2,rep,name=pc_tx,json=pcTx,proto3" json:"pc_tx,omitempty"` - OutboundTx *OutboundTx `protobuf:"bytes,3,opt,name=outbound_tx,json=outboundTx,proto3" json:"outbound_tx,omitempty"` - UniversalStatus UniversalTxStatus `protobuf:"varint,4,opt,name=universal_status,json=universalStatus,proto3,enum=uexecutor.v1.UniversalTxStatus" json:"universal_status,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + InboundTx *Inbound `protobuf:"bytes,2,opt,name=inbound_tx,json=inboundTx,proto3" json:"inbound_tx,omitempty"` + PcTx []*PCTx `protobuf:"bytes,3,rep,name=pc_tx,json=pcTx,proto3" json:"pc_tx,omitempty"` + OutboundTx []*OutboundTx `protobuf:"bytes,4,rep,name=outbound_tx,json=outboundTx,proto3" json:"outbound_tx,omitempty"` + UniversalStatus UniversalTxStatus `protobuf:"varint,5,opt,name=universal_status,json=universalStatus,proto3,enum=uexecutor.v1.UniversalTxStatus" json:"universal_status,omitempty"` } func (m *UniversalTx) Reset() { *m = UniversalTx{} } func (*UniversalTx) 
ProtoMessage() {} func (*UniversalTx) Descriptor() ([]byte, []int) { - return fileDescriptor_fab6d3ca71d1e2a5, []int{8} + return fileDescriptor_fab6d3ca71d1e2a5, []int{10} } func (m *UniversalTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -789,6 +1007,13 @@ func (m *UniversalTx) XXX_DiscardUnknown() { var xxx_messageInfo_UniversalTx proto.InternalMessageInfo +func (m *UniversalTx) GetId() string { + if m != nil { + return m.Id + } + return "" +} + func (m *UniversalTx) GetInboundTx() *Inbound { if m != nil { return m.InboundTx @@ -803,7 +1028,7 @@ func (m *UniversalTx) GetPcTx() []*PCTx { return nil } -func (m *UniversalTx) GetOutboundTx() *OutboundTx { +func (m *UniversalTx) GetOutboundTx() []*OutboundTx { if m != nil { return m.OutboundTx } @@ -821,14 +1046,16 @@ func init() { proto.RegisterEnum("uexecutor.v1.VerificationType", VerificationType_name, VerificationType_value) proto.RegisterEnum("uexecutor.v1.UniversalTxStatus", UniversalTxStatus_name, UniversalTxStatus_value) proto.RegisterEnum("uexecutor.v1.Status", Status_name, Status_value) - proto.RegisterEnum("uexecutor.v1.InboundTxType", InboundTxType_name, InboundTxType_value) + proto.RegisterEnum("uexecutor.v1.TxType", TxType_name, TxType_value) proto.RegisterType((*Params)(nil), "uexecutor.v1.Params") proto.RegisterType((*UniversalPayload)(nil), "uexecutor.v1.UniversalPayload") proto.RegisterType((*MigrationPayload)(nil), "uexecutor.v1.MigrationPayload") proto.RegisterType((*UniversalAccountId)(nil), "uexecutor.v1.UniversalAccountId") - proto.RegisterType((*InboundStatus)(nil), "uexecutor.v1.InboundStatus") + proto.RegisterType((*RevertInstructions)(nil), "uexecutor.v1.RevertInstructions") proto.RegisterType((*Inbound)(nil), "uexecutor.v1.Inbound") proto.RegisterType((*PCTx)(nil), "uexecutor.v1.PCTx") + proto.RegisterType((*OutboundObservation)(nil), "uexecutor.v1.OutboundObservation") + proto.RegisterType((*OriginatingPcTx)(nil), "uexecutor.v1.OriginatingPcTx") 
proto.RegisterType((*OutboundTx)(nil), "uexecutor.v1.OutboundTx") proto.RegisterType((*UniversalTx)(nil), "uexecutor.v1.UniversalTx") } @@ -836,85 +1063,103 @@ func init() { func init() { proto.RegisterFile("uexecutor/v1/types.proto", fileDescriptor_fab6d3ca71d1e2a5) } var fileDescriptor_fab6d3ca71d1e2a5 = []byte{ - // 1236 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x15, 0xf5, 0xaf, 0x2b, 0xc7, 0xa6, 0x27, 0x4e, 0xa2, 0xc4, 0x89, 0xec, 0x4f, 0xc1, 0x87, - 0x18, 0x6e, 0x63, 0x21, 0x6e, 0x13, 0xa0, 0x06, 0xba, 0x50, 0x24, 0xd9, 0x61, 0xeb, 0xc8, 0x02, - 0x45, 0x19, 0x69, 0x36, 0x83, 0x31, 0x39, 0xa1, 0x88, 0x4a, 0xa4, 0xc0, 0x21, 0x55, 0x7a, 0xdd, - 0x5d, 0xd1, 0x45, 0x97, 0xd9, 0x35, 0x8f, 0xd0, 0xc7, 0x08, 0xd0, 0x4d, 0x96, 0x05, 0xba, 0x29, - 0xe2, 0x45, 0xfb, 0x18, 0xc5, 0x0c, 0x47, 0x22, 0xe5, 0xc4, 0x6d, 0x37, 0xd2, 0xdc, 0x73, 0x67, - 0x86, 0xe7, 0x9e, 0x7b, 0x66, 0x48, 0xa8, 0x85, 0x34, 0xa2, 0x66, 0x18, 0x78, 0x7e, 0x73, 0xf6, - 0xa8, 0x19, 0x9c, 0x4f, 0x29, 0xdb, 0x9b, 0xfa, 0x5e, 0xe0, 0xa1, 0x95, 0x45, 0x66, 0x6f, 0xf6, - 0xe8, 0xce, 0x86, 0xed, 0xd9, 0x9e, 0x48, 0x34, 0xf9, 0x28, 0x9e, 0x73, 0x67, 0x9d, 0x4c, 0x1c, - 0xd7, 0x6b, 0x8a, 0xdf, 0x18, 0x6a, 0x1c, 0x42, 0xb1, 0x4f, 0x7c, 0x32, 0x61, 0xe8, 0x1e, 0x00, - 0xf3, 0x26, 0x14, 0xcf, 0xc8, 0x38, 0xa4, 0xb5, 0xec, 0xb6, 0xb2, 0x53, 0xd6, 0x2b, 0x1c, 0x39, - 0xe5, 0xc0, 0xc1, 0xbd, 0xd7, 0x6f, 0xb6, 0x32, 0x7f, 0xbd, 0xd9, 0x52, 0x7e, 0xf8, 0xf3, 0x97, - 0x5d, 0x35, 0xa1, 0x31, 0x15, 0xab, 0x1b, 0xbf, 0x67, 0x41, 0x1d, 0xba, 0xce, 0x8c, 0xfa, 0x8c, - 0x8c, 0xfb, 0xe4, 0x7c, 0xec, 0x11, 0x0b, 0xad, 0x42, 0x36, 0xf0, 0x6a, 0xca, 0xb6, 0xb2, 0x53, - 0xd1, 0xb3, 0x81, 0x87, 0x36, 0xa0, 0x90, 0xec, 0x5e, 0xd1, 0xe3, 0x00, 0x21, 0xc8, 0x5b, 0x24, - 0x20, 0xb5, 0x9c, 0x00, 0xc5, 0x18, 0x6d, 0x42, 0xc5, 0x26, 0x0c, 0x8f, 0x9d, 0x89, 0x13, 0xd4, - 0xf2, 0x22, 0x51, 0xb6, 0x09, 0x3b, 0xe6, 0x31, 0xfa, 0x3f, 
0xac, 0x4d, 0x48, 0x84, 0x5f, 0x51, - 0x8a, 0xa7, 0xd4, 0xc7, 0x36, 0x61, 0xb5, 0x82, 0x98, 0xb2, 0x32, 0x21, 0xd1, 0x21, 0xa5, 0x7d, - 0xea, 0x1f, 0x11, 0x86, 0x9e, 0x40, 0x8d, 0x4f, 0x9b, 0xfa, 0x8e, 0xe7, 0x3b, 0xc1, 0xf9, 0xd2, - 0xfc, 0xa2, 0x98, 0xbf, 0x31, 0x21, 0x51, 0x5f, 0xa6, 0x93, 0x75, 0x1b, 0x50, 0x70, 0x3d, 0xd7, - 0xa4, 0xb5, 0x52, 0xcc, 0x52, 0x04, 0xe8, 0x0e, 0x94, 0x2d, 0x4a, 0xac, 0xb1, 0xe3, 0xd2, 0x5a, - 0x39, 0x26, 0x34, 0x8f, 0xd1, 0x63, 0x28, 0xce, 0x30, 0x6f, 0x46, 0xad, 0xb2, 0xad, 0xec, 0xac, - 0xee, 0xd7, 0xf7, 0xd2, 0xcd, 0xd8, 0x3b, 0xa5, 0xbe, 0xf3, 0xca, 0x31, 0x49, 0xe0, 0x78, 0xae, - 0x71, 0x3e, 0xa5, 0x7a, 0x61, 0xc6, 0xff, 0x0e, 0x76, 0xd2, 0x92, 0x6e, 0x26, 0x92, 0x86, 0x73, - 0x1d, 0xf1, 0x34, 0x16, 0xb2, 0xf1, 0xa3, 0x02, 0xea, 0x73, 0xc7, 0xf6, 0xc5, 0x16, 0x73, 0x75, - 0xef, 0x42, 0x65, 0x32, 0xc7, 0xa4, 0xc8, 0x09, 0x90, 0x54, 0x91, 0xbd, 0xaa, 0x8a, 0xdc, 0x72, - 0x15, 0x57, 0xd2, 0x59, 0xec, 0xb9, 0xa0, 0xf3, 0x5a, 0x01, 0xb4, 0x68, 0x76, 0xcb, 0x34, 0xbd, - 0xd0, 0x0d, 0x34, 0x0b, 0x3d, 0x80, 0x35, 0x73, 0x44, 0x1c, 0x17, 0xbb, 0x64, 0x42, 0xd9, 0x94, - 0x98, 0x54, 0xd2, 0x5a, 0x15, 0x70, 0x6f, 0x8e, 0xa2, 0xdb, 0x50, 0x8e, 0x27, 0x3a, 0x96, 0xa4, - 0x57, 0x12, 0xb1, 0x66, 0x71, 0xda, 0xde, 0x77, 0x2e, 0xf5, 0x25, 0xbb, 0x38, 0xf8, 0x0f, 0x4a, - 0x91, 0x98, 0x45, 0xe3, 0x4b, 0xb8, 0xa6, 0xb9, 0x67, 0x5e, 0xe8, 0x5a, 0x83, 0x80, 0x04, 0x21, - 0x43, 0x9f, 0x42, 0x91, 0x89, 0x91, 0xe0, 0xb2, 0xba, 0xbf, 0xb1, 0xdc, 0x9b, 0x78, 0x96, 0x2e, - 0xe7, 0x34, 0x7e, 0xce, 0x41, 0x49, 0xae, 0x47, 0xff, 0x83, 0x15, 0xe6, 0x85, 0xbe, 0x49, 0xb1, - 0x20, 0x27, 0x6b, 0xa9, 0xc6, 0x58, 0x9b, 0x43, 0xe8, 0x16, 0x94, 0x82, 0x08, 0x8f, 0x08, 0x1b, - 0xc9, 0x3a, 0x8a, 0x41, 0xf4, 0x8c, 0xb0, 0x11, 0xba, 0x09, 0x45, 0x46, 0x5d, 0x6b, 0x51, 0x87, - 0x8c, 0x78, 0xcf, 0x7c, 0x6a, 0x3a, 0x53, 0x87, 0xba, 0x73, 0x5f, 0x27, 0x00, 0x5f, 0x45, 0x26, - 0xbc, 0x0c, 0xe9, 0x67, 0x19, 0xf1, 0xa3, 0x49, 0x18, 0xa3, 0x01, 0x26, 0x96, 0xe5, 0x4b, 0xef, - 
0x56, 0x04, 0xd2, 0xb2, 0x2c, 0x9f, 0x1f, 0x96, 0xb1, 0x67, 0x63, 0xc7, 0xb5, 0x68, 0x24, 0x4d, - 0x5b, 0x1e, 0x7b, 0xb6, 0xc6, 0x63, 0xf4, 0xb9, 0xa0, 0x28, 0xcc, 0x59, 0x16, 0x02, 0x6c, 0x2e, - 0x0b, 0x20, 0xab, 0x35, 0x22, 0xe1, 0xcc, 0x62, 0x20, 0xfe, 0xd1, 0xd7, 0xb0, 0xfe, 0x81, 0x0b, - 0x85, 0xb9, 0xab, 0x97, 0xcd, 0x7d, 0xf9, 0xd0, 0xeb, 0x6a, 0x78, 0xf9, 0x1a, 0xf8, 0x04, 0xd6, - 0x67, 0xa9, 0x23, 0x80, 0xc5, 0x69, 0x07, 0xc1, 0x53, 0x4d, 0x27, 0x3a, 0x24, 0x20, 0x07, 0xf5, - 0x74, 0xab, 0xd7, 0x93, 0x56, 0x3b, 0x31, 0xcf, 0xc6, 0x5b, 0x05, 0xf2, 0xfd, 0xb6, 0x11, 0xa5, - 0xb5, 0x57, 0xae, 0xd0, 0x3e, 0xbb, 0xa4, 0xfd, 0x6d, 0xe0, 0x57, 0x08, 0x0e, 0x19, 0xb5, 0x44, - 0x57, 0xf2, 0x7a, 0xc9, 0x26, 0x6c, 0xc8, 0xa8, 0x68, 0xf5, 0xd9, 0xd8, 0x33, 0xbf, 0xc5, 0x23, - 0xea, 0xd8, 0xa3, 0xb8, 0x33, 0x79, 0xbd, 0x2a, 0xb0, 0x67, 0x02, 0x12, 0xbb, 0xc6, 0x3e, 0x2a, - 0xca, 0x5d, 0x63, 0x7f, 0x6d, 0x42, 0x85, 0xfa, 0xbe, 0xe7, 0xe3, 0x09, 0xb3, 0xe7, 0xe2, 0x0b, - 0xe0, 0x39, 0xb3, 0x0f, 0xee, 0xa6, 0x8b, 0x59, 0x4b, 0x5d, 0x9a, 0x26, 0x0e, 0xa2, 0xc6, 0xaf, - 0x0a, 0xc0, 0x49, 0x18, 0x48, 0xfd, 0xb9, 0x4c, 0x16, 0x65, 0x81, 0xe3, 0xc6, 0x2a, 0xa5, 0x4d, - 0xa7, 0xa6, 0x12, 0xff, 0xe2, 0xbc, 0x25, 0x87, 0xe5, 0xae, 0x76, 0x58, 0xfe, 0x1f, 0x1c, 0x56, - 0xb8, 0xe4, 0xb0, 0x83, 0x46, 0xba, 0x8e, 0x1b, 0x49, 0x1d, 0x9e, 0x64, 0xcf, 0xab, 0x79, 0x9d, - 0x85, 0xea, 0xc2, 0x0c, 0x06, 0x37, 0x1e, 0xc8, 0x9e, 0xe1, 0x20, 0x12, 0x75, 0x54, 0xf7, 0x6f, - 0x7c, 0xd4, 0x7b, 0x7a, 0xc5, 0x99, 0x9b, 0x10, 0x3d, 0x80, 0x82, 0x10, 0xa7, 0x96, 0xdd, 0xce, - 0xed, 0x54, 0xf7, 0xd1, 0xf2, 0x02, 0xde, 0x78, 0x3d, 0x3f, 0x35, 0x8d, 0x08, 0x7d, 0x01, 0xd5, - 0xd4, 0xd3, 0x45, 0xa5, 0xd5, 0xfd, 0xda, 0xf2, 0xf4, 0x44, 0x5c, 0x1d, 0xbc, 0x44, 0xe8, 0xaf, - 0x20, 0xf1, 0x28, 0x96, 0x4d, 0xcd, 0x8b, 0xb3, 0xb1, 0x75, 0x85, 0xb7, 0x8d, 0x48, 0xde, 0x13, - 0x6b, 0x8b, 0x85, 0x31, 0x70, 0x70, 0x3f, 0xad, 0xcc, 0xcd, 0x8f, 0xdd, 0x4c, 0x41, 0xb4, 0x7b, - 0x04, 0xea, 0xe5, 0x77, 0x00, 0xba, 
0x09, 0x88, 0x39, 0xb6, 0x4b, 0xad, 0x74, 0x46, 0xcd, 0xa0, - 0x4d, 0xb8, 0x15, 0x26, 0x8f, 0x5d, 0x4a, 0x2a, 0xbb, 0xdf, 0x67, 0x61, 0xfd, 0x03, 0x52, 0xe8, - 0x3e, 0x6c, 0x0d, 0x7b, 0xda, 0x69, 0x57, 0x1f, 0xb4, 0x8e, 0xb1, 0xf1, 0x02, 0x0f, 0x8c, 0x96, - 0x31, 0x1c, 0xe0, 0x61, 0x6f, 0xd0, 0xef, 0xb6, 0xb5, 0x43, 0xad, 0xdb, 0x51, 0x33, 0xe8, 0x3a, - 0xac, 0x69, 0xbd, 0xa7, 0x27, 0xc3, 0x5e, 0x07, 0x0f, 0x86, 0xed, 0x76, 0x77, 0x30, 0x50, 0x15, - 0x74, 0x0f, 0x6e, 0xf7, 0xbb, 0xbd, 0x8e, 0xd6, 0x3b, 0xc2, 0xf3, 0x64, 0xf7, 0x45, 0xb7, 0x3d, - 0x34, 0xb4, 0x93, 0x9e, 0x9a, 0x45, 0xb7, 0xe0, 0x7a, 0xbf, 0x2d, 0x91, 0x6e, 0xb2, 0x2e, 0xc7, - 0xc9, 0xa7, 0x13, 0x87, 0x2d, 0xed, 0xb8, 0xdb, 0x51, 0xf3, 0xe8, 0x06, 0xac, 0xf7, 0xdb, 0x78, - 0xbe, 0xa5, 0xde, 0x3d, 0xed, 0xea, 0x86, 0x5a, 0x40, 0x1b, 0xa0, 0x9e, 0x0c, 0x8d, 0x78, 0x7f, - 0x99, 0x54, 0x8b, 0x4b, 0xe8, 0x7c, 0xeb, 0x12, 0xe7, 0xb9, 0x40, 0xe5, 0xbe, 0x65, 0xb4, 0x02, - 0xe5, 0x76, 0xab, 0xd7, 0xee, 0xf2, 0xa8, 0xb2, 0xfb, 0x18, 0x8a, 0xb2, 0xf2, 0x35, 0xa8, 0x2e, - 0x57, 0x59, 0x85, 0xd2, 0xfc, 0x01, 0x0a, 0xba, 0x06, 0x95, 0x43, 0xad, 0xd7, 0x3a, 0xd6, 0x5e, - 0x76, 0x3b, 0x6a, 0x76, 0xd7, 0x5c, 0xbc, 0x1a, 0xe2, 0xcb, 0x0e, 0x21, 0x58, 0x4d, 0xad, 0xc6, - 0xc6, 0x0b, 0x35, 0x83, 0x4a, 0x90, 0x3b, 0x6a, 0x71, 0x69, 0x2a, 0x50, 0x38, 0x1c, 0xf6, 0x3a, - 0x03, 0x35, 0xcb, 0xab, 0x12, 0x43, 0xdc, 0xe2, 0xfc, 0x5b, 0xdf, 0x1c, 0x9f, 0xb4, 0x3a, 0x6a, - 0x8e, 0x33, 0x3d, 0x6a, 0x2d, 0x83, 0xf9, 0xa7, 0xfd, 0xb7, 0xef, 0xeb, 0xca, 0xbb, 0xf7, 0x75, - 0xe5, 0x8f, 0xf7, 0x75, 0xe5, 0xa7, 0x8b, 0x7a, 0xe6, 0xdd, 0x45, 0x3d, 0xf3, 0xdb, 0x45, 0x3d, - 0xf3, 0xf2, 0x89, 0xed, 0x04, 0xa3, 0xf0, 0x6c, 0xcf, 0xf4, 0x26, 0xcd, 0x69, 0xc8, 0x46, 0xe2, - 0x7c, 0x8b, 0xd1, 0x43, 0x31, 0x7c, 0xe8, 0x7a, 0x16, 0x6d, 0x46, 0xcd, 0xc4, 0x43, 0xe2, 0xf3, - 0xee, 0xac, 0x28, 0x3e, 0xd4, 0x3e, 0xfb, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x35, 0xb7, 0xec, 0x61, - 0xfb, 0x09, 0x00, 0x00, + // 1530 bytes of a gzipped FileDescriptorProto + 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x73, 0xdb, 0x54, + 0x10, 0x8f, 0xfc, 0xed, 0x75, 0x6a, 0xcb, 0x2f, 0x49, 0xab, 0x7e, 0xc4, 0x49, 0x5d, 0x4a, 0x33, + 0x61, 0x9a, 0xd0, 0x00, 0x9d, 0xc1, 0x33, 0x1c, 0x1c, 0x5b, 0x49, 0x0d, 0xa9, 0x6d, 0x64, 0x3b, + 0x53, 0xb8, 0x68, 0x14, 0xe9, 0x55, 0xd6, 0x60, 0x4b, 0x1e, 0x7d, 0x18, 0xe5, 0xc0, 0x89, 0x1b, + 0xc3, 0x81, 0x63, 0x8f, 0x3d, 0x72, 0x84, 0xff, 0xa2, 0xdc, 0x7a, 0x64, 0x86, 0x0b, 0xd3, 0x1e, + 0x60, 0x86, 0x7f, 0x82, 0x79, 0xef, 0x49, 0x96, 0xe4, 0x24, 0x4c, 0x87, 0x4b, 0xa2, 0xfd, 0x78, + 0xfb, 0x7e, 0xbb, 0xbf, 0xdd, 0x95, 0x0c, 0x82, 0x87, 0x7d, 0xac, 0x7a, 0xae, 0x65, 0xef, 0xcf, + 0x1f, 0xed, 0xbb, 0xe7, 0x33, 0xec, 0xec, 0xcd, 0x6c, 0xcb, 0xb5, 0xd0, 0xea, 0xc2, 0xb2, 0x37, + 0x7f, 0x74, 0x6b, 0x5d, 0xb7, 0x74, 0x8b, 0x1a, 0xf6, 0xc9, 0x13, 0xf3, 0xb9, 0x55, 0x55, 0xa6, + 0x86, 0x69, 0xed, 0xd3, 0xbf, 0x4c, 0x55, 0x3f, 0x82, 0x5c, 0x5f, 0xb1, 0x95, 0xa9, 0x83, 0x36, + 0x01, 0x1c, 0x6b, 0x8a, 0xe5, 0xb9, 0x32, 0xf1, 0xb0, 0x90, 0xda, 0xe6, 0x76, 0x0a, 0x52, 0x91, + 0x68, 0x4e, 0x89, 0xa2, 0xb1, 0xf9, 0xe2, 0xe5, 0xd6, 0xca, 0xdf, 0x2f, 0xb7, 0xb8, 0x1f, 0xfe, + 0xfa, 0x65, 0x97, 0x8f, 0x60, 0xcc, 0xe8, 0xe9, 0xfa, 0x1f, 0x29, 0xe0, 0x47, 0xa6, 0x31, 0xc7, + 0xb6, 0xa3, 0x4c, 0xfa, 0xca, 0xf9, 0xc4, 0x52, 0x34, 0x54, 0x86, 0x94, 0x6b, 0x09, 0xdc, 0x36, + 0xb7, 0x53, 0x94, 0x52, 0xae, 0x85, 0xd6, 0x21, 0x1b, 0x45, 0x2f, 0x4a, 0x4c, 0x40, 0x08, 0x32, + 0x9a, 0xe2, 0x2a, 0x42, 0x9a, 0x2a, 0xe9, 0x33, 0xba, 0x0d, 0x45, 0x5d, 0x71, 0xe4, 0x89, 0x31, + 0x35, 0x5c, 0x21, 0x43, 0x0d, 0x05, 0x5d, 0x71, 0x4e, 0x88, 0x8c, 0xee, 0x43, 0x65, 0xaa, 0xf8, + 0xf2, 0x73, 0x8c, 0xe5, 0x19, 0xb6, 0x65, 0x5d, 0x71, 0x84, 0x2c, 0x75, 0x59, 0x9d, 0x2a, 0xfe, + 0x11, 0xc6, 0x7d, 0x6c, 0x1f, 0x2b, 0x0e, 0x7a, 0x0c, 0x02, 0x71, 0x9b, 0xd9, 0x86, 0x65, 0x1b, + 0xee, 0x79, 0xc2, 0x3f, 0x47, 0xfd, 0xd7, 0xa7, 0x8a, 0xdf, 0x0f, 0xcc, 0xd1, 0xb9, 0x75, 0xc8, + 0x9a, 0x96, 0xa9, 0x62, 0x21, 0xcf, 
0x50, 0x52, 0x01, 0xdd, 0x82, 0x82, 0x86, 0x15, 0x6d, 0x62, + 0x98, 0x58, 0x28, 0x30, 0x40, 0xa1, 0x8c, 0x3e, 0x81, 0xdc, 0x5c, 0x26, 0x64, 0x08, 0xc5, 0x6d, + 0x6e, 0xa7, 0x7c, 0x50, 0xdb, 0x8b, 0x93, 0xb1, 0x77, 0x8a, 0x6d, 0xe3, 0xb9, 0xa1, 0x2a, 0xae, + 0x61, 0x99, 0xc3, 0xf3, 0x19, 0x96, 0xb2, 0x73, 0xf2, 0xaf, 0xb1, 0x13, 0x2f, 0xe9, 0xed, 0xa8, + 0xa4, 0x5e, 0x58, 0x47, 0x79, 0xc6, 0x0a, 0x59, 0xff, 0x91, 0x03, 0xfe, 0xa9, 0xa1, 0xdb, 0x34, + 0x44, 0x58, 0xdd, 0x3b, 0x50, 0x9c, 0x86, 0xba, 0xa0, 0xc8, 0x91, 0x22, 0xca, 0x22, 0x75, 0x55, + 0x16, 0xe9, 0x64, 0x16, 0x57, 0xc2, 0x59, 0xc4, 0x5c, 0xc0, 0x79, 0xc1, 0x01, 0x5a, 0x90, 0xdd, + 0x54, 0x55, 0xcb, 0x33, 0xdd, 0x8e, 0x86, 0x1e, 0x40, 0x45, 0x1d, 0x2b, 0x86, 0x29, 0x9b, 0xca, + 0x14, 0x3b, 0x33, 0x45, 0xc5, 0x01, 0xac, 0x32, 0x55, 0x77, 0x43, 0x2d, 0xba, 0x09, 0x05, 0xe6, + 0x68, 0x68, 0x01, 0xbc, 0x3c, 0x95, 0x3b, 0x1a, 0x81, 0x6d, 0x7d, 0x6b, 0x62, 0x3b, 0x40, 0xc7, + 0x84, 0x77, 0xa8, 0x94, 0xc2, 0x50, 0xd4, 0x55, 0x40, 0x12, 0x9e, 0x63, 0xdb, 0xed, 0x98, 0x8e, + 0x6b, 0x7b, 0x2a, 0xc1, 0xed, 0xa0, 0xfb, 0x50, 0x7e, 0xee, 0x99, 0x9a, 0x6c, 0x63, 0xd5, 0x98, + 0x19, 0xd8, 0x74, 0x03, 0x60, 0xd7, 0x88, 0x56, 0x0a, 0x95, 0x8d, 0xf7, 0xc3, 0x2b, 0x36, 0xa3, + 0x2b, 0x6c, 0x1a, 0x4d, 0x36, 0x62, 0xe1, 0xea, 0xff, 0xa4, 0x21, 0xdf, 0x31, 0xcf, 0x2c, 0xcf, + 0xd4, 0xd0, 0x5d, 0x58, 0x75, 0x2c, 0xcf, 0x56, 0xb1, 0x4c, 0x53, 0x08, 0x02, 0x97, 0x98, 0xae, + 0x45, 0x54, 0xe8, 0x06, 0xe4, 0x5d, 0x5f, 0x1e, 0x2b, 0xce, 0x38, 0xc8, 0x36, 0xe7, 0xfa, 0x4f, + 0x14, 0x67, 0x8c, 0xae, 0x43, 0xce, 0xc1, 0xa6, 0xb6, 0xc8, 0x36, 0x90, 0x08, 0xb3, 0x11, 0x52, + 0xd6, 0xfd, 0x91, 0x82, 0x9c, 0x52, 0xa6, 0x24, 0xd9, 0xa0, 0xeb, 0x03, 0x89, 0x0c, 0xb0, 0xe2, + 0x38, 0xd8, 0x95, 0x15, 0x4d, 0xb3, 0x83, 0x0e, 0x2f, 0x52, 0x4d, 0x53, 0xd3, 0x6c, 0x32, 0x52, + 0x13, 0x4b, 0x97, 0x0d, 0x53, 0xc3, 0x7e, 0xd0, 0xda, 0x85, 0x89, 0xa5, 0x77, 0x88, 0x8c, 0x1e, + 0x52, 0x88, 0xb4, 0x85, 0x0b, 0xb4, 0x85, 0xd7, 0x93, 0x2d, 0x3c, 0xf4, 
0x69, 0xe3, 0xe6, 0x5c, + 0xfa, 0x1f, 0x7d, 0x01, 0xd5, 0x0b, 0x4d, 0x4a, 0x7b, 0xbf, 0xb4, 0xdc, 0xfb, 0xcb, 0x3b, 0x41, + 0xe2, 0xbd, 0xe5, 0x2d, 0xf1, 0x01, 0x54, 0xe7, 0xb1, 0x09, 0x91, 0xe9, 0x32, 0x00, 0x0a, 0x90, + 0x8f, 0x1b, 0xda, 0x64, 0x31, 0x7c, 0x09, 0x6b, 0x97, 0x30, 0x22, 0x94, 0xe8, 0xdd, 0xdb, 0xc9, + 0xbb, 0x2f, 0x36, 0x82, 0x84, 0xec, 0x0b, 0xba, 0x46, 0x2d, 0xde, 0x5c, 0xd5, 0x88, 0x79, 0x83, + 0x31, 0x5c, 0x7f, 0xc5, 0x41, 0xa6, 0xdf, 0x1a, 0xfa, 0x71, 0x1e, 0xb9, 0x2b, 0x78, 0x4c, 0x25, + 0x78, 0xbc, 0x09, 0x64, 0x69, 0xc9, 0x9e, 0x83, 0x35, 0xca, 0x70, 0x46, 0xca, 0xeb, 0x8a, 0x33, + 0x72, 0x30, 0x6d, 0x9b, 0xb3, 0x89, 0xa5, 0x7e, 0x23, 0x8f, 0xb1, 0xa1, 0x8f, 0x19, 0xcb, 0x19, + 0xa9, 0x44, 0x75, 0x4f, 0xa8, 0x8a, 0x46, 0x75, 0x15, 0xd7, 0x0b, 0xb7, 0x55, 0x20, 0x11, 0x22, + 0xb1, 0x6d, 0x5b, 0xb6, 0x3c, 0x75, 0xf4, 0x90, 0x48, 0xaa, 0x78, 0xea, 0xe8, 0x8d, 0x3b, 0xf1, + 0x64, 0x2a, 0xb1, 0x35, 0xad, 0xca, 0xae, 0x5f, 0xff, 0x95, 0x83, 0xb5, 0x9e, 0xe7, 0xd2, 0xbc, + 0x7a, 0x67, 0x0e, 0xb6, 0xe7, 0x6c, 0x59, 0x08, 0x90, 0x77, 0x3c, 0x55, 0xc5, 0x8e, 0x43, 0x33, + 0x2b, 0x48, 0xa1, 0x78, 0x01, 0x67, 0xea, 0x22, 0xce, 0x58, 0x59, 0xd2, 0x89, 0xb2, 0x24, 0x80, + 0x66, 0x96, 0x80, 0x3e, 0x08, 0x41, 0xd6, 0x22, 0x90, 0x56, 0x00, 0x4d, 0xb6, 0x22, 0x6c, 0xf5, + 0x29, 0x54, 0x7a, 0xb6, 0xa1, 0x1b, 0xa6, 0xe2, 0x1a, 0xa6, 0xde, 0x57, 0xff, 0x8b, 0x88, 0x44, + 0x8f, 0xa7, 0x92, 0x3d, 0xde, 0x78, 0xef, 0x92, 0x05, 0x62, 0x45, 0x91, 0x65, 0x56, 0xa2, 0xdf, + 0xb2, 0x00, 0x61, 0x89, 0x86, 0x3e, 0x69, 0x4e, 0x0d, 0x3b, 0x2e, 0xf5, 0xb1, 0xcc, 0xc4, 0x8c, + 0xf3, 0x31, 0x03, 0x1b, 0xf4, 0xc4, 0xdc, 0xa6, 0xae, 0x9e, 0xdb, 0x74, 0x62, 0x6e, 0xf7, 0x60, + 0x0d, 0xfb, 0x2e, 0xb6, 0x4d, 0xb2, 0xc6, 0xa2, 0x01, 0x66, 0x05, 0xab, 0x86, 0xa6, 0xe6, 0x62, + 0x90, 0x77, 0x80, 0x9f, 0xd9, 0xea, 0xc1, 0x87, 0x71, 0x67, 0xb6, 0x09, 0xca, 0x54, 0x1f, 0x79, + 0x46, 0x7d, 0x99, 0x4b, 0xf4, 0xa5, 0x00, 0xf9, 0x70, 0x68, 0x59, 0xff, 0x84, 0x62, 0xf2, 0xbd, + 0x5b, 0x58, 
0x7a, 0xef, 0xc6, 0x96, 0x44, 0xf1, 0x1d, 0x96, 0xc4, 0x01, 0x64, 0x69, 0x49, 0xe9, + 0x2c, 0x97, 0x0e, 0x36, 0x93, 0xce, 0x4b, 0x9c, 0x4a, 0x99, 0x19, 0x61, 0xf6, 0x10, 0x4a, 0x8c, + 0x7b, 0xac, 0x91, 0x93, 0x6c, 0xac, 0xef, 0x2e, 0x9d, 0xbc, 0xd8, 0xc0, 0x12, 0x84, 0xa7, 0x86, + 0x3e, 0xf9, 0xea, 0x30, 0x34, 0x61, 0x95, 0x7d, 0x75, 0x18, 0x1a, 0xfa, 0x0c, 0x2a, 0x8b, 0xc6, + 0x0a, 0x06, 0xea, 0xda, 0x65, 0xf0, 0x07, 0xd4, 0x26, 0x95, 0x43, 0x67, 0x26, 0x5f, 0xb5, 0x71, + 0xca, 0xff, 0x7f, 0xe3, 0xa0, 0x43, 0x58, 0x9b, 0xa9, 0x72, 0x10, 0x95, 0x9d, 0x27, 0xef, 0xf0, + 0x0a, 0x0d, 0x89, 0x92, 0x21, 0xc9, 0xe6, 0x91, 0xaa, 0x33, 0x95, 0x85, 0x16, 0x43, 0xe7, 0x46, + 0x3d, 0x3e, 0xe8, 0x1b, 0x97, 0xcc, 0x90, 0xeb, 0xd7, 0x7f, 0x4e, 0x41, 0x69, 0xb1, 0x80, 0x17, + 0x95, 0xe1, 0x16, 0x95, 0xf9, 0x18, 0x20, 0x58, 0x72, 0xa4, 0xd8, 0x29, 0x7a, 0xfd, 0x46, 0xf2, + 0xfa, 0xe0, 0x35, 0x27, 0x15, 0x03, 0xc7, 0xa1, 0x8f, 0x1e, 0x84, 0xbc, 0xa6, 0xb7, 0xd3, 0x57, + 0xe0, 0x65, 0x64, 0x7e, 0x0a, 0xa5, 0x18, 0x1a, 0x21, 0x43, 0xdd, 0x85, 0xcb, 0xc9, 0x1c, 0xfa, + 0x12, 0x58, 0xd1, 0xd8, 0x7d, 0x0e, 0xd1, 0x7b, 0x22, 0x24, 0x2d, 0x4b, 0x49, 0xdb, 0xba, 0xe2, + 0xfd, 0x32, 0xf4, 0x03, 0xfe, 0x2a, 0x8b, 0x83, 0x4c, 0xd1, 0xb8, 0x17, 0xaf, 0xd4, 0xf5, 0xcb, + 0x3e, 0x1e, 0x5c, 0x7f, 0xf7, 0x18, 0xf8, 0xe5, 0xcf, 0x34, 0x74, 0x1d, 0x90, 0x63, 0xe8, 0x26, + 0xd6, 0xe2, 0x16, 0x7e, 0x05, 0xdd, 0x86, 0x1b, 0x5e, 0x74, 0x6d, 0xc2, 0xc8, 0xed, 0x7e, 0x9f, + 0x82, 0xea, 0x05, 0x50, 0xe8, 0x1e, 0x6c, 0x8d, 0xba, 0x9d, 0x53, 0x51, 0x1a, 0x34, 0x4f, 0xe4, + 0xe1, 0x33, 0x79, 0x30, 0x6c, 0x0e, 0x47, 0x03, 0x79, 0xd4, 0x1d, 0xf4, 0xc5, 0x56, 0xe7, 0xa8, + 0x23, 0xb6, 0xf9, 0x15, 0xb4, 0x06, 0x95, 0x4e, 0xf7, 0xb0, 0x37, 0xea, 0xb6, 0xe5, 0xc1, 0xa8, + 0xd5, 0x12, 0x07, 0x03, 0x9e, 0x43, 0x9b, 0x70, 0xb3, 0x2f, 0x76, 0xdb, 0x9d, 0xee, 0xb1, 0x1c, + 0x1a, 0xc5, 0x67, 0x62, 0x6b, 0x34, 0xec, 0xf4, 0xba, 0x7c, 0x0a, 0xdd, 0x80, 0xb5, 0x7e, 0x2b, + 0xd0, 0x88, 0xd1, 0xb9, 0x34, 0x01, 0x1f, 0x37, 
0x1c, 0x35, 0x3b, 0x27, 0x62, 0x9b, 0xcf, 0xa0, + 0x0d, 0xa8, 0xf6, 0x5b, 0x72, 0x18, 0x52, 0x12, 0x4f, 0x45, 0x69, 0xc8, 0x67, 0xd1, 0x3a, 0xf0, + 0xbd, 0xd1, 0x90, 0xc5, 0x0f, 0x8c, 0x7c, 0x2e, 0xa1, 0x0d, 0x43, 0xe7, 0x09, 0xce, 0x85, 0x36, + 0x88, 0x5b, 0x40, 0xab, 0x50, 0x68, 0x35, 0xbb, 0x2d, 0x91, 0x48, 0xc5, 0xdd, 0x43, 0xc8, 0x05, + 0x99, 0x57, 0xa0, 0x94, 0xcc, 0xb2, 0x04, 0xf9, 0xf0, 0x02, 0x8e, 0x9c, 0xea, 0x1d, 0x0e, 0x44, + 0xe9, 0x54, 0x6c, 0xf3, 0x29, 0x22, 0x31, 0x40, 0x62, 0x9b, 0x4f, 0xef, 0x7e, 0x07, 0x39, 0xb6, + 0x51, 0x10, 0x82, 0x72, 0x2c, 0x86, 0x3c, 0x7c, 0xc6, 0xaf, 0xa0, 0x3c, 0xa4, 0x8f, 0x9b, 0xa4, + 0x40, 0x6b, 0x50, 0x39, 0x6e, 0x0e, 0xe4, 0x26, 0x01, 0xde, 0xfc, 0xea, 0xa4, 0xd7, 0x24, 0x91, + 0x8a, 0x90, 0x3d, 0x1a, 0x75, 0xdb, 0xa4, 0x10, 0x1b, 0x50, 0xa5, 0x8f, 0x09, 0x8f, 0x0c, 0x85, + 0x11, 0x08, 0x59, 0x72, 0x41, 0x58, 0xdc, 0xa0, 0x22, 0xb9, 0xc3, 0xfe, 0xab, 0x37, 0x35, 0xee, + 0xf5, 0x9b, 0x1a, 0xf7, 0xe7, 0x9b, 0x1a, 0xf7, 0xd3, 0xdb, 0xda, 0xca, 0xeb, 0xb7, 0xb5, 0x95, + 0xdf, 0xdf, 0xd6, 0x56, 0xbe, 0x7e, 0xac, 0x1b, 0xee, 0xd8, 0x3b, 0xdb, 0x53, 0xad, 0xe9, 0xfe, + 0xcc, 0x73, 0xc6, 0xf4, 0xa5, 0x40, 0x9f, 0x1e, 0xd2, 0xc7, 0x87, 0xa6, 0xa5, 0xe1, 0x7d, 0x7f, + 0x3f, 0x6a, 0x35, 0xfa, 0x43, 0xed, 0x2c, 0x47, 0x7f, 0x72, 0x7d, 0xf4, 0x6f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xf7, 0x64, 0x7e, 0x00, 0xc5, 0x0d, 0x00, 0x00, } func (this *Params) Equal(that interface{}) bool { @@ -1049,6 +1294,30 @@ func (this *UniversalAccountId) Equal(that interface{}) bool { } return true } +func (this *RevertInstructions) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RevertInstructions) + if !ok { + that2, ok := that.(RevertInstructions) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FundRecipient != that1.FundRecipient { + return false + } + return true +} func (this *Inbound) 
Equal(that interface{}) bool { if that == nil { return this == nil @@ -1098,6 +1367,9 @@ func (this *Inbound) Equal(that interface{}) bool { if this.VerificationData != that1.VerificationData { return false } + if !this.RevertInstructions.Equal(that1.RevertInstructions) { + return false + } return true } func (this *PCTx) Equal(that interface{}) bool { @@ -1139,14 +1411,14 @@ func (this *PCTx) Equal(that interface{}) bool { } return true } -func (this *OutboundTx) Equal(that interface{}) bool { +func (this *OutboundObservation) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*OutboundTx) + that1, ok := that.(*OutboundObservation) if !ok { - that2, ok := that.(OutboundTx) + that2, ok := that.(OutboundObservation) if ok { that1 = &that2 } else { @@ -1158,31 +1430,55 @@ func (this *OutboundTx) Equal(that interface{}) bool { } else if this == nil { return false } - if this.DestinationChain != that1.DestinationChain { + if this.Success != that1.Success { + return false + } + if this.BlockHeight != that1.BlockHeight { return false } if this.TxHash != that1.TxHash { return false } - if this.Recipient != that1.Recipient { + if this.ErrorMsg != that1.ErrorMsg { return false } - if this.Amount != that1.Amount { + return true +} +func (this *OriginatingPcTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OriginatingPcTx) + if !ok { + that2, ok := that.(OriginatingPcTx) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { return false } - if this.AssetAddr != that1.AssetAddr { + if this.TxHash != that1.TxHash { + return false + } + if this.LogIndex != that1.LogIndex { return false } return true } -func (this *UniversalTx) Equal(that interface{}) bool { +func (this *OutboundTx) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*UniversalTx) + that1, ok := 
that.(*OutboundTx) if !ok { - that2, ok := that.(UniversalTx) + that2, ok := that.(OutboundTx) if ok { that1 = &that2 } else { @@ -1194,31 +1490,105 @@ func (this *UniversalTx) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.InboundTx.Equal(that1.InboundTx) { + if this.DestinationChain != that1.DestinationChain { return false } - if len(this.PcTx) != len(that1.PcTx) { + if this.Recipient != that1.Recipient { return false } - for i := range this.PcTx { - if !this.PcTx[i].Equal(that1.PcTx[i]) { - return false - } + if this.Amount != that1.Amount { + return false } - if !this.OutboundTx.Equal(that1.OutboundTx) { + if this.ExternalAssetAddr != that1.ExternalAssetAddr { return false } - if this.UniversalStatus != that1.UniversalStatus { + if this.Prc20AssetAddr != that1.Prc20AssetAddr { + return false + } + if this.Sender != that1.Sender { + return false + } + if this.Payload != that1.Payload { + return false + } + if this.GasLimit != that1.GasLimit { + return false + } + if this.TxType != that1.TxType { + return false + } + if !this.PcTx.Equal(that1.PcTx) { + return false + } + if !this.ObservedTx.Equal(that1.ObservedTx) { + return false + } + if this.Id != that1.Id { + return false + } + if this.OutboundStatus != that1.OutboundStatus { + return false + } + if !this.RevertInstructions.Equal(that1.RevertInstructions) { + return false + } + if !this.PcRevertExecution.Equal(that1.PcRevertExecution) { return false } return true } -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (this *UniversalTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UniversalTx) + if !ok { + that2, ok := that.(UniversalTx) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if 
this.Id != that1.Id { + return false + } + if !this.InboundTx.Equal(that1.InboundTx) { + return false + } + if len(this.PcTx) != len(that1.PcTx) { + return false + } + for i := range this.PcTx { + if !this.PcTx[i].Equal(that1.PcTx[i]) { + return false + } + } + if len(this.OutboundTx) != len(that1.OutboundTx) { + return false + } + for i := range this.OutboundTx { + if !this.OutboundTx[i].Equal(that1.OutboundTx[i]) { + return false + } + } + if this.UniversalStatus != that1.UniversalStatus { + return false + } + return true +} +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } return dAtA[:n], nil } @@ -1418,7 +1788,7 @@ func (m *UniversalAccountId) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *InboundStatus) Marshal() (dAtA []byte, err error) { +func (m *RevertInstructions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1428,20 +1798,22 @@ func (m *InboundStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InboundStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *RevertInstructions) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *InboundStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RevertInstructions) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Status != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Status)) + if len(m.FundRecipient) > 0 { + i -= len(m.FundRecipient) + copy(dAtA[i:], m.FundRecipient) + i = encodeVarintTypes(dAtA, i, uint64(len(m.FundRecipient))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -1466,6 +1838,18 @@ func (m *Inbound) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if 
m.RevertInstructions != nil { + { + size, err := m.RevertInstructions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if len(m.VerificationData) > 0 { i -= len(m.VerificationData) copy(dAtA[i:], m.VerificationData) @@ -1603,6 +1987,95 @@ func (m *PCTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *OutboundObservation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutboundObservation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OutboundObservation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMsg) > 0 { + i -= len(m.ErrorMsg) + copy(dAtA[i:], m.ErrorMsg) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ErrorMsg))) + i-- + dAtA[i] = 0x22 + } + if len(m.TxHash) > 0 { + i -= len(m.TxHash) + copy(dAtA[i:], m.TxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.TxHash))) + i-- + dAtA[i] = 0x1a + } + if m.BlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockHeight)) + i-- + dAtA[i] = 0x10 + } + if m.Success { + i-- + if m.Success { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OriginatingPcTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OriginatingPcTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OriginatingPcTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
len(m.LogIndex) > 0 { + i -= len(m.LogIndex) + copy(dAtA[i:], m.LogIndex) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LogIndex))) + i-- + dAtA[i] = 0x12 + } + if len(m.TxHash) > 0 { + i -= len(m.TxHash) + copy(dAtA[i:], m.TxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.TxHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *OutboundTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1623,32 +2096,118 @@ func (m *OutboundTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.AssetAddr) > 0 { - i -= len(m.AssetAddr) - copy(dAtA[i:], m.AssetAddr) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AssetAddr))) + if m.PcRevertExecution != nil { + { + size, err := m.PcRevertExecution.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.RevertInstructions != nil { + { + size, err := m.RevertInstructions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.OutboundStatus != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.OutboundStatus)) + i-- + dAtA[i] = 0x68 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0x62 + } + if m.ObservedTx != nil { + { + size, err := m.ObservedTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.PcTx != nil { + { + size, err := m.PcTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.TxType != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.TxType)) + i-- + dAtA[i] = 0x48 + } + if len(m.GasLimit) > 0 { + i -= len(m.GasLimit) 
+ copy(dAtA[i:], m.GasLimit) + i = encodeVarintTypes(dAtA, i, uint64(len(m.GasLimit))) + i-- + dAtA[i] = 0x42 + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x3a + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x32 + } + if len(m.Prc20AssetAddr) > 0 { + i -= len(m.Prc20AssetAddr) + copy(dAtA[i:], m.Prc20AssetAddr) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Prc20AssetAddr))) i-- dAtA[i] = 0x2a } + if len(m.ExternalAssetAddr) > 0 { + i -= len(m.ExternalAssetAddr) + copy(dAtA[i:], m.ExternalAssetAddr) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ExternalAssetAddr))) + i-- + dAtA[i] = 0x22 + } if len(m.Amount) > 0 { i -= len(m.Amount) copy(dAtA[i:], m.Amount) i = encodeVarintTypes(dAtA, i, uint64(len(m.Amount))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x1a } if len(m.Recipient) > 0 { i -= len(m.Recipient) copy(dAtA[i:], m.Recipient) i = encodeVarintTypes(dAtA, i, uint64(len(m.Recipient))) i-- - dAtA[i] = 0x1a - } - if len(m.TxHash) > 0 { - i -= len(m.TxHash) - copy(dAtA[i:], m.TxHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.TxHash))) - i-- dAtA[i] = 0x12 } if len(m.DestinationChain) > 0 { @@ -1684,19 +2243,21 @@ func (m *UniversalTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { if m.UniversalStatus != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.UniversalStatus)) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x28 } - if m.OutboundTx != nil { - { - size, err := m.OutboundTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.OutboundTx) > 0 { + for iNdEx := len(m.OutboundTx) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OutboundTx[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i-- + dAtA[i] = 
0x22 } - i-- - dAtA[i] = 0x1a } if len(m.PcTx) > 0 { for iNdEx := len(m.PcTx) - 1; iNdEx >= 0; iNdEx-- { @@ -1709,7 +2270,7 @@ func (m *UniversalTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } } if m.InboundTx != nil { @@ -1722,6 +2283,13 @@ func (m *UniversalTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Id))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil @@ -1836,14 +2404,15 @@ func (m *UniversalAccountId) Size() (n int) { return n } -func (m *InboundStatus) Size() (n int) { +func (m *RevertInstructions) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Status != 0 { - n += 1 + sovTypes(uint64(m.Status)) + l = len(m.FundRecipient) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } return n } @@ -1893,6 +2462,10 @@ func (m *Inbound) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.RevertInstructions != nil { + l = m.RevertInstructions.Size() + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -1927,32 +2500,110 @@ func (m *PCTx) Size() (n int) { return n } -func (m *OutboundTx) Size() (n int) { +func (m *OutboundObservation) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.DestinationChain) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.Success { + n += 2 } - l = len(m.TxHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.BlockHeight != 0 { + n += 1 + sovTypes(uint64(m.BlockHeight)) } - l = len(m.Recipient) + l = len(m.TxHash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Amount) + l = len(m.ErrorMsg) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.AssetAddr) + return n +} + +func (m *OriginatingPcTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TxHash) + if l > 
0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.LogIndex) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *OutboundTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DestinationChain) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Recipient) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Amount) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ExternalAssetAddr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Prc20AssetAddr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.GasLimit) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.TxType != 0 { + n += 1 + sovTypes(uint64(m.TxType)) + } + if m.PcTx != nil { + l = m.PcTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ObservedTx != nil { + l = m.ObservedTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Id) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.OutboundStatus != 0 { + n += 1 + sovTypes(uint64(m.OutboundStatus)) + } + if m.RevertInstructions != nil { + l = m.RevertInstructions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.PcRevertExecution != nil { + l = m.PcRevertExecution.Size() + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -1962,6 +2613,10 @@ func (m *UniversalTx) Size() (n int) { } var l int _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } if m.InboundTx != nil { l = m.InboundTx.Size() n += 1 + l + sovTypes(uint64(l)) @@ -1972,9 +2627,11 @@ func (m *UniversalTx) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } - if m.OutboundTx != nil { - l = m.OutboundTx.Size() - n += 1 + l + sovTypes(uint64(l)) + if len(m.OutboundTx) > 0 { + for _, e := range m.OutboundTx { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } } if m.UniversalStatus != 0 { n 
+= 1 + sovTypes(uint64(m.UniversalStatus)) @@ -2675,7 +3332,7 @@ func (m *UniversalAccountId) Unmarshal(dAtA []byte) error { } return nil } -func (m *InboundStatus) Unmarshal(dAtA []byte) error { +func (m *RevertInstructions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2698,17 +3355,17 @@ func (m *InboundStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InboundStatus: wiretype end group for non-group") + return fmt.Errorf("proto: RevertInstructions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InboundStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RevertInstructions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FundRecipient", wireType) } - m.Status = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2718,11 +3375,24 @@ func (m *InboundStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Status |= Status(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FundRecipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -3011,7 +3681,7 @@ func (m *Inbound) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TxType |= InboundTxType(b&0x7F) << shift + m.TxType |= TxType(b&0x7F) << shift if b < 0x80 { break } @@ -3084,6 +3754,42 @@ func (m *Inbound) 
Unmarshal(dAtA []byte) error { } m.VerificationData = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RevertInstructions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RevertInstructions == nil { + m.RevertInstructions = &RevertInstructions{} + } + if err := m.RevertInstructions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -3321,7 +4027,7 @@ func (m *PCTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *OutboundTx) Unmarshal(dAtA []byte) error { +func (m *OutboundObservation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3344,17 +4050,17 @@ func (m *OutboundTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OutboundTx: wiretype end group for non-group") + return fmt.Errorf("proto: OutboundObservation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OutboundTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OutboundObservation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DestinationChain", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift 
+= 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3364,25 +4070,32 @@ func (m *OutboundTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes + m.Success = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.BlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - m.DestinationChain = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TxHash", wireType) } @@ -3414,9 +4127,9 @@ func (m *OutboundTx) Unmarshal(dAtA []byte) error { } m.TxHash = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Recipient", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMsg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3444,11 +4157,61 @@ func (m *OutboundTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Recipient = string(dAtA[iNdEx:postIndex]) + m.ErrorMsg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *OriginatingPcTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OriginatingPcTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OriginatingPcTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TxHash", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3476,11 +4239,11 @@ func (m *OutboundTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Amount = string(dAtA[iNdEx:postIndex]) + m.TxHash = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AssetAddr", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LogIndex", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3508,7 +4271,7 @@ func (m *OutboundTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AssetAddr = string(dAtA[iNdEx:postIndex]) + m.LogIndex = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3531,7 +4294,7 @@ func (m *OutboundTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *UniversalTx) Unmarshal(dAtA []byte) error { +func (m *OutboundTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3554,17 +4317,17 @@ func (m 
*UniversalTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UniversalTx: wiretype end group for non-group") + return fmt.Errorf("proto: OutboundTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UniversalTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OutboundTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InboundTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DestinationChain", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3574,33 +4337,29 @@ func (m *UniversalTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if m.InboundTx == nil { - m.InboundTx = &Inbound{} - } - if err := m.InboundTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.DestinationChain = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PcTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Recipient", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3610,7 +4369,563 @@ func (m *UniversalTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Recipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Amount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalAssetAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalAssetAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prc20AssetAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prc20AssetAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GasLimit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GasLimit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxType", wireType) + } + m.TxType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxType |= TxType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PcTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PcTx == nil { + m.PcTx = &OriginatingPcTx{} + } + if err := m.PcTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObservedTx == nil { + m.ObservedTx = &OutboundObservation{} + } + if err := m.ObservedTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutboundStatus", wireType) + } + m.OutboundStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OutboundStatus |= Status(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RevertInstructions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RevertInstructions == nil { + m.RevertInstructions = &RevertInstructions{} + } + if err := 
m.RevertInstructions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PcRevertExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PcRevertExecution == nil { + m.PcRevertExecution = &PCTx{} + } + if err := m.PcRevertExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UniversalTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UniversalTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UniversalTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InboundTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InboundTx == nil { + m.InboundTx = &Inbound{} + } + if err := m.InboundTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PcTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3630,7 +4945,7 @@ func (m *UniversalTx) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field OutboundTx", wireType) } @@ -3659,14 +4974,12 @@ func (m *UniversalTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.OutboundTx == 
nil { - m.OutboundTx = &OutboundTx{} - } - if err := m.OutboundTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.OutboundTx = append(m.OutboundTx, &OutboundTx{}) + if err := m.OutboundTx[len(m.OutboundTx)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field UniversalStatus", wireType) } diff --git a/x/uexecutor/types/universal_tx.go b/x/uexecutor/types/universal_tx.go index 66f515d2..f25a38a5 100644 --- a/x/uexecutor/types/universal_tx.go +++ b/x/uexecutor/types/universal_tx.go @@ -20,6 +20,11 @@ func (p UniversalTx) String() string { // ValidateBasic does the sanity check on the UniversalTx fields. func (p UniversalTx) ValidateBasic() error { + // Validate Id is non-empty + if len(p.Id) == 0 { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "id cannot be empty") + } + // Validate inbound_tx if err := p.InboundTx.ValidateBasic(); err != nil { return errors.Wrap(err, "invalid inbound_tx") @@ -37,8 +42,14 @@ func (p UniversalTx) ValidateBasic() error { } // Validate outbound_tx - if err := p.OutboundTx.ValidateBasic(); err != nil { - return errors.Wrap(err, "invalid outbound_tx") + // Validate each outbound_tx + for i, tx := range p.OutboundTx { + if tx == nil { + return fmt.Errorf("pc_tx[%d] is nil", i) + } + if err := tx.ValidateBasic(); err != nil { + return errors.Wrapf(err, "invalid outbound_tx at index %d", i) + } } // Validate universal_status (must be a valid enum) diff --git a/x/uexecutor/types/universal_tx_test.go b/x/uexecutor/types/universal_tx_test.go index 57976ad1..1c90beee 100644 --- a/x/uexecutor/types/universal_tx_test.go +++ b/x/uexecutor/types/universal_tx_test.go @@ -9,6 +9,7 @@ import ( func TestUniversalTx_ValidateBasic(t *testing.T) { validUniversal := types.UniversalTx{ + Id: "1", InboundTx: &types.Inbound{ SourceChain: "eip155:11155111", TxHash: "0x123abc", @@ -17,7 +18,7 @@ func TestUniversalTx_ValidateBasic(t 
*testing.T) { Amount: "1000", AssetAddr: "0x000000000000000000000000000000000000cafe", LogIndex: "1", - TxType: types.InboundTxType_FUNDS, + TxType: types.TxType_FUNDS, }, PcTx: []*types.PCTx{ { @@ -28,12 +29,24 @@ func TestUniversalTx_ValidateBasic(t *testing.T) { Status: "SUCCESS", }, }, - OutboundTx: &types.OutboundTx{ - DestinationChain: "eip155:11155111", - TxHash: "0x456def", - Recipient: "0x000000000000000000000000000000000000beef", - Amount: "500", - AssetAddr: "0x000000000000000000000000000000000000cafe", + OutboundTx: []*types.OutboundTx{ + { + DestinationChain: "eip155:11155111", + Recipient: "0x000000000000000000000000000000000000beef", + Sender: "0x000000000000000000000000000000000000dead", + Amount: "1000", + ExternalAssetAddr: "0x000000000000000000000000000000000000cafe", + Prc20AssetAddr: "0x000000000000000000000000000000000000bafe", + Payload: "0xabcdef", + GasLimit: "21000", + TxType: types.TxType_FUNDS_AND_PAYLOAD, + PcTx: &types.OriginatingPcTx{ + TxHash: "0xpc123", + LogIndex: "1", + }, + Id: "0", + OutboundStatus: types.Status_PENDING, + }, }, UniversalStatus: types.UniversalTxStatus_PC_EXECUTED_SUCCESS, } @@ -75,7 +88,9 @@ func TestUniversalTx_ValidateBasic(t *testing.T) { name: "invalid outbound", universal: func() types.UniversalTx { utx := validUniversal - utx.OutboundTx = &types.OutboundTx{} // Recipient empty + utx.OutboundTx = []*types.OutboundTx{ + {}, + } // Recipient empty return utx }(), expectError: true, diff --git a/x/uregistry/keeper/keeper.go b/x/uregistry/keeper/keeper.go index 90411fcb..1b5df4b7 100755 --- a/x/uregistry/keeper/keeper.go +++ b/x/uregistry/keeper/keeper.go @@ -3,6 +3,7 @@ package keeper import ( "context" "errors" + "strings" "github.com/cosmos/cosmos-sdk/codec" @@ -137,3 +138,46 @@ func (k Keeper) GetTokenConfig(ctx context.Context, chain, address string) (type func (k Keeper) SchemaBuilder() *collections.SchemaBuilder { return k.schemaBuilder } + +func (k Keeper) GetTokenConfigByPRC20( + ctx context.Context, 
+ chain string, + prc20Addr string, +) (types.TokenConfig, error) { + + prc20Addr = strings.ToLower(strings.TrimSpace(prc20Addr)) + + var found *types.TokenConfig + + err := k.TokenConfigs.Walk(ctx, nil, func( + key string, + cfg types.TokenConfig, + ) (bool, error) { + + // chain must match + if cfg.Chain != chain { + return false, nil + } + + if cfg.NativeRepresentation == nil { + return false, nil + } + + if strings.ToLower(cfg.NativeRepresentation.ContractAddress) == prc20Addr { + found = &cfg + return true, nil // stop walk + } + + return false, nil + }) + + if err != nil { + return types.TokenConfig{}, err + } + + if found == nil { + return types.TokenConfig{}, collections.ErrNotFound + } + + return *found, nil +} diff --git a/x/uregistry/types/constants.go b/x/uregistry/types/constants.go index 37d0c687..0e71d28f 100644 --- a/x/uregistry/types/constants.go +++ b/x/uregistry/types/constants.go @@ -40,7 +40,7 @@ var SYSTEM_CONTRACTS = map[string]ContractAddresses{ ProxyAdmin: "0xf2000000000000000000000000000000000000BC", Implementation: "0xF1000000000000000000000000000000000000Bc", }, - "RESERVED_0": { + "UNIVERSAL_GATEWAY_PC": { Address: "0x00000000000000000000000000000000000000B0", ProxyAdmin: "0xf2000000000000000000000000000000000000b0", Implementation: "0xF1000000000000000000000000000000000000b0", @@ -72,22 +72,22 @@ var BYTECODE = map[string]ByteCodes{ }, "UNIVERSAL_BATCH_CALL": { IMPL_RUNTIME: ReservedImplRuntimeBytecode, - PROXY_RUNTIME: 
common.FromHex("0x608060405261000c61000e565b005b7f00000000000000000000000000000000000000000000000000000000000000BC73ffffffffffffffffffffffffffffffffffffffff1633036100d1575f357fffffffff00000000000000000000000000000000000000000000000000000000167f4f1ef28600000000000000000000000000000000000000000000000000000000146100c7576040517fd2b576ec00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6100cf6100d9565b565b6100cf610107565b5f806100e8366004818461043e565b8101906100f59190610492565b915091506101038282610117565b5050565b6100cf61011261017e565b6101c2565b610120826101e0565b60405173ffffffffffffffffffffffffffffffffffffffff8316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a28051156101765761017182826102b3565b505050565b610103610332565b5f6101bd7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b365f80375f80365f845af43d5f803e8080156101dc573d5ff35b3d5ffd5b8073ffffffffffffffffffffffffffffffffffffffff163b5f0361024d576040517f4c9c8ce300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60605f808473ffffffffffffffffffffffffffffffffffffffff16846040516102dc91906105ad565b5f60405180830381855af49150503d805f8114610314576040519150601f19603f3d011682016040523d82523d5f602084013e610319565b606091505b509150915061032985838361036a565b95945050505050565b34156100cf576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608261037f5761037a826103fc565b6103f5565b81511580156103a3575073ffffffffffffffffffffffffffffffffffffffff84163b155b156103f2576040517f9996b31500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffff
ffffffffff85166004820152602401610244565b50805b9392505050565b80511561040c5780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f808585111561044c575f80fd5b83861115610458575f80fd5b5050820193919092039150565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f80604083850312156104a3575f80fd5b823573ffffffffffffffffffffffffffffffffffffffff811681146104c6575f80fd5b9150602083013567ffffffffffffffff8111156104e1575f80fd5b8301601f810185136104f1575f80fd5b803567ffffffffffffffff81111561050b5761050b610465565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561057757610577610465565b60405281815282820160200187101561058e575f80fd5b816020840160208301375f602083830101528093505050509250929050565b5f82515f5b818110156105cc57602081860181015185830152016105b2565b505f92019182525091905056fea2646970667358221220e70393c35b3e95d53f92887d1108e4b563be364c093a130a7bb2e621a0aa9b8f64736f6c634300081a0033"), + PROXY_RUNTIME: 
common.FromHex("0x608060405261000c61000e565b005b7f000000000000000000000000f2000000000000000000000000000000000000BC73ffffffffffffffffffffffffffffffffffffffff1633036100d1575f357fffffffff00000000000000000000000000000000000000000000000000000000167f4f1ef28600000000000000000000000000000000000000000000000000000000146100c7576040517fd2b576ec00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6100cf6100d9565b565b6100cf610107565b5f806100e8366004818461043e565b8101906100f59190610492565b915091506101038282610117565b5050565b6100cf61011261017e565b6101c2565b610120826101e0565b60405173ffffffffffffffffffffffffffffffffffffffff8316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a28051156101765761017182826102b3565b505050565b610103610332565b5f6101bd7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b365f80375f80365f845af43d5f803e8080156101dc573d5ff35b3d5ffd5b8073ffffffffffffffffffffffffffffffffffffffff163b5f0361024d576040517f4c9c8ce300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60605f808473ffffffffffffffffffffffffffffffffffffffff16846040516102dc91906105ad565b5f60405180830381855af49150503d805f8114610314576040519150601f19603f3d011682016040523d82523d5f602084013e610319565b606091505b509150915061032985838361036a565b95945050505050565b34156100cf576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608261037f5761037a826103fc565b6103f5565b81511580156103a3575073ffffffffffffffffffffffffffffffffffffffff84163b155b156103f2576040517f9996b31500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffff
ffffffffff85166004820152602401610244565b50805b9392505050565b80511561040c5780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f808585111561044c575f80fd5b83861115610458575f80fd5b5050820193919092039150565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f80604083850312156104a3575f80fd5b823573ffffffffffffffffffffffffffffffffffffffff811681146104c6575f80fd5b9150602083013567ffffffffffffffff8111156104e1575f80fd5b8301601f810185136104f1575f80fd5b803567ffffffffffffffff81111561050b5761050b610465565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561057757610577610465565b60405281815282820160200187101561058e575f80fd5b816020840160208301375f602083830101528093505050509250929050565b5f82515f5b818110156105cc57602081860181015185830152016105b2565b505f92019182525091905056fea2646970667358221220e70393c35b3e95d53f92887d1108e4b563be364c093a130a7bb2e621a0aa9b8f64736f6c634300081a0033"), ADMIN_RUNTIME: ProxyAdminRuntimeBytecode, }, - "RESERVED_0": { + "UNIVERSAL_GATEWAY_PC": { IMPL_RUNTIME: ReservedImplRuntimeBytecode, - PROXY_RUNTIME: 
common.FromHex("0x608060405261000c61000e565b005b7f00000000000000000000000000000000000000000000000000000000000000b073ffffffffffffffffffffffffffffffffffffffff1633036100d1575f357fffffffff00000000000000000000000000000000000000000000000000000000167f4f1ef28600000000000000000000000000000000000000000000000000000000146100c7576040517fd2b576ec00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6100cf6100d9565b565b6100cf610107565b5f806100e8366004818461043e565b8101906100f59190610492565b915091506101038282610117565b5050565b6100cf61011261017e565b6101c2565b610120826101e0565b60405173ffffffffffffffffffffffffffffffffffffffff8316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a28051156101765761017182826102b3565b505050565b610103610332565b5f6101bd7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b365f80375f80365f845af43d5f803e8080156101dc573d5ff35b3d5ffd5b8073ffffffffffffffffffffffffffffffffffffffff163b5f0361024d576040517f4c9c8ce300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60605f808473ffffffffffffffffffffffffffffffffffffffff16846040516102dc91906105ad565b5f60405180830381855af49150503d805f8114610314576040519150601f19603f3d011682016040523d82523d5f602084013e610319565b606091505b509150915061032985838361036a565b95945050505050565b34156100cf576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608261037f5761037a826103fc565b6103f5565b81511580156103a3575073ffffffffffffffffffffffffffffffffffffffff84163b155b156103f2576040517f9996b31500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffff
ffffffffff85166004820152602401610244565b50805b9392505050565b80511561040c5780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f808585111561044c575f80fd5b83861115610458575f80fd5b5050820193919092039150565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f80604083850312156104a3575f80fd5b823573ffffffffffffffffffffffffffffffffffffffff811681146104c6575f80fd5b9150602083013567ffffffffffffffff8111156104e1575f80fd5b8301601f810185136104f1575f80fd5b803567ffffffffffffffff81111561050b5761050b610465565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561057757610577610465565b60405281815282820160200187101561058e575f80fd5b816020840160208301375f602083830101528093505050509250929050565b5f82515f5b818110156105cc57602081860181015185830152016105b2565b505f92019182525091905056fea2646970667358221220e70393c35b3e95d53f92887d1108e4b563be364c093a130a7bb2e621a0aa9b8f64736f6c634300081a0033"), + PROXY_RUNTIME: 
common.FromHex("0x608060405261000c61000e565b005b7f000000000000000000000000f2000000000000000000000000000000000000b073ffffffffffffffffffffffffffffffffffffffff1633036100d1575f357fffffffff00000000000000000000000000000000000000000000000000000000167f4f1ef28600000000000000000000000000000000000000000000000000000000146100c7576040517fd2b576ec00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6100cf6100d9565b565b6100cf610107565b5f806100e8366004818461043e565b8101906100f59190610492565b915091506101038282610117565b5050565b6100cf61011261017e565b6101c2565b610120826101e0565b60405173ffffffffffffffffffffffffffffffffffffffff8316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a28051156101765761017182826102b3565b505050565b610103610332565b5f6101bd7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b365f80375f80365f845af43d5f803e8080156101dc573d5ff35b3d5ffd5b8073ffffffffffffffffffffffffffffffffffffffff163b5f0361024d576040517f4c9c8ce300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60605f808473ffffffffffffffffffffffffffffffffffffffff16846040516102dc91906105ad565b5f60405180830381855af49150503d805f8114610314576040519150601f19603f3d011682016040523d82523d5f602084013e610319565b606091505b509150915061032985838361036a565b95945050505050565b34156100cf576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608261037f5761037a826103fc565b6103f5565b81511580156103a3575073ffffffffffffffffffffffffffffffffffffffff84163b155b156103f2576040517f9996b31500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffff
ffffffffff85166004820152602401610244565b50805b9392505050565b80511561040c5780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f808585111561044c575f80fd5b83861115610458575f80fd5b5050820193919092039150565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f80604083850312156104a3575f80fd5b823573ffffffffffffffffffffffffffffffffffffffff811681146104c6575f80fd5b9150602083013567ffffffffffffffff8111156104e1575f80fd5b8301601f810185136104f1575f80fd5b803567ffffffffffffffff81111561050b5761050b610465565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561057757610577610465565b60405281815282820160200187101561058e575f80fd5b816020840160208301375f602083830101528093505050509250929050565b5f82515f5b818110156105cc57602081860181015185830152016105b2565b505f92019182525091905056fea2646970667358221220e70393c35b3e95d53f92887d1108e4b563be364c093a130a7bb2e621a0aa9b8f64736f6c634300081a0033"), ADMIN_RUNTIME: ProxyAdminRuntimeBytecode, }, "RESERVED_1": { IMPL_RUNTIME: ReservedImplRuntimeBytecode, - PROXY_RUNTIME: 
common.FromHex("0x608060405261000c61000e565b005b7f00000000000000000000000000000000000000000000000000000000000000b173ffffffffffffffffffffffffffffffffffffffff1633036100d1575f357fffffffff00000000000000000000000000000000000000000000000000000000167f4f1ef28600000000000000000000000000000000000000000000000000000000146100c7576040517fd2b576ec00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6100cf6100d9565b565b6100cf610107565b5f806100e8366004818461043e565b8101906100f59190610492565b915091506101038282610117565b5050565b6100cf61011261017e565b6101c2565b610120826101e0565b60405173ffffffffffffffffffffffffffffffffffffffff8316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a28051156101765761017182826102b3565b505050565b610103610332565b5f6101bd7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b365f80375f80365f845af43d5f803e8080156101dc573d5ff35b3d5ffd5b8073ffffffffffffffffffffffffffffffffffffffff163b5f0361024d576040517f4c9c8ce300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60605f808473ffffffffffffffffffffffffffffffffffffffff16846040516102dc91906105ad565b5f60405180830381855af49150503d805f8114610314576040519150601f19603f3d011682016040523d82523d5f602084013e610319565b606091505b509150915061032985838361036a565b95945050505050565b34156100cf576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608261037f5761037a826103fc565b6103f5565b81511580156103a3575073ffffffffffffffffffffffffffffffffffffffff84163b155b156103f2576040517f9996b31500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffff
ffffffffff85166004820152602401610244565b50805b9392505050565b80511561040c5780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f808585111561044c575f80fd5b83861115610458575f80fd5b5050820193919092039150565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f80604083850312156104a3575f80fd5b823573ffffffffffffffffffffffffffffffffffffffff811681146104c6575f80fd5b9150602083013567ffffffffffffffff8111156104e1575f80fd5b8301601f810185136104f1575f80fd5b803567ffffffffffffffff81111561050b5761050b610465565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561057757610577610465565b60405281815282820160200187101561058e575f80fd5b816020840160208301375f602083830101528093505050509250929050565b5f82515f5b818110156105cc57602081860181015185830152016105b2565b505f92019182525091905056fea2646970667358221220e70393c35b3e95d53f92887d1108e4b563be364c093a130a7bb2e621a0aa9b8f64736f6c634300081a0033"), + PROXY_RUNTIME: 
common.FromHex("0x608060405261000c61000e565b005b7f000000000000000000000000F2000000000000000000000000000000000000b173ffffffffffffffffffffffffffffffffffffffff1633036100d1575f357fffffffff00000000000000000000000000000000000000000000000000000000167f4f1ef28600000000000000000000000000000000000000000000000000000000146100c7576040517fd2b576ec00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6100cf6100d9565b565b6100cf610107565b5f806100e8366004818461043e565b8101906100f59190610492565b915091506101038282610117565b5050565b6100cf61011261017e565b6101c2565b610120826101e0565b60405173ffffffffffffffffffffffffffffffffffffffff8316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a28051156101765761017182826102b3565b505050565b610103610332565b5f6101bd7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b365f80375f80365f845af43d5f803e8080156101dc573d5ff35b3d5ffd5b8073ffffffffffffffffffffffffffffffffffffffff163b5f0361024d576040517f4c9c8ce300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60605f808473ffffffffffffffffffffffffffffffffffffffff16846040516102dc91906105ad565b5f60405180830381855af49150503d805f8114610314576040519150601f19603f3d011682016040523d82523d5f602084013e610319565b606091505b509150915061032985838361036a565b95945050505050565b34156100cf576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608261037f5761037a826103fc565b6103f5565b81511580156103a3575073ffffffffffffffffffffffffffffffffffffffff84163b155b156103f2576040517f9996b31500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffff
ffffffffff85166004820152602401610244565b50805b9392505050565b80511561040c5780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f808585111561044c575f80fd5b83861115610458575f80fd5b5050820193919092039150565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f80604083850312156104a3575f80fd5b823573ffffffffffffffffffffffffffffffffffffffff811681146104c6575f80fd5b9150602083013567ffffffffffffffff8111156104e1575f80fd5b8301601f810185136104f1575f80fd5b803567ffffffffffffffff81111561050b5761050b610465565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561057757610577610465565b60405281815282820160200187101561058e575f80fd5b816020840160208301375f602083830101528093505050509250929050565b5f82515f5b818110156105cc57602081860181015185830152016105b2565b505f92019182525091905056fea2646970667358221220e70393c35b3e95d53f92887d1108e4b563be364c093a130a7bb2e621a0aa9b8f64736f6c634300081a0033"), ADMIN_RUNTIME: ProxyAdminRuntimeBytecode, }, "RESERVED_2": { IMPL_RUNTIME: ReservedImplRuntimeBytecode, - PROXY_RUNTIME: 
common.FromHex("0x608060405261000c61000e565b005b7f00000000000000000000000000000000000000000000000000000000000000b273ffffffffffffffffffffffffffffffffffffffff1633036100d1575f357fffffffff00000000000000000000000000000000000000000000000000000000167f4f1ef28600000000000000000000000000000000000000000000000000000000146100c7576040517fd2b576ec00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6100cf6100d9565b565b6100cf610107565b5f806100e8366004818461043e565b8101906100f59190610492565b915091506101038282610117565b5050565b6100cf61011261017e565b6101c2565b610120826101e0565b60405173ffffffffffffffffffffffffffffffffffffffff8316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a28051156101765761017182826102b3565b505050565b610103610332565b5f6101bd7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b365f80375f80365f845af43d5f803e8080156101dc573d5ff35b3d5ffd5b8073ffffffffffffffffffffffffffffffffffffffff163b5f0361024d576040517f4c9c8ce300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60605f808473ffffffffffffffffffffffffffffffffffffffff16846040516102dc91906105ad565b5f60405180830381855af49150503d805f8114610314576040519150601f19603f3d011682016040523d82523d5f602084013e610319565b606091505b509150915061032985838361036a565b95945050505050565b34156100cf576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608261037f5761037a826103fc565b6103f5565b81511580156103a3575073ffffffffffffffffffffffffffffffffffffffff84163b155b156103f2576040517f9996b31500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffff
ffffffffff85166004820152602401610244565b50805b9392505050565b80511561040c5780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f808585111561044c575f80fd5b83861115610458575f80fd5b5050820193919092039150565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f80604083850312156104a3575f80fd5b823573ffffffffffffffffffffffffffffffffffffffff811681146104c6575f80fd5b9150602083013567ffffffffffffffff8111156104e1575f80fd5b8301601f810185136104f1575f80fd5b803567ffffffffffffffff81111561050b5761050b610465565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561057757610577610465565b60405281815282820160200187101561058e575f80fd5b816020840160208301375f602083830101528093505050509250929050565b5f82515f5b818110156105cc57602081860181015185830152016105b2565b505f92019182525091905056fea2646970667358221220e70393c35b3e95d53f92887d1108e4b563be364c093a130a7bb2e621a0aa9b8f64736f6c634300081a0033"), + PROXY_RUNTIME: 
common.FromHex("0x608060405261000c61000e565b005b7f000000000000000000000000f2000000000000000000000000000000000000b273ffffffffffffffffffffffffffffffffffffffff1633036100d1575f357fffffffff00000000000000000000000000000000000000000000000000000000167f4f1ef28600000000000000000000000000000000000000000000000000000000146100c7576040517fd2b576ec00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6100cf6100d9565b565b6100cf610107565b5f806100e8366004818461043e565b8101906100f59190610492565b915091506101038282610117565b5050565b6100cf61011261017e565b6101c2565b610120826101e0565b60405173ffffffffffffffffffffffffffffffffffffffff8316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a28051156101765761017182826102b3565b505050565b610103610332565b5f6101bd7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b365f80375f80365f845af43d5f803e8080156101dc573d5ff35b3d5ffd5b8073ffffffffffffffffffffffffffffffffffffffff163b5f0361024d576040517f4c9c8ce300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60605f808473ffffffffffffffffffffffffffffffffffffffff16846040516102dc91906105ad565b5f60405180830381855af49150503d805f8114610314576040519150601f19603f3d011682016040523d82523d5f602084013e610319565b606091505b509150915061032985838361036a565b95945050505050565b34156100cf576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608261037f5761037a826103fc565b6103f5565b81511580156103a3575073ffffffffffffffffffffffffffffffffffffffff84163b155b156103f2576040517f9996b31500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffff
ffffffffff85166004820152602401610244565b50805b9392505050565b80511561040c5780518082602001fd5b6040517fd6bda27500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f808585111561044c575f80fd5b83861115610458575f80fd5b5050820193919092039150565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f80604083850312156104a3575f80fd5b823573ffffffffffffffffffffffffffffffffffffffff811681146104c6575f80fd5b9150602083013567ffffffffffffffff8111156104e1575f80fd5b8301601f810185136104f1575f80fd5b803567ffffffffffffffff81111561050b5761050b610465565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561057757610577610465565b60405281815282820160200187101561058e575f80fd5b816020840160208301375f602083830101528093505050509250929050565b5f82515f5b818110156105cc57602081860181015185830152016105b2565b505f92019182525091905056fea2646970667358221220e70393c35b3e95d53f92887d1108e4b563be364c093a130a7bb2e621a0aa9b8f64736f6c634300081a0033"), ADMIN_RUNTIME: ProxyAdminRuntimeBytecode, }, }