diff --git a/go/mysql/binlog_dump.go b/go/mysql/binlog_dump.go index cfacf742856..323ff5e6c8f 100644 --- a/go/mysql/binlog_dump.go +++ b/go/mysql/binlog_dump.go @@ -18,7 +18,6 @@ package mysql import ( "encoding/binary" - "io" "vitess.io/vitess/go/mysql/replication" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -52,7 +51,7 @@ func (c *Conn) parseComBinlogDump(data []byte) (logFile string, binlogPos uint32 return logFile, binlogPos, nil } -func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint64, position replication.Position, err error) { +func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint64, position replication.Position, nonBlock bool, err error) { // see https://dev.mysql.com/doc/internals/en/com-binlog-dump-gtid.html pos := 1 @@ -62,31 +61,36 @@ func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint6 fileNameLen, pos, ok := readUint32(data, pos) if !ok { - return logFile, logPos, position, readPacketErr + return logFile, logPos, position, nonBlock, readPacketErr + } + if pos+int(fileNameLen) > len(data) { + return logFile, logPos, position, nonBlock, readPacketErr } logFile = string(data[pos : pos+int(fileNameLen)]) pos += int(fileNameLen) logPos, pos, ok = readUint64(data, pos) if !ok { - return logFile, logPos, position, readPacketErr + return logFile, logPos, position, nonBlock, readPacketErr } dataSize, pos, ok := readUint32(data, pos) if !ok { - return logFile, logPos, position, readPacketErr + return logFile, logPos, position, nonBlock, readPacketErr + } + if pos+int(dataSize) > len(data) { + return logFile, logPos, position, nonBlock, readPacketErr } if gtidBytes := data[pos : pos+int(dataSize)]; len(gtidBytes) != 0 { gtid, err := replication.NewMysql56GTIDSetFromSIDBlock(gtidBytes) if err != nil { - return logFile, logPos, position, vterrors.Wrapf(err, "error parsing GTID from BinlogDumpGTID packet") + return logFile, logPos, position, nonBlock, vterrors.Wrapf(err, "error parsing GTID from BinlogDumpGTID packet") } // ComBinlogDumpGTID is a MySQL specific protocol. The GTID flavor is necessarily MySQL 56 position = replication.Position{GTIDSet: gtid} } - if flags2&BinlogDumpNonBlock != 0 { - return logFile, logPos, position, io.EOF - } - return logFile, logPos, position, nil + nonBlock = flags2&BinlogDumpNonBlock != 0 + + return logFile, logPos, position, nonBlock, nil } diff --git a/go/mysql/conn.go b/go/mysql/conn.go index 95016bf21d7..2b0877a65e1 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -347,7 +347,9 @@ func (c *Conn) endWriterBuffering() error { c.bufferedWriter = nil }() - c.flushTimer.Stop() + if c.flushTimer != nil { + c.flushTimer.Stop() + } return c.bufferedWriter.Flush() } @@ -596,6 +598,21 @@ func (c *Conn) ReadPacket() ([]byte, error) { return result, err } +func (c *Conn) WritePacket(data []byte) error { + return c.writePacket(data) +} + +// ReadOnePacket reads a single packet from the underlying connection without +// reassembling multi-packet messages. This is useful for streaming raw packets. +// Returns nil, nil for a zero-length packet (which follows a max-size packet). +func (c *Conn) ReadOnePacket() ([]byte, error) { + result, err := c.readOnePacket() + if err != nil { + return nil, sqlerror.NewSQLErrorf(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) + } + return result, err +} + // writePacket writes a packet, possibly cutting it into multiple // chunks. 
Note this is not very efficient, as the client probably // has to build the []byte and that makes a memory copy. @@ -879,6 +896,12 @@ func (c *Conn) writeErrorPacketFromError(err error) error { return c.writeErrorPacket(sqlerror.ERUnknownError, sqlerror.SSUnknownSQLState, "unknown error: %v", err) } +// WriteErrorPacketFromError is the exported version of writeErrorPacketFromError +// for use by external packages (e.g., vtgate's binlog dump handler). +func (c *Conn) WriteErrorPacketFromError(err error) error { + return c.writeErrorPacketFromError(err) +} + // writeEOFPacket writes an EOF packet, through the buffer, and // doesn't flush (as it is used as part of a query result). func (c *Conn) writeEOFPacket(flags uint16, warnings uint16) error { @@ -977,11 +1000,11 @@ func (c *Conn) handleComRegisterReplica(handler Handler, data []byte) (kontinue return false } if err := handler.ComRegisterReplica(c, replicaHost, replicaPort, replicaUser, replicaPassword); err != nil { - c.writeErrorPacketFromError(err) + c.WriteErrorPacketFromError(err) return false } if err := c.writeOKPacket(&PacketOK{}); err != nil { - c.writeErrorPacketFromError(err) + c.WriteErrorPacketFromError(err) } return true } @@ -1000,11 +1023,17 @@ func (c *Conn) handleComBinlogDump(handler Handler, data []byte) (kontinue bool) logfile, binlogPos, err := c.parseComBinlogDump(data) if err != nil { - log.Errorf("conn %v: parseComBinlogDumpGTID failed: %v", c.ID(), err) + log.Errorf("conn %v: parseComBinlogDump failed: %v", c.ID(), err) + if writeErr := c.WriteErrorPacketFromError(err); writeErr != nil { + log.Errorf("conn %v: failed to write error packet: %v", c.ID(), writeErr) + } return false } if err := handler.ComBinlogDump(c, logfile, binlogPos); err != nil { - log.Error(err.Error()) + log.Errorf("conn %v: ComBinlogDump failed: %v", c.ID(), err) + if writeErr := c.WriteErrorPacketFromError(err); writeErr != nil { + log.Errorf("conn %v: failed to write error packet: %v", c.ID(), writeErr) + } return false } return kontinue @@ -1022,13 +1051,19 @@ func (c *Conn) handleComBinlogDumpGTID(handler Handler, data []byte) (kontinue b } }() - logFile, logPos, position, err := c.parseComBinlogDumpGTID(data) + logFile, logPos, position, nonBlock, err := c.parseComBinlogDumpGTID(data) if err != nil { log.Errorf("conn %v: parseComBinlogDumpGTID failed: %v", c.ID(), err) + if writeErr := c.WriteErrorPacketFromError(err); writeErr != nil { + log.Errorf("conn %v: failed to write error packet: %v", c.ID(), writeErr) + } return false } - if err := handler.ComBinlogDumpGTID(c, logFile, logPos, position.GTIDSet); err != nil { - log.Error(err.Error()) + if err := handler.ComBinlogDumpGTID(c, logFile, logPos, position.GTIDSet, nonBlock); err != nil { + log.Errorf("conn %v: ComBinlogDumpGTID failed: %v", c.ID(), err) + if writeErr := c.WriteErrorPacketFromError(err); writeErr != nil { + log.Errorf("conn %v: failed to write error packet: %v", c.ID(), writeErr) + } return false } return kontinue @@ -1042,7 +1077,7 @@ func (c *Conn) handleComResetConnection(handler Handler) { c.PrepareData = make(map[uint32]*PrepareData) err := c.writeOKPacket(&PacketOK{}) if err != nil { - c.writeErrorPacketFromError(err) + c.WriteErrorPacketFromError(err) } } diff --git a/go/mysql/conn_test.go b/go/mysql/conn_test.go index aab26763fcd..38f6fb63809 100644 --- a/go/mysql/conn_test.go +++ b/go/mysql/conn_test.go @@ -1036,6 +1036,30 @@ func TestConnectionErrorWhileWritingComStmtExecute(t *testing.T) { require.False(t, res, "we should beak the connection in case of 
error writing error packet") } +func TestParseComBinlogDumpGTID(t *testing.T) { + sConn := newConn(testConn{}, DefaultFlushDelay, 0) + + // Test packet structure (COM_BINLOG_DUMP_GTID): + // - 1 byte: command (0x1e) + // - 2 bytes: flags (0x0001 = NON_BLOCK) + // - 4 bytes: server_id (0) + // - 4 bytes: filename_len (24) + // - 24 bytes: filename ("vt_0000000100-bin.000001") + // - 8 bytes: log_pos (4) + // - 4 bytes: gtid_data_len (48) + // - 48 bytes: SID block for GTID "24bcf1e2-01e0-11ee-8c9c-0242ac120002:1-8" + input, err := hex.DecodeString("1e0100000000001800000076745f303030303030303130302d62696e2e303030303031040000000000000030000000010000000000000024bcf1e201e011ee8c9c0242ac120002010000000000000001000000000000000900000000000000") + require.NoError(t, err) + + logFile, logPos, position, nonBlock, err := sConn.parseComBinlogDumpGTID(input) + require.NoError(t, err) + + require.Equal(t, "vt_0000000100-bin.000001", logFile) + require.Equal(t, uint64(4), logPos) + require.True(t, nonBlock) + require.Equal(t, "24bcf1e2-01e0-11ee-8c9c-0242ac120002:1-8", position.String()) +} + var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") func randSeq(n int) string { @@ -1150,7 +1174,7 @@ func (t testRun) ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error panic("implement me") } -func (t testRun) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { +func (t testRun) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet, nonBlock bool) error { panic("implement me") } diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go index cff28d710d7..abf43baafd5 100644 --- a/go/mysql/fakesqldb/server.go +++ b/go/mysql/fakesqldb/server.go @@ -564,7 +564,7 @@ func (db *DB) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos uint32) err } // ComBinlogDumpGTID is part of the mysql.Handler interface. -func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { +func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet, nonBlock bool) error { return nil } diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go index af81c4dd049..3a705ab062d 100644 --- a/go/mysql/flavor.go +++ b/go/mysql/flavor.go @@ -103,9 +103,16 @@ type flavor interface { // startSQLThreadCommand returns the command to start the replica's SQL thread only. startSQLThreadCommand() string - // sendBinlogDumpCommand sends the packet required to start - // dumping binlogs from the specified location. - sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error + // sendBinlogDumpCommand sends the COM_BINLOG_DUMP packet to start + // dumping binlogs from the specified file and position. + // This is the original file/position-based protocol. + sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, binlogPos uint32) error + + // sendBinlogDumpGTIDCommand sends the COM_BINLOG_DUMP_GTID packet to start + // dumping binlogs from the specified GTID position. + // If nonBlock is true, the server will return EOF when it reaches the end + // of the binlog instead of blocking and waiting for new events. + sendBinlogDumpGTIDCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position, nonBlock bool) error // readBinlogEvent reads the next BinlogEvent from the connection. 
readBinlogEvent(c *Conn) (BinlogEvent, error) @@ -348,15 +355,24 @@ func (c *Conn) StartSQLThreadCommand() string { return c.flavor.startSQLThreadCommand() } -// SendBinlogDumpCommand sends the flavor-specific version of -// the COM_BINLOG_DUMP command to start dumping raw binlog +// SendBinlogDumpCommand sends the COM_BINLOG_DUMP command to start +// dumping raw binlog events over a server connection, starting at +// a given file and position. This is the original file/position-based protocol. +func (c *Conn) SendBinlogDumpCommand(serverID uint32, binlogFilename string, binlogPos uint32) error { + return c.flavor.sendBinlogDumpCommand(c, serverID, binlogFilename, binlogPos) +} + +// SendBinlogDumpGTIDCommand sends the flavor-specific version of +// the COM_BINLOG_DUMP_GTID command to start dumping raw binlog // events over a server connection, starting at a given GTID. -func (c *Conn) SendBinlogDumpCommand(serverID uint32, binlogFilename string, startPos replication.Position) error { - return c.flavor.sendBinlogDumpCommand(c, serverID, binlogFilename, startPos) +// If nonBlock is true, the server will return EOF when it reaches the end +// of the binlog instead of blocking and waiting for new events. +func (c *Conn) SendBinlogDumpGTIDCommand(serverID uint32, binlogFilename string, startPos replication.Position, nonBlock bool) error { + return c.flavor.sendBinlogDumpGTIDCommand(c, serverID, binlogFilename, startPos, nonBlock) } // ReadBinlogEvent reads the next BinlogEvent. This must be used -// in conjunction with SendBinlogDumpCommand. +// in conjunction with SendBinlogDumpCommand or SendBinlogDumpGTIDCommand. func (c *Conn) ReadBinlogEvent() (BinlogEvent, error) { return c.flavor.readBinlogEvent(c) } diff --git a/go/mysql/flavor_filepos.go b/go/mysql/flavor_filepos.go index 7f857ffbc39..2e8cf7cde68 100644 --- a/go/mysql/flavor_filepos.go +++ b/go/mysql/flavor_filepos.go @@ -124,7 +124,14 @@ func (flv *filePosFlavor) startSQLThreadCommand() string { } // sendBinlogDumpCommand is part of the Flavor interface. -func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { +func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, binlogPos uint32) error { + flv.file = binlogFilename + return c.WriteComBinlogDump(serverID, binlogFilename, uint64(binlogPos), 0) +} + +// sendBinlogDumpGTIDCommand is part of the Flavor interface. +// Note: nonBlock is not supported for file position based replication as it uses COM_BINLOG_DUMP. +func (flv *filePosFlavor) sendBinlogDumpGTIDCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position, nonBlock bool) error { rpos, ok := startPos.GTIDSet.(replication.FilePosGTID) if !ok { return fmt.Errorf("startPos.GTIDSet is wrong type - expected filePosGTID, got: %#v", startPos.GTIDSet) diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go index bafb24a0c77..7148e25a581 100644 --- a/go/mysql/flavor_mariadb.go +++ b/go/mysql/flavor_mariadb.go @@ -117,7 +117,13 @@ func (mariadbFlavor) startSQLThreadCommand() string { } // sendBinlogDumpCommand is part of the Flavor interface. 
-func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { +func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, binlogPos uint32) error { + return c.WriteComBinlogDump(serverID, binlogFilename, uint64(binlogPos), 0) +} + +// sendBinlogDumpGTIDCommand is part of the Flavor interface. +// Note: nonBlock is not supported for MariaDB as it uses a different protocol (COM_BINLOG_DUMP). +func (mariadbFlavor) sendBinlogDumpGTIDCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position, nonBlock bool) error { // Tell the server that we understand GTIDs by setting // mariadb_slave_capability to MARIA_SLAVE_CAPABILITY_GTID = 4 (MariaDB >= 10.0.1). if _, err := c.ExecuteFetch("SET @mariadb_slave_capability=4", 0, false); err != nil { diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go index f4a53e920ad..39220303da3 100644 --- a/go/mysql/flavor_mysql.go +++ b/go/mysql/flavor_mysql.go @@ -218,7 +218,12 @@ func (mysqlFlavor) resetReplicationParametersCommands(c *Conn) []string { } // sendBinlogDumpCommand is part of the Flavor interface. -func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { +func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, binlogPos uint32) error { + return c.WriteComBinlogDump(serverID, binlogFilename, uint64(binlogPos), 0) +} + +// sendBinlogDumpGTIDCommand is part of the Flavor interface. +func (mysqlFlavor) sendBinlogDumpGTIDCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position, nonBlock bool) error { gtidSet, ok := startPos.GTIDSet.(replication.Mysql56GTIDSet) if !ok { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "startPos.GTIDSet is wrong type - expected Mysql56GTIDSet, got: %#v", startPos.GTIDSet) @@ -230,6 +235,9 @@ func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilenam sidBlock = gtidSet.SIDBlock() } var flags2 uint16 + if nonBlock { + flags2 |= BinlogDumpNonBlock + } if binlogFilename != "" { flags2 |= BinlogThroughPosition } diff --git a/go/mysql/replication.go b/go/mysql/replication.go index b7a0b82a868..ed4b9b91e8a 100644 --- a/go/mysql/replication.go +++ b/go/mysql/replication.go @@ -18,10 +18,11 @@ package mysql import ( "fmt" + "io" "math" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/proto/vtrpc" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -39,7 +40,7 @@ func (c *Conn) WriteComBinlogDump(serverID uint32, binlogFilename string, binlog // The binary log file position is a uint64, but the protocol command // only uses 4 bytes for the file position. 
if binlogPos > math.MaxUint32 { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "binlog position %d is too large, it must fit into 32 bits", binlogPos) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "binlog position %d is too large, it must fit into 32 bits", binlogPos) } c.sequence = 0 length := 1 + // ComBinlogDump @@ -70,10 +71,10 @@ func (c *Conn) AnalyzeSemiSyncAckRequest(buf []byte) (strippedBuf []byte, ackReq // semi sync indicator is expected // see https://dev.mysql.com/doc/internals/en/semi-sync-binlog-event.html if len(buf) < 2 { - return buf, false, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "semi sync indicator expected, but packet too small") + return buf, false, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "semi sync indicator expected, but packet too small") } if buf[0] != semiSyncIndicator { - return buf, false, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "semi sync indicator expected, but not found") + return buf, false, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "semi sync indicator expected, but not found") } return buf[2:], buf[1] == semiSyncAckRequested, nil } @@ -146,6 +147,64 @@ func (c *Conn) WriteBinlogEvent(ev BinlogEvent, semiSyncEnabled bool) error { return nil } +// WritePacketPayload writes a raw packet payload as a MySQL packet. +// The payload should include the packet type byte (e.g., 0x00 for OK/data) +// as the first byte. This method handles the packet header (length + sequence). +func (c *Conn) WritePacketPayload(payload []byte) error { + data, pos := c.startEphemeralPacketWithHeader(len(payload)) + copy(data[pos:], payload) + if err := c.writeEphemeralPacket(); err != nil { + return sqlerror.NewSQLErrorf(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) + } + return nil +} + +// WritePacketDirect writes a single packet payload with the correct header. +// Used for streaming pre-framed packets (like binlog events) directly. +// The caller is responsible for sending zero-length terminators when needed. 
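+// (A terminator is needed when the previous fragment is exactly the maximum
+// packet size of 16777215 bytes: readers treat a max-size packet as a
+// continuation, so an empty packet marks the end of the message; see
+// ReadOnePacket, which returns nil for such a packet.)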
+func (c *Conn) WritePacketDirect(payload []byte) error { + length := len(payload) + + var w io.Writer + c.bufMu.Lock() + if c.bufferedWriter != nil { + w = c.bufferedWriter + defer func() { + c.startFlushTimer() + c.bufMu.Unlock() + }() + } else { + c.bufMu.Unlock() + w = c.conn + } + + // Build header: 3 bytes length + 1 byte sequence + var header [4]byte + header[0] = byte(length) + header[1] = byte(length >> 8) + header[2] = byte(length >> 16) + header[3] = c.sequence + + // Write header + if n, err := w.Write(header[:]); err != nil { + return vterrors.Wrapf(err, "Write(header) failed") + } else if n != 4 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Write(header) short write: %v < 4", n) + } + + // Write payload + if length > 0 { + if n, err := w.Write(payload); err != nil { + return vterrors.Wrapf(err, "Write(payload) failed") + } else if n != length { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Write(payload) short write: %v < %v", n, length) + } + } + + c.sequence++ + return nil +} + type SemiSyncType int8 const ( diff --git a/go/mysql/replication_test.go b/go/mysql/replication_test.go index cf117da6f48..1106f95e66e 100644 --- a/go/mysql/replication_test.go +++ b/go/mysql/replication_test.go @@ -103,16 +103,18 @@ func TestComBinlogDumpGTID(t *testing.T) { 0x0e, 0x0d, // flags 0x04, 0x03, 0x02, 0x01, // server-id 0x07, 0x00, 0x00, 0x00, // binlog-filename-len - 'm', 'o', 'o', 'f', 'a', 'r', 'm', // bilog-filename + 'm', 'o', 'o', 'f', 'a', 'r', 'm', // binlog-filename 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, // binlog-pos 0x00, 0x00, 0x00, 0x00, // data-size is zero, no GTID payload } assert.Equal(t, expectedData, data) - logFile, logPos, pos, err := sConn.parseComBinlogDumpGTID(data) + logFile, logPos, pos, nonBlock, err := sConn.parseComBinlogDumpGTID(data) require.NoError(t, err, "parseComBinlogDumpGTID failed: %v", err) assert.Equal(t, "moofarm", logFile) assert.Equal(t, uint64(0x05060708090a0b0c), logPos) assert.True(t, pos.IsZero()) + // flags 0x0d0e does not have BinlogDumpNonBlock (0x01) set + assert.False(t, nonBlock) }) sConn.sequence = 0 @@ -138,17 +140,19 @@ func TestComBinlogDumpGTID(t *testing.T) { 0x0e, 0x0d, // flags 0x04, 0x03, 0x02, 0x01, // server-id 0x07, 0x00, 0x00, 0x00, // binlog-filename-len - 'm', 'o', 'o', 'f', 'a', 'r', 'm', // bilog-filename + 'm', 'o', 'o', 'f', 'a', 'r', 'm', // binlog-filename 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, // binlog-pos 0x30, 0x00, 0x00, 0x00, // data-size } expectedData = append(expectedData, sidBlock...) 
// data assert.Equal(t, expectedData, data) - logFile, logPos, pos, err := sConn.parseComBinlogDumpGTID(data) + logFile, logPos, pos, nonBlock, err := sConn.parseComBinlogDumpGTID(data) require.NoError(t, err, "parseComBinlogDumpGTID failed: %v", err) assert.Equal(t, "moofarm", logFile) assert.Equal(t, uint64(0x05060708090a0b0c), logPos) assert.Equal(t, gtidSet, pos.GTIDSet) + // flags 0x0d0e does not have BinlogDumpNonBlock (0x01) set + assert.False(t, nonBlock) }) sConn.sequence = 0 @@ -212,6 +216,236 @@ func TestComBinlogDumpGTID(t *testing.T) { }) } +func TestWritePacketPayload(t *testing.T) { + _ = utils.LeakCheckContext(t) + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + t.Run("Write raw packet payload", func(t *testing.T) { + // Simulate a packet payload with OK prefix (0x00) followed by binlog event data + payload := []byte{ + 0x00, // OK prefix + // Binlog event header (19 bytes for MySQL 5.6+) + 0x00, 0x00, 0x00, 0x00, // timestamp + 0x04, // event type (ROTATE_EVENT) + 0x01, 0x00, 0x00, 0x00, // server_id + 0x2f, 0x00, 0x00, 0x00, // event_length + 0x04, 0x00, 0x00, 0x00, // next_position + 0x00, 0x00, // flags + // Event data + 0xc8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // position + 'm', 'y', 's', 'q', 'l', '-', 'b', 'i', 'n', '.', '0', '0', '0', '1', '2', '3', // filename + // Checksum + 0xfd, 0x1c, 0x1d, 0x80, + } + + err := cConn.WritePacketPayload(payload) + require.NoError(t, err) + + data, err := sConn.ReadPacket() + require.NoError(t, err) + + // The received data should be exactly the payload + assert.Equal(t, payload, data) + }) + + sConn.sequence = 0 + cConn.sequence = 0 + + t.Run("Write empty payload", func(t *testing.T) { + err := cConn.WritePacketPayload([]byte{}) + require.NoError(t, err) + + data, err := sConn.ReadPacket() + require.NoError(t, err) + + // Should be empty (nil or empty slice) + assert.Empty(t, data) + }) + + sConn.sequence = 0 + cConn.sequence = 0 + + t.Run("Write multiple payloads", func(t *testing.T) { + payloads := [][]byte{ + {0x00, 0x01, 0x02, 0x03}, // OK packet with data + {0x00, 0x04, 0x05, 0x06, 0x07}, // OK packet with data + {0x00, 0x08}, // OK packet with data + } + + for _, payload := range payloads { + err := cConn.WritePacketPayload(payload) + require.NoError(t, err) + + data, err := sConn.ReadPacket() + require.NoError(t, err) + + assert.Equal(t, payload, data) + } + }) +} + +func TestWritePacketDirect(t *testing.T) { + _ = utils.LeakCheckContext(t) + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + t.Run("Write single packet", func(t *testing.T) { + payload := []byte{0x00, 0x01, 0x02, 0x03} + + err := cConn.WritePacketDirect(payload) + require.NoError(t, err) + + data, err := sConn.ReadPacket() + require.NoError(t, err) + assert.Equal(t, payload, data) + }) + + sConn.sequence = 0 + cConn.sequence = 0 + + t.Run("Write multiple packets sequentially", func(t *testing.T) { + packets := [][]byte{ + {0x00, 0x01, 0x02, 0x03}, // First packet + {0x04, 0x05, 0x06, 0x07, 0x08}, // Second packet + {0x09, 0x0a}, // Third packet + } + + for _, packet := range packets { + err := cConn.WritePacketDirect(packet) + require.NoError(t, err) + } + + // Read each packet and verify + for _, expected := range packets { + data, err := sConn.ReadPacket() + require.NoError(t, err) + assert.Equal(t, expected, data) + } + }) + + sConn.sequence = 0 + cConn.sequence = 0 + + t.Run("Write zero-length 
packet", func(t *testing.T) { + // Zero-length packets are valid terminators in MySQL protocol + err := cConn.WritePacketDirect([]byte{}) + require.NoError(t, err) + + // ReadPacket returns nil for zero-length packets + data, err := sConn.ReadPacket() + require.NoError(t, err) + assert.Empty(t, data) + }) +} + +func TestWritePacketDirectSequenceNumbers(t *testing.T) { + _ = utils.LeakCheckContext(t) + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + t.Run("Sequence numbers increment correctly", func(t *testing.T) { + // Verify sequence numbers start at 0 + assert.Equal(t, uint8(0), cConn.sequence, "Writer sequence should start at 0") + assert.Equal(t, uint8(0), sConn.sequence, "Reader sequence should start at 0") + + // Write first packet + err := cConn.WritePacketDirect([]byte{0x01}) + require.NoError(t, err) + assert.Equal(t, uint8(1), cConn.sequence, "Writer sequence should be 1 after first packet") + + // Write second packet + err = cConn.WritePacketDirect([]byte{0x02}) + require.NoError(t, err) + assert.Equal(t, uint8(2), cConn.sequence, "Writer sequence should be 2 after second packet") + + // Write third packet + err = cConn.WritePacketDirect([]byte{0x03}) + require.NoError(t, err) + assert.Equal(t, uint8(3), cConn.sequence, "Writer sequence should be 3 after third packet") + + // Read packets - reader validates sequence numbers internally + // If sequence numbers are wrong, ReadPacket will return an error + for i := 0; i < 3; i++ { + _, err := sConn.ReadPacket() + require.NoError(t, err, "ReadPacket should succeed with correct sequence number %d", i) + } + assert.Equal(t, uint8(3), sConn.sequence, "Reader sequence should be 3 after reading all packets") + }) + + sConn.sequence = 0 + cConn.sequence = 0 + + t.Run("Multi-packet sequence with zero-length terminator", func(t *testing.T) { + // Simulate a multi-packet binlog event: + // - First packet (max size would be 16MB, we use small for testing) + // - Second packet (continuation) + // - Zero-length terminator + packets := [][]byte{ + {0x00, 0x01, 0x02, 0x03}, // First fragment + {0x04, 0x05, 0x06, 0x07}, // Second fragment + {}, // Zero-length terminator + } + + // Write all packets + for i, packet := range packets { + err := cConn.WritePacketDirect(packet) + require.NoError(t, err) + assert.Equal(t, uint8(i+1), cConn.sequence, "Writer sequence should be %d after packet %d", i+1, i) + } + + // Read all packets - validates sequence numbers + for i, expected := range packets { + data, err := sConn.ReadPacket() + require.NoError(t, err, "ReadPacket %d should succeed", i) + if len(expected) == 0 { + // ReadPacket returns nil for zero-length packets + assert.Nil(t, data, "Zero-length packet should return nil") + } else { + assert.Equal(t, expected, data, "Packet %d content should match", i) + } + } + assert.Equal(t, uint8(3), sConn.sequence, "Reader sequence should be 3 after all packets") + }) + + sConn.sequence = 0 + cConn.sequence = 0 + + t.Run("Sequence wraps around at 256", func(t *testing.T) { + // Write 256 packets to test sequence wraparound + for i := 0; i < 256; i++ { + err := cConn.WritePacketDirect([]byte{byte(i)}) + require.NoError(t, err) + } + // After 256 packets, sequence should wrap to 0 + assert.Equal(t, uint8(0), cConn.sequence, "Writer sequence should wrap to 0 after 256 packets") + + // Write one more packet + err := cConn.WritePacketDirect([]byte{0xFF}) + require.NoError(t, err) + assert.Equal(t, uint8(1), cConn.sequence, "Writer sequence 
should be 1 after wraparound") + + // Read all 257 packets + for i := 0; i < 257; i++ { + _, err := sConn.ReadPacket() + require.NoError(t, err, "ReadPacket %d should succeed after wraparound", i) + } + assert.Equal(t, uint8(1), sConn.sequence, "Reader sequence should be 1 after reading all packets") + }) +} + func TestSendSemiSyncAck(t *testing.T) { _ = utils.LeakCheckContext(t) listener, sConn, cConn := createSocketPair(t) diff --git a/go/mysql/server.go b/go/mysql/server.go index 17c113248e6..cff9362dd0f 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -31,7 +31,6 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/netutil" - "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" @@ -125,7 +124,7 @@ type Handler interface { ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error // ComBinlogDumpGTID is called when a connection receives a ComBinlogDumpGTID request - ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error + ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet, nonBlock bool) error // WarningCount is called at the end of each query to obtain // the value to be returned to the client in the EOF packet. @@ -460,6 +459,16 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti defer connCountByTLSVer.Add(versionNoTLS, -1) } + // Check if username contains a target override (format: user|target). + // This allows replication clients to specify the target tablet in the username. + // The target string format is "keyspace:shard@type|alias" (e.g., "commerce:-80@replica|zone1-100"). + // Note: We use strings.Index to find only the FIRST "|" since the target string itself + // may contain "|" to separate tablet type from alias. + if idx := strings.Index(user, "|"); idx != -1 { + c.schemaName = user[idx+1:] + user = user[:idx] + } + // See what auth method the AuthServer wants to use for that user. negotiatedAuthMethod, err := negotiateAuthMethod(c, l.authServer, user, clientAuthMethod) @@ -527,9 +536,11 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti defer connCountPerUser.Add(c.User, -1) } - // Set initial db name. + // Set initial db name (or target string for binlog replication). + // Note: We use the raw schemaName without escaping because it may contain + // a target string with special characters (e.g., "keyspace:shard@type|alias"). 
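+	// For example, a client that authenticates as "vt_repl|commerce:0@primary|zone1-100"
+	// ends up issuing: use `commerce:0@primary|zone1-100`, which vtgate uses to set the
+	// session's TargetString rather than selecting a database.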
if c.schemaName != "" { - err = l.handler.ComQuery(c, "use "+sqlescape.EscapeID(c.schemaName), func(result *sqltypes.Result) error { + err = l.handler.ComQuery(c, "use `"+c.schemaName+"`", func(result *sqltypes.Result) error { return nil }) if err != nil { diff --git a/go/mysql/server_test.go b/go/mysql/server_test.go index ee30dded978..64e8789960a 100644 --- a/go/mysql/server_test.go +++ b/go/mysql/server_test.go @@ -265,7 +265,7 @@ func (th *testHandler) ComRegisterReplica(c *Conn, replicaHost string, replicaPo func (th *testHandler) ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error { return nil } -func (th *testHandler) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { +func (th *testHandler) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet, nonBlock bool) error { return nil } @@ -320,6 +320,43 @@ func TestConnectionFromListener(t *testing.T) { c.Close() } +// TestConnectionWithPipeInUsername tests that usernames with the format "user|target" +// are correctly parsed - the target is extracted into schemaName and the user is stripped. +// This is used for binlog dump connections where the target is specified in the username. +func TestConnectionWithPipeInUsername(t *testing.T) { + ctx := utils.LeakCheckContext(t) + th := &testHandler{} + + authServer := NewAuthServerStatic("", "", 0) + authServer.entries["vt_repl"] = []*AuthServerStaticEntry{{ + Password: "password1", + UserData: "userData1", + }} + defer authServer.close() + + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, false) + require.NoError(t, err, "NewListener failed") + host, port := getHostPort(t, l.Addr()) + // Connect with username containing pipe-separated target + params := &ConnParams{ + Host: host, + Port: port, + Uname: "vt_repl|commerce:0@primary|zone1-100", + Pass: "password1", + } + go l.Accept() + defer cleanupListener(ctx, l, params) + + c, err := Connect(ctx, params) + require.NoError(t, err, "Should be able to connect to server") + defer c.Close() + + // The schemaName should contain the target (everything after the first pipe) + // Note: The USE statement will be issued with this schemaName, which in vtgate + // sets the session's TargetString. + require.Equal(t, "commerce:0@primary|zone1-100", th.LastConn().schemaName, "Schema name should contain the target from username") +} + func TestConnectionWithoutSourceHost(t *testing.T) { ctx := utils.LeakCheckContext(t) th := &testHandler{} diff --git a/go/test/endtoend/binlogdump/binlogdump_test.go b/go/test/endtoend/binlogdump/binlogdump_test.go new file mode 100644 index 00000000000..891adead0b7 --- /dev/null +++ b/go/test/endtoend/binlogdump/binlogdump_test.go @@ -0,0 +1,1313 @@ +/* +Copyright 2025 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package binlogdump + +import ( + "context" + "encoding/hex" + "fmt" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +// TestBinlogDumpGTID_Streaming tests that binlog events are actually streamed from vttablet to the client. +func TestBinlogDumpGTID_Streaming(t *testing.T) { + ctx := t.Context() + + // Get the primary tablet for our keyspace + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + tabletAlias := primaryTablet.Alias + + t.Logf("Tablet alias: %s", tabletAlias) + + // Connect to vtgate for binlog streaming + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, tabletAlias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + t.Logf("Connecting to VTGate at %s:%d with username: %s", binlogParams.Host, binlogParams.Port, binlogParams.Uname) + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + t.Logf("Connected successfully, connection ID: %d", binlogConn.ConnectionID) + + // Start binlog dump with no initial GTID - will start from current position + err = binlogConn.WriteComBinlogDumpGTID(1, "", 4, mysql.BinlogThroughGTID, nil) + require.NoError(t, err, "Should be able to send COM_BINLOG_DUMP_GTID") + + // Channel to receive packets and errors + packetCh := make(chan []byte, 10) + errCh := make(chan error, 1) + + // Start reading packets in a goroutine + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + defer close(packetCh) + for { + data, err := binlogConn.ReadPacket() + if err != nil { + select { + case errCh <- err: + default: + } + return + } + select { + case packetCh <- data: + default: + // Channel full, drop packet + } + } + }() + + // Give the binlog dump a moment to start + time.Sleep(100 * time.Millisecond) + + // Now insert data to generate binlog packets + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer dataConn.Close() + + t.Log("Inserting test data to generate binlog packets") + for i := 0; i < 3; i++ { + _, err := dataConn.ExecuteFetch(fmt.Sprintf("INSERT INTO binlog_test (msg) VALUES ('streaming_test_%d')", i), 1, false) + require.NoError(t, err) + } + + // Wait for at least one packet or timeout + receivedPackets := 0 + timeout := time.After(5 * time.Second) + +packetLoop: + for { + select { + case data, ok := <-packetCh: + if !ok { + // Channel closed + t.Logf("Packet channel closed after receiving %d packets", receivedPackets) + break packetLoop + } + receivedPackets++ + if len(data) > 0 { + t.Logf("Received packet %d: first byte=0x%02x, length=%d", receivedPackets, data[0], len(data)) + } + // We got a packet, test passes + if receivedPackets >= 3 { + t.Logf("Received %d packets, test passed", receivedPackets) + break packetLoop + } + case err := <-errCh: + t.Logf("Got error from packet reader: %v", err) + break packetLoop + case <-timeout: + t.Logf("Timeout after receiving %d packets", receivedPackets) + break packetLoop + } + } + + // Close the connection to stop the reader goroutine + binlogConn.Close() + wg.Wait() 
+ + // We should have received at least some packets + assert.GreaterOrEqual(t, receivedPackets, 1, "Should have received at least one binlog packet") +} + +// TestBinlogDumpGTID_NoTarget verifies that binlog dump returns an error packet without a target +func TestBinlogDumpGTID_NoTarget(t *testing.T) { + ctx := t.Context() + + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // Try to send COM_BINLOG_DUMP_GTID without setting a target + err = conn.WriteComBinlogDumpGTID(1, "", 4, mysql.BinlogThroughGTID, nil) + require.NoError(t, err) + + // Server should send an error packet when no target is specified + data, err := conn.ReadPacket() + require.NoError(t, err, "Should receive error packet, not connection close") + require.True(t, len(data) > 0, "Response should not be empty") + require.Equal(t, byte(mysql.ErrPacket), data[0], "Expected error packet") + + // Parse the error packet and verify the message + sqlErr := mysql.ParseErrorPacket(data) + require.Error(t, sqlErr) + assert.Contains(t, sqlErr.Error(), "no target specified", "Error message should mention missing target") +} + +// TestBinlogDumpGTID_LargeEvent tests that binlog events larger than 16MB (spanning multiple MySQL packets) +// are correctly streamed through VTGate. This is critical because MySQL protocol uses 16MB max packet +// size, and large events must be split into multiple packets and reassembled correctly. +func TestBinlogDumpGTID_LargeEvent(t *testing.T) { + ctx := t.Context() + + // Get the primary tablet for our keyspace + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + tabletAlias := primaryTablet.Alias + + t.Logf("Tablet alias: %s", tabletAlias) + + // Connect to vtgate for binlog streaming + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, tabletAlias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + // Start binlog dump FIRST (with no GTID = starts from current position) + err = binlogConn.WriteComBinlogDumpGTID(1, "", 4, mysql.BinlogThroughGTID, nil) + require.NoError(t, err, "Should be able to send COM_BINLOG_DUMP_GTID") + + t.Log("Binlog dump started") + + // Channel to receive packets + packetCh := make(chan []byte, 100) + errCh := make(chan error, 1) + + // Start reading packets in a goroutine + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + defer close(packetCh) + for { + data, err := binlogConn.ReadPacket() + if err != nil { + select { + case errCh <- err: + default: + } + return + } + select { + case packetCh <- data: + default: + // Channel full, drop packet + } + } + }() + + // Give the binlog dump a moment to start + time.Sleep(100 * time.Millisecond) + + // Connect to insert data - this will generate binlog packets AFTER we started dumping + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer dataConn.Close() + + // Create a large blob of 32MB that spans multiple MySQL packets. + // The MySQL MaxPacketSize is 16777215 bytes (16MB - 1), so a 32MB packet + // will be split across 2+ packets by MySQL, testing our multi-packet handling. + // + // The gRPC max message size is configured to 64MB in main_test.go to allow + // streaming these large packets. 
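+	// Rough framing math: 32*1024*1024 = 33,554,432 bytes = 2*16,777,215 + 2, so the
+	// event body (plus its headers) arrives as two max-size fragments followed by a
+	// short final fragment, which ReadPacket reassembles into a single packet.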
+ largeDataSize := 32 * 1024 * 1024 // 32MB + + t.Logf("Inserting large blob of %d bytes (%d MB) to test multi-packet handling", largeDataSize, largeDataSize/(1024*1024)) + + // Insert the large blob using REPEAT to build the data in MySQL + baseSize := 1024 * 1024 // 1MB base + repeatCount := 32 // 32 repetitions = 32MB + + // Create base data (1MB of 'A' characters) + baseData := make([]byte, baseSize) + for i := range baseData { + baseData[i] = 'A' + } + hexBase := hex.EncodeToString(baseData) + + // Use REPEAT to build the full blob in MySQL + insertSQL := fmt.Sprintf("INSERT INTO large_blob_test (data) VALUES (REPEAT(X'%s', %d))", hexBase, repeatCount) + _, err = dataConn.ExecuteFetch(insertSQL, 1, false) + if err != nil { + t.Logf("Insert error: %v", err) + // Try a smaller size if the large one fails + t.Log("Retrying with smaller blob size...") + insertSQL = fmt.Sprintf("INSERT INTO large_blob_test (data) VALUES (REPEAT(X'%s', %d))", hexBase, 5) + _, err = dataConn.ExecuteFetch(insertSQL, 1, false) + if err != nil { + t.Logf("Smaller insert also failed: %v", err) + } + } + require.NoError(t, err, "Should be able to insert large blob") + + t.Log("Large blob inserted successfully, waiting for binlog packets...") + + // Wait for packets - we should receive the large packet + var largePacketReceived bool + receivedPackets := 0 + timeout := time.After(30 * time.Second) // Longer timeout for large data + +packetLoop: + for { + select { + case data, ok := <-packetCh: + if !ok { + t.Logf("Packet channel closed after receiving %d packets", receivedPackets) + break packetLoop + } + receivedPackets++ + packetSize := len(data) + + // Log packet details + if packetSize > 1024*1024 { + t.Logf("Received packet %d: size=%d bytes (%.1f MB), first byte=0x%02x", + receivedPackets, packetSize, float64(packetSize)/(1024*1024), data[0]) + } else { + t.Logf("Received packet %d: size=%d bytes, first byte=0x%02x", + receivedPackets, packetSize, data[0]) + } + + // Check if we received a large packet (>30MB) + // The binlog packet will be slightly larger than our data due to event headers + if packetSize > 30*1024*1024 { + largePacketReceived = true + t.Logf("SUCCESS: Received large packet of %d bytes (%.1f MB) - multi-packet handling works!", + packetSize, float64(packetSize)/(1024*1024)) + break packetLoop + } + + // Safety limit - don't wait forever + if receivedPackets > 50 { + t.Log("Received 50 packets, stopping") + break packetLoop + } + + case err := <-errCh: + t.Logf("Got error from packet reader: %v", err) + break packetLoop + + case <-timeout: + t.Logf("Timeout after receiving %d packets", receivedPackets) + break packetLoop + } + } + + // Close the connection to stop the reader goroutine + binlogConn.Close() + wg.Wait() + + // Verify we received the large packet + assert.True(t, largePacketReceived, "Should have received a binlog packet larger than 30MB") + assert.GreaterOrEqual(t, receivedPackets, 1, "Should have received at least one binlog packet") +} + +// getCurrentGTID returns the current gtid_executed value from MySQL +func getCurrentGTID(t *testing.T, conn *mysql.Conn) string { + qr, err := conn.ExecuteFetch("SELECT @@global.gtid_executed", 1, false) + require.NoError(t, err) + require.Len(t, qr.Rows, 1) + return qr.Rows[0][0].ToString() +} + +// gtidToSIDBlock converts a GTID string to a SID block for COM_BINLOG_DUMP_GTID +func gtidToSIDBlock(t *testing.T, gtidStr string) []byte { + gtidSet, err := replication.ParseMysql56GTIDSet(gtidStr) + require.NoError(t, err) + return 
gtidSet.SIDBlock() +} + +// TestBinlogDumpGTID_FromSpecificPosition verifies that binlog streaming starts from the specified +// GTID position and only receives subsequent events. +func TestBinlogDumpGTID_FromSpecificPosition(t *testing.T) { + ctx := t.Context() + + // Connect to insert initial data + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer dataConn.Close() + + // Insert initial data (we should NOT see these events) + for i := 0; i < 3; i++ { + _, err := dataConn.ExecuteFetch( + fmt.Sprintf("INSERT INTO binlog_test (msg) VALUES ('before_gtid_%d')", i), 1, false) + require.NoError(t, err) + } + + // Get current GTID position - we'll start streaming from here + startGTID := getCurrentGTID(t, dataConn) + t.Logf("Starting GTID position: %s", startGTID) + + // Insert more data (we SHOULD see these events) + for i := 0; i < 3; i++ { + _, err := dataConn.ExecuteFetch( + fmt.Sprintf("INSERT INTO binlog_test (msg) VALUES ('after_gtid_%d')", i), 1, false) + require.NoError(t, err) + } + + // Connect for binlog streaming + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, primaryTablet.Alias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + // Start binlog dump from the saved GTID position + sidBlock := gtidToSIDBlock(t, startGTID) + err = binlogConn.WriteComBinlogDumpGTID(1, "", 4, mysql.BinlogThroughGTID, sidBlock) + require.NoError(t, err) + + // Read packets - should only get packets after startGTID + receivedPackets := 0 + timeout := time.After(5 * time.Second) + + for receivedPackets < 3 { + select { + case <-timeout: + t.Fatalf("Timeout waiting for packets, received %d", receivedPackets) + default: + data, err := binlogConn.ReadPacket() + if err != nil { + t.Fatalf("Error reading packet: %v", err) + } + if len(data) > 0 && data[0] == mysql.OKPacket { + receivedPackets++ + t.Logf("Received packet %d: size=%d bytes", receivedPackets, len(data)) + } + } + } + + t.Logf("Successfully received %d packets from GTID position", receivedPackets) +} + +// TestBinlogDumpGTID_InvalidFormat verifies that an invalid GTID format returns a proper error packet. 
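+// A well-formed SID block begins with a little-endian uint64 SID count, followed for
+// each SID by a 16-byte UUID, a uint64 interval count, and uint64 start/end pairs
+// (48 bytes for a single UUID with one interval, as in TestParseComBinlogDumpGTID);
+// the arbitrary ASCII bytes sent below cannot be decoded that way.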
+func TestBinlogDumpGTID_InvalidFormat(t *testing.T) { + ctx := t.Context() + + // Connect with proper target + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, primaryTablet.Alias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + // Send COM_BINLOG_DUMP_GTID with invalid GTID data + // We'll send garbage bytes as the SID block + invalidSIDBlock := []byte("not-a-valid-gtid-format") + err = binlogConn.WriteComBinlogDumpGTID(1, "", 4, mysql.BinlogThroughGTID, invalidSIDBlock) + require.NoError(t, err) + + // Should receive an error packet + data, err := binlogConn.ReadPacket() + require.NoError(t, err, "Should receive error packet, not connection close") + require.True(t, len(data) > 0, "Response should not be empty") + require.Equal(t, byte(mysql.ErrPacket), data[0], "Expected error packet") + + // Parse and verify error message + sqlErr := mysql.ParseErrorPacket(data) + require.Error(t, sqlErr) + t.Logf("Got expected error: %v", sqlErr) +} + +// TestBinlogDumpGTID_FuturePosition verifies that when requesting a GTID set that includes +// transactions not yet in the binlog, MySQL returns an error. +// This is expected MySQL behavior - you cannot request events from a position that doesn't exist. +func TestBinlogDumpGTID_FuturePosition(t *testing.T) { + ctx := t.Context() + + // Connect to get current GTID + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer dataConn.Close() + + // Get current GTID + currentGTID := getCurrentGTID(t, dataConn) + t.Logf("Current GTID: %s", currentGTID) + + // Parse the GTID to get the last transaction number and create a future GTID + gtidSet, err := replication.ParseMysql56GTIDSet(currentGTID) + require.NoError(t, err) + + // Get the last GTID and construct a future one + lastGTID := gtidSet.Last() + require.NotEmpty(t, lastGTID, "Should have a last GTID") + t.Logf("Last GTID: %s", lastGTID) + + // Parse the last transaction number and add 10 to create a future GTID + parts := strings.Split(lastGTID, ":") + require.Len(t, parts, 2, "Last GTID should be in format uuid:N") + uuid := parts[0] + lastTxn, err := strconv.ParseInt(parts[1], 10, 64) + require.NoError(t, err) + + futureTxn := lastTxn + 10 + futureGTID := fmt.Sprintf("%s:1-%d", uuid, futureTxn) + t.Logf("Future GTID (includes non-existent transactions): %s", futureGTID) + + // Connect for binlog streaming + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, primaryTablet.Alias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + // Start binlog dump from the future GTID position + futureGTIDSet, err := replication.ParseMysql56GTIDSet(futureGTID) + require.NoError(t, err) + sidBlock := futureGTIDSet.SIDBlock() + err = binlogConn.WriteComBinlogDumpGTID(1, "", 4, mysql.BinlogThroughGTID, sidBlock) + require.NoError(t, err) + + // MySQL should return an error because the requested GTID includes transactions + // that don't exist yet. We should receive an error packet or connection close. 
+ data, err := binlogConn.ReadPacket() + if err != nil { + // Connection closed or read error - this is acceptable + t.Logf("Got expected error: %v", err) + return + } + + // If we got a packet, check if it's an error packet + require.True(t, len(data) > 0, "Response should not be empty") + if data[0] == mysql.ErrPacket { + // Error packet - parse and log it + sqlErr := mysql.ParseErrorPacket(data) + t.Logf("Got expected error packet: %v", sqlErr) + return + } + + // If MySQL sends events (shouldn't happen with future GTID), that's also OK + // as long as the behavior is consistent + t.Logf("Got unexpected response: first byte=0x%02x, length=%d", data[0], len(data)) + t.Logf("MySQL may have sent existing events - behavior depends on MySQL version") +} + +// TestBinlogDumpGTID_NonBlockEOF verifies that when the BINLOG_DUMP_NON_BLOCK flag is set, +// the server returns an EOF packet when there are no more events to stream, instead of +// blocking indefinitely waiting for new events. +func TestBinlogDumpGTID_NonBlockEOF(t *testing.T) { + ctx := t.Context() + + // Connect to insert some data first to ensure binlog has content + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer dataConn.Close() + + // Insert data to ensure binlog is not empty + _, err = dataConn.ExecuteFetch("INSERT INTO binlog_test (msg) VALUES ('nonblock_test')", 1, false) + require.NoError(t, err) + + // Get current GTID - we'll start streaming from here (after all existing events) + currentGTID := getCurrentGTID(t, dataConn) + t.Logf("Starting from GTID: %s (should have no pending events)", currentGTID) + + // Connect for binlog streaming + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, primaryTablet.Alias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + // Start binlog dump with NONBLOCK flag - should return EOF when caught up + // Flags: BinlogDumpNonBlock (0x01) | BinlogThroughGTID (0x04) = 0x05 + flags := uint16(mysql.BinlogDumpNonBlock | mysql.BinlogThroughGTID) + sidBlock := gtidToSIDBlock(t, currentGTID) + err = binlogConn.WriteComBinlogDumpGTID(1, "", 4, flags, sidBlock) + require.NoError(t, err) + + // With nonBlock, we should receive an EOF packet relatively quickly + // since there are no packets after the current GTID position + timeout := time.After(5 * time.Second) + var receivedEOF bool + var receivedPackets int + +readLoop: + for { + select { + case <-timeout: + t.Fatalf("Timeout waiting for EOF packet - nonBlock flag may not be implemented. 
Received %d packets.", receivedPackets) + default: + data, err := binlogConn.ReadPacket() + if err != nil { + // Connection closed - could be server's way of signaling end + t.Logf("Connection closed: %v (received %d packets)", err, receivedPackets) + break readLoop + } + + if len(data) == 0 { + continue + } + + switch data[0] { + case mysql.EOFPacket: + receivedEOF = true + t.Logf("Received EOF packet after %d packets - nonBlock working correctly", receivedPackets) + break readLoop + case mysql.ErrPacket: + sqlErr := mysql.ParseErrorPacket(data) + t.Logf("Received error packet: %v", sqlErr) + break readLoop + case mysql.OKPacket: + receivedPackets++ + t.Logf("Received packet %d: size=%d bytes", receivedPackets, len(data)) + // Continue reading - there might be a few packets before EOF + default: + t.Logf("Received packet with first byte=0x%02x, size=%d", data[0], len(data)) + } + } + } + + assert.True(t, receivedEOF, "Should have received EOF packet with nonBlock flag set") +} + +// TestBinlogDumpGTID_NonBlockWithPendingEvents verifies that when nonBlock is set and there +// ARE pending events, the server streams them all and THEN returns EOF. +func TestBinlogDumpGTID_NonBlockWithPendingEvents(t *testing.T) { + ctx := t.Context() + + // Connect to insert data + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer dataConn.Close() + + // Get GTID BEFORE inserting test data + startGTID := getCurrentGTID(t, dataConn) + t.Logf("Start GTID (before inserts): %s", startGTID) + + // Insert several rows - these will be "pending" events when we start streaming + numInserts := 5 + for i := 0; i < numInserts; i++ { + _, err := dataConn.ExecuteFetch( + fmt.Sprintf("INSERT INTO binlog_test (msg) VALUES ('nonblock_pending_%d')", i), 1, false) + require.NoError(t, err) + } + + endGTID := getCurrentGTID(t, dataConn) + t.Logf("End GTID (after inserts): %s", endGTID) + + // Connect for binlog streaming + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, primaryTablet.Alias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + // Start binlog dump with NONBLOCK flag from BEFORE the inserts + flags := uint16(mysql.BinlogDumpNonBlock | mysql.BinlogThroughGTID) + sidBlock := gtidToSIDBlock(t, startGTID) + err = binlogConn.WriteComBinlogDumpGTID(1, "", 4, flags, sidBlock) + require.NoError(t, err) + + // Should receive the pending packets, then EOF + timeout := time.After(10 * time.Second) + var receivedEOF bool + var receivedPackets int + +readLoop: + for { + select { + case <-timeout: + t.Fatalf("Timeout - received %d packets but no EOF. 
NonBlock may not be implemented.", receivedPackets) + default: + data, err := binlogConn.ReadPacket() + if err != nil { + t.Logf("Connection closed: %v (received %d packets)", err, receivedPackets) + break readLoop + } + + if len(data) == 0 { + continue + } + + switch data[0] { + case mysql.EOFPacket: + receivedEOF = true + t.Logf("Received EOF after %d packets", receivedPackets) + break readLoop + case mysql.ErrPacket: + sqlErr := mysql.ParseErrorPacket(data) + t.Fatalf("Unexpected error packet: %v", sqlErr) + case mysql.OKPacket: + receivedPackets++ + if receivedPackets <= 10 { + t.Logf("Received packet %d: size=%d bytes", receivedPackets, len(data)) + } + default: + t.Logf("Received packet with first byte=0x%02x, size=%d", data[0], len(data)) + } + } + } + + // We should have received packets (binlog packets for the inserts) and then EOF + assert.True(t, receivedEOF, "Should have received EOF packet after streaming pending packets") + assert.GreaterOrEqual(t, receivedPackets, 1, "Should have received at least some binlog packets for the inserts") + t.Logf("NonBlock with pending packets: received %d packets then EOF", receivedPackets) +} + +// TestBinlogDumpGTID_BlockingMode verifies the default blocking behavior - when BINLOG_DUMP_NON_BLOCK +// is NOT set, the server should block waiting for new events instead of returning EOF. +// This test verifies that new events are received after an insert, demonstrating that the +// connection stays open and continues to stream events. +func TestBinlogDumpGTID_BlockingMode(t *testing.T) { + ctx := t.Context() + + // Connect to get current position + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer dataConn.Close() + + // Get current GTID - we'll start streaming from here + currentGTID := getCurrentGTID(t, dataConn) + t.Logf("Starting from GTID: %s", currentGTID) + + // Connect for binlog streaming + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, primaryTablet.Alias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + // Start binlog dump WITHOUT nonBlock flag - should block when caught up + // Flags: only BinlogThroughGTID (0x04), NOT BinlogDumpNonBlock + flags := uint16(mysql.BinlogThroughGTID) + sidBlock := gtidToSIDBlock(t, currentGTID) + err = binlogConn.WriteComBinlogDumpGTID(1, "", 4, flags, sidBlock) + require.NoError(t, err) + + // Channel for async packet reading - we'll read continuously + packetCh := make(chan []byte, 100) + errCh := make(chan error, 1) + doneCh := make(chan struct{}) + + // Start a goroutine to read packets continuously + go func() { + defer close(packetCh) + for { + select { + case <-doneCh: + return + default: + } + data, err := binlogConn.ReadPacket() + if err != nil { + select { + case errCh <- err: + default: + } + return + } + select { + case packetCh <- data: + case <-doneCh: + return + } + } + }() + + // Give it a moment, then insert data + time.Sleep(100 * time.Millisecond) + + // Insert data - this should generate binlog packets that we receive + _, err = dataConn.ExecuteFetch("INSERT INTO binlog_test (msg) VALUES ('blocking_mode_test')", 1, false) + require.NoError(t, err) + t.Log("Inserted test row") + + // Wait for packets - in blocking mode we should receive the insert packets + 
receivedPackets := 0 + timeout := time.After(10 * time.Second) + +readLoop: + for { + select { + case data, ok := <-packetCh: + if !ok { + t.Log("Packet channel closed") + break readLoop + } + if len(data) > 0 { + receivedPackets++ + if data[0] == mysql.EOFPacket { + t.Fatal("Received unexpected EOF in blocking mode") + } + t.Logf("Received packet %d: first byte=0x%02x, size=%d", receivedPackets, data[0], len(data)) + // After receiving some packets, we can stop + if receivedPackets >= 3 { + t.Logf("Received %d packets, blocking mode is working correctly", receivedPackets) + break readLoop + } + } + case err := <-errCh: + t.Fatalf("Error reading packet: %v", err) + case <-timeout: + if receivedPackets > 0 { + t.Logf("Timeout after receiving %d packets - blocking mode works", receivedPackets) + break readLoop + } + t.Fatal("Timeout waiting for packets in blocking mode") + } + } + + // Signal the reader goroutine to stop + close(doneCh) + + assert.GreaterOrEqual(t, receivedPackets, 1, "Should have received at least one packet in blocking mode") +} + +// TestBinlogDumpGTID_DirectGRPC tests GTID-based binlog streaming via direct gRPC connection to vttablet. +func TestBinlogDumpGTID_DirectGRPC(t *testing.T) { + ctx := t.Context() + + // Get the tablet info for direct gRPC connection + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + tablet, err := clusterInstance.VtctldClientProcess.GetTablet(primaryTablet.Alias) + require.NoError(t, err) + + // Get current position - returns format like "MySQL56/uuid:1-44" + pos, _ := cluster.GetPrimaryPosition(t, *primaryTablet, hostname) + t.Logf("Primary position: %v", pos) + + // Extract just the GTID set (strip the "MySQL56/" prefix) + gtidSet := pos + if idx := strings.Index(pos, "/"); idx != -1 { + gtidSet = pos[idx+1:] + } + t.Logf("GTID set for BinlogDump: %s", gtidSet) + + grpcCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + // Connect directly to vttablet via gRPC + conn, err := tabletconn.GetDialer()(grpcCtx, tablet, grpcclient.FailFast(false)) + require.NoError(t, err) + defer conn.Close(grpcCtx) + + var receivedPackets int + var wg sync.WaitGroup + + // Goroutine 1: Stream binlog packets via direct gRPC + wg.Add(1) + go func() { + defer wg.Done() + err := conn.BinlogDumpGTID(grpcCtx, &binlogdatapb.BinlogDumpGTIDRequest{ + Target: &querypb.Target{ + Keyspace: keyspaceName, + Shard: "0", + TabletType: tablet.Type, + }, + GtidSet: gtidSet, + }, func(response *binlogdatapb.BinlogDumpResponse) error { + receivedPackets++ + t.Logf("Received packet %d via gRPC: %d bytes", receivedPackets, len(response.Packet)) + return nil + }) + if err != nil { + t.Logf("BinlogDumpGTID ended: %v", err) + } + }() + + // Goroutine 2: Write data to generate binlog packets + wg.Add(1) + go func() { + defer wg.Done() + dataConn, err := mysql.Connect(grpcCtx, &vtParams) + if err != nil { + t.Logf("Failed to connect for writes: %v", err) + return + } + defer dataConn.Close() + + for i := range 5 { + select { + case <-grpcCtx.Done(): + return + default: + } + time.Sleep(1 * time.Second) + t.Logf("Writing row %d", i+1) + _, err := dataConn.ExecuteFetch( + fmt.Sprintf("INSERT INTO binlog_test (msg) VALUES ('grpc_test_%d')", i), 1, false) + if err != nil { + t.Logf("Insert failed: %v", err) + return + } + } + }() + + wg.Wait() + + assert.GreaterOrEqual(t, receivedPackets, 1, "Should have received binlog packets via direct gRPC") + t.Logf("Successfully received %d packets via direct gRPC to vttablet", receivedPackets) +} + +// 
getBinlogFilePosition queries MySQL for the current binlog file and position. +func getBinlogFilePosition(t *testing.T, conn *mysql.Conn) (string, uint32) { + t.Helper() + + // Try SHOW BINARY LOG STATUS first (MySQL 8.2+), fall back to SHOW MASTER STATUS + qr, err := conn.ExecuteFetch("SHOW BINARY LOG STATUS", 1, false) + if err != nil { + qr, err = conn.ExecuteFetch("SHOW MASTER STATUS", 1, false) + require.NoError(t, err, "Failed to get binlog position") + } + require.Len(t, qr.Rows, 1, "Expected one row from SHOW BINARY LOG STATUS") + + file := qr.Rows[0][0].ToString() + posStr := qr.Rows[0][1].ToString() + pos, err := strconv.ParseUint(posStr, 10, 32) + require.NoError(t, err, "Failed to parse binlog position") + + return file, uint32(pos) +} + +// TestBinlogDump_VTGate tests COM_BINLOG_DUMP (file/position-based) via VTGate. +func TestBinlogDump_VTGate(t *testing.T) { + ctx := t.Context() + + // First, connect to MySQL directly (via vtgate) to get the current binlog position + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + + // Get current binlog file and position + binlogFile, binlogPos := getBinlogFilePosition(t, dataConn) + t.Logf("Starting binlog file: %s, position: %d", binlogFile, binlogPos) + + // Insert some data so we have events to read + for i := range 3 { + _, err = dataConn.ExecuteFetch( + fmt.Sprintf("INSERT INTO binlog_test (msg) VALUES ('filepos_test_%d')", i), 1, false) + require.NoError(t, err) + } + dataConn.Close() + + // Get the primary tablet for our keyspace + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + tabletAlias := primaryTablet.Alias + + // Connect to vtgate for binlog streaming + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, tabletAlias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + t.Logf("Connecting to VTGate for COM_BINLOG_DUMP with username: %s", binlogParams.Uname) + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + // Send COM_BINLOG_DUMP with file and position + err = binlogConn.WriteComBinlogDump(1, binlogFile, uint64(binlogPos), 0) + require.NoError(t, err, "Should be able to send COM_BINLOG_DUMP") + + // Read binlog packets using a goroutine with timeout + packetCh := make(chan []byte, 100) + errCh := make(chan error, 1) + doneCh := make(chan struct{}) + + go func() { + for { + select { + case <-doneCh: + return + default: + } + data, err := binlogConn.ReadPacket() + if err != nil { + select { + case errCh <- err: + default: + } + return + } + select { + case packetCh <- data: + case <-doneCh: + return + } + } + }() + + var receivedPackets int + timeout := time.After(5 * time.Second) + +readLoop: + for { + select { + case <-timeout: + t.Logf("Timeout reached after receiving %d packets", receivedPackets) + break readLoop + case err := <-errCh: + t.Logf("Read error: %v", err) + break readLoop + case data := <-packetCh: + if len(data) > 0 { + receivedPackets++ + t.Logf("Received packet %d: size=%d bytes, first byte=0x%02x", receivedPackets, len(data), data[0]) + + // Check for EOF packet + if data[0] == mysql.EOFPacket && len(data) < 9 { + t.Log("Received EOF packet") + break readLoop + } + + // Check for error packet + if data[0] == mysql.ErrPacket { + t.Logf("Received error packet") + break readLoop + } + + // Stop after receiving enough packets + if receivedPackets >= 10 { + break readLoop + } + } + } + } + 
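+	// Stop the background reader, then check that vtgate relayed at least one
+	// raw binlog packet for the rows inserted above.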
+ close(doneCh) + assert.GreaterOrEqual(t, receivedPackets, 1, "Should have received at least one packet via COM_BINLOG_DUMP") + t.Logf("Successfully received %d packets via COM_BINLOG_DUMP (file/position)", receivedPackets) +} + +// TestBinlogDump_DirectGRPC tests file/position-based binlog streaming via direct gRPC to vttablet. +func TestBinlogDump_DirectGRPC(t *testing.T) { + ctx := t.Context() + + // Get the tablet info for direct gRPC connection + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + tablet, err := clusterInstance.VtctldClientProcess.GetTablet(primaryTablet.Alias) + require.NoError(t, err) + + // Connect to MySQL to get the binlog file and position + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + + binlogFile, binlogPos := getBinlogFilePosition(t, dataConn) + t.Logf("Starting binlog file: %s, position: %d", binlogFile, binlogPos) + dataConn.Close() + + grpcCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + // Connect directly to vttablet via gRPC + conn, err := tabletconn.GetDialer()(grpcCtx, tablet, grpcclient.FailFast(false)) + require.NoError(t, err) + defer conn.Close(grpcCtx) + + var receivedPackets int + var wg sync.WaitGroup + + // Goroutine 1: Stream binlog packets via direct gRPC using file/position + wg.Add(1) + go func() { + defer wg.Done() + err := conn.BinlogDump(grpcCtx, &binlogdatapb.BinlogDumpRequest{ + Target: &querypb.Target{ + Keyspace: keyspaceName, + Shard: "0", + TabletType: tablet.Type, + }, + BinlogFilename: binlogFile, + BinlogPosition: binlogPos, + }, func(response *binlogdatapb.BinlogDumpResponse) error { + receivedPackets++ + t.Logf("Received packet %d via gRPC (file/pos): %d bytes", receivedPackets, len(response.Packet)) + return nil + }) + if err != nil { + t.Logf("BinlogDump (file/pos) ended: %v", err) + } + }() + + // Goroutine 2: Write data to generate binlog packets + wg.Add(1) + go func() { + defer wg.Done() + writeConn, err := mysql.Connect(grpcCtx, &vtParams) + if err != nil { + t.Logf("Failed to connect for writes: %v", err) + return + } + defer writeConn.Close() + + for i := range 5 { + select { + case <-grpcCtx.Done(): + return + default: + } + time.Sleep(1 * time.Second) + t.Logf("Writing row %d", i+1) + _, err := writeConn.ExecuteFetch( + fmt.Sprintf("INSERT INTO binlog_test (msg) VALUES ('filepos_grpc_test_%d')", i), 1, false) + if err != nil { + t.Logf("Insert failed: %v", err) + return + } + } + }() + + wg.Wait() + + assert.GreaterOrEqual(t, receivedPackets, 1, "Should have received binlog packets via direct gRPC (file/position)") + t.Logf("Successfully received %d packets via direct gRPC to vttablet (file/position)", receivedPackets) +} + +// TestBinlogDump_NoTarget verifies that COM_BINLOG_DUMP returns an error packet without a target. 
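+// Other tests in this file route the dump by encoding the target in the MySQL
+// username (user|keyspace:shard@tablet_type|tablet_alias); this connection carries
+// no target, so the server must answer with an error packet rather than
+// dropping the connection.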
+func TestBinlogDump_NoTarget(t *testing.T) { + ctx := t.Context() + + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // Try to send COM_BINLOG_DUMP without setting a target + err = conn.WriteComBinlogDump(1, "binlog.000001", 4, 0) + require.NoError(t, err) + + // Server should send an error packet when no target is specified + data, err := conn.ReadPacket() + require.NoError(t, err, "Should receive error packet, not connection close") + require.True(t, len(data) > 0, "Response should not be empty") + require.Equal(t, byte(mysql.ErrPacket), data[0], "Expected error packet") + + // Parse the error packet and verify the message + sqlErr := mysql.ParseErrorPacket(data) + require.Error(t, sqlErr) + assert.Contains(t, sqlErr.Error(), "no target specified", "Error message should mention missing target") +} + +// TestBinlogDump_LargeEvent tests that binlog events larger than 16MB (spanning multiple MySQL packets) +// are correctly streamed through VTGate using COM_BINLOG_DUMP (file/position-based). +// This verifies that multi-packet handling works correctly for the file/position code path. +func TestBinlogDump_LargeEvent(t *testing.T) { + ctx := t.Context() + + // Get the primary tablet for our keyspace + primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() + tabletAlias := primaryTablet.Alias + + t.Logf("Tablet alias: %s", tabletAlias) + + // First, get the current binlog file and position + dataConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + + binlogFile, binlogPos := getBinlogFilePosition(t, dataConn) + t.Logf("Starting binlog file: %s, position: %d", binlogFile, binlogPos) + dataConn.Close() + + // Connect to vtgate for binlog streaming + targetString := fmt.Sprintf("%s:0@primary|%s", keyspaceName, tabletAlias) + binlogParams := mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + Uname: "vt_repl|" + targetString, + } + + binlogConn, err := mysql.Connect(ctx, &binlogParams) + require.NoError(t, err) + defer binlogConn.Close() + + // Start binlog dump with file and position + err = binlogConn.WriteComBinlogDump(1, binlogFile, uint64(binlogPos), 0) + require.NoError(t, err, "Should be able to send COM_BINLOG_DUMP") + + t.Log("Binlog dump started (file/position mode)") + + // Channel to receive packets + packetCh := make(chan []byte, 100) + errCh := make(chan error, 1) + + // Start reading packets in a goroutine + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + defer close(packetCh) + for { + data, err := binlogConn.ReadPacket() + if err != nil { + select { + case errCh <- err: + default: + } + return + } + select { + case packetCh <- data: + default: + // Channel full, drop packet + } + } + }() + + // Give the binlog dump a moment to start + time.Sleep(100 * time.Millisecond) + + // Connect to insert data - this will generate binlog packets AFTER we started dumping + dataConn, err = mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer dataConn.Close() + + // Create a large blob of 32MB that spans multiple MySQL packets. 
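+	// The MySQL wire protocol caps a single packet payload at 16MB-1 (0xffffff)
+	// bytes, so this event is sent as a multi-packet sequence that has to be
+	// reassembled into one >30MB packet on the way through vtgate.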
+ largeDataSize := 32 * 1024 * 1024 // 32MB + + t.Logf("Inserting large blob of %d bytes (%d MB) to test multi-packet handling", largeDataSize, largeDataSize/(1024*1024)) + + // Insert the large blob using REPEAT to build the data in MySQL + baseSize := 1024 * 1024 // 1MB base + repeatCount := 32 // 32 repetitions = 32MB + + // Create base data (1MB of 'A' characters) + baseData := make([]byte, baseSize) + for i := range baseData { + baseData[i] = 'A' + } + hexBase := hex.EncodeToString(baseData) + + // Use REPEAT to build the full blob in MySQL + insertSQL := fmt.Sprintf("INSERT INTO large_blob_test (data) VALUES (REPEAT(X'%s', %d))", hexBase, repeatCount) + _, err = dataConn.ExecuteFetch(insertSQL, 1, false) + if err != nil { + t.Logf("Insert error: %v", err) + // Try a smaller size if the large one fails + t.Log("Retrying with smaller blob size...") + insertSQL = fmt.Sprintf("INSERT INTO large_blob_test (data) VALUES (REPEAT(X'%s', %d))", hexBase, 5) + _, err = dataConn.ExecuteFetch(insertSQL, 1, false) + if err != nil { + t.Logf("Smaller insert also failed: %v", err) + } + } + require.NoError(t, err, "Should be able to insert large blob") + + t.Log("Large blob inserted successfully, waiting for binlog packets...") + + // Wait for packets - we should receive the large packet + var largePacketReceived bool + receivedPackets := 0 + timeout := time.After(30 * time.Second) // Longer timeout for large data + +packetLoop: + for { + select { + case data, ok := <-packetCh: + if !ok { + t.Logf("Packet channel closed after receiving %d packets", receivedPackets) + break packetLoop + } + receivedPackets++ + packetSize := len(data) + + // Log packet details + if packetSize > 1024*1024 { + t.Logf("Received packet %d: size=%d bytes (%.1f MB), first byte=0x%02x", + receivedPackets, packetSize, float64(packetSize)/(1024*1024), data[0]) + } else { + t.Logf("Received packet %d: size=%d bytes, first byte=0x%02x", + receivedPackets, packetSize, data[0]) + } + + // Check if we received a large packet (>30MB) + if packetSize > 30*1024*1024 { + largePacketReceived = true + t.Logf("SUCCESS: Received large packet of %d bytes (%.1f MB) - multi-packet handling works!", + packetSize, float64(packetSize)/(1024*1024)) + break packetLoop + } + + // Safety limit - don't wait forever + if receivedPackets > 50 { + t.Log("Received 50 packets, stopping") + break packetLoop + } + + case err := <-errCh: + t.Logf("Got error from packet reader: %v", err) + break packetLoop + + case <-timeout: + t.Logf("Timeout after receiving %d packets", receivedPackets) + break packetLoop + } + } + + // Close the connection to stop the reader goroutine + binlogConn.Close() + wg.Wait() + + // Verify we received the large packet + assert.True(t, largePacketReceived, "Should have received a binlog packet larger than 30MB") + assert.GreaterOrEqual(t, receivedPackets, 1, "Should have received at least one binlog packet") +} diff --git a/go/test/endtoend/binlogdump/main_test.go b/go/test/endtoend/binlogdump/main_test.go new file mode 100644 index 00000000000..39467fe95e9 --- /dev/null +++ b/go/test/endtoend/binlogdump/main_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2025 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package binlogdump + +import ( + "flag" + "fmt" + "os" + "testing" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + hostname = "localhost" + keyspaceName = "test_keyspace" + cell = "zone1" + sqlSchema = `create table binlog_test ( + id bigint auto_increment, + msg varchar(64), + primary key (id) + ) Engine=InnoDB; + + create table large_blob_test ( + id bigint auto_increment, + data longblob, + primary key (id) + ) Engine=InnoDB;` +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode, err := func() (int, error) { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1, err + } + + // Set gRPC max message size to 64MB for both vttablet and vtgate + // This is needed to stream large binlog events (>16MB default) + grpcMaxMsgSize := "--grpc-max-message-size=67108864" // 64MB + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, grpcMaxMsgSize) + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, grpcMaxMsgSize) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: sqlSchema, + } + if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false, clusterInstance.Cell); err != nil { + return 1, err + } + + // Start vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1, err + } + + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + return m.Run(), nil + }() + if err != nil { + fmt.Printf("%v\n", err) + os.Exit(1) + } else { + os.Exit(exitcode) + } +} diff --git a/go/vt/binlog/binlog_connection.go b/go/vt/binlog/binlog_connection.go index dcb38a4b93a..c19605de07b 100644 --- a/go/vt/binlog/binlog_connection.go +++ b/go/vt/binlog/binlog_connection.go @@ -81,6 +81,11 @@ func NewBinlogConnection(cp dbconfigs.Connector) (*BinlogConnection, error) { return bc, nil } +// ServerID returns the server ID used by this binlog connection. +func (bc *BinlogConnection) ServerID() uint32 { + return bc.serverID +} + // connectForReplication create a MySQL connection ready to use for replication. 
func connectForReplication(cp dbconfigs.Connector) (*mysql.Conn, error) { ctx := context.Background() @@ -124,7 +129,8 @@ func (bc *BinlogConnection) StartBinlogDumpFromPosition(ctx context.Context, bin ctx, bc.cancel = context.WithCancel(ctx) log.Infof("sending binlog dump command: startPos=%v, serverID=%v", startPos, bc.serverID) - if err := bc.SendBinlogDumpCommand(bc.serverID, binlogFilename, startPos); err != nil { + // VStream uses blocking mode (nonBlock=false) - it continuously streams events + if err := bc.SendBinlogDumpGTIDCommand(bc.serverID, binlogFilename, startPos, false); err != nil { log.Errorf("couldn't send binlog dump command: %v", err) return nil, nil, err } @@ -306,3 +312,7 @@ func (bc *BinlogConnection) Close() { serverIDPool.Put(bc.serverID) } } + +func (bc *BinlogConnection) GetServerID() uint32 { + return bc.serverID +} diff --git a/go/vt/mysqlctl/clone_test.go b/go/vt/mysqlctl/clone_test.go index 46dc8449f3c..6a240a8fe1d 100644 --- a/go/vt/mysqlctl/clone_test.go +++ b/go/vt/mysqlctl/clone_test.go @@ -549,7 +549,7 @@ func (h *mockDonorHandler) ComBinlogDump(c *mysql.Conn, logFile string, binlogPo return errors.New("ComBinlogDump not implemented") } -func (h *mockDonorHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { +func (h *mockDonorHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet, nonblock bool) error { return errors.New("ComBinlogDumpGTID not implemented") } diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index 7a855d254a1..0e654437a31 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -2969,6 +2969,238 @@ func (x *VStreamResultsResponse) GetRows() []*query.Row { return nil } +// BinlogDumpRequest is the payload for COM_BINLOG_DUMP (file/position-based replication). +// This is the older protocol that uses binlog filename and position. 
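+// Positions are limited to 32 bits, matching the COM_BINLOG_DUMP wire format;
+// use BinlogDumpGTIDRequest for 64-bit positions or GTID-based streaming.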
+type BinlogDumpRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // Binlog filename to start streaming from (e.g., "binlog.000001") + BinlogFilename string `protobuf:"bytes,4,opt,name=binlog_filename,json=binlogFilename,proto3" json:"binlog_filename,omitempty"` + // Position within the binlog file (32-bit for COM_BINLOG_DUMP) + BinlogPosition uint32 `protobuf:"varint,5,opt,name=binlog_position,json=binlogPosition,proto3" json:"binlog_position,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BinlogDumpRequest) Reset() { + *x = BinlogDumpRequest{} + mi := &file_binlogdata_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BinlogDumpRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BinlogDumpRequest) ProtoMessage() {} + +func (x *BinlogDumpRequest) ProtoReflect() protoreflect.Message { + mi := &file_binlogdata_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BinlogDumpRequest.ProtoReflect.Descriptor instead. +func (*BinlogDumpRequest) Descriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{31} +} + +func (x *BinlogDumpRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if x != nil { + return x.EffectiveCallerId + } + return nil +} + +func (x *BinlogDumpRequest) GetImmediateCallerId() *query.VTGateCallerID { + if x != nil { + return x.ImmediateCallerId + } + return nil +} + +func (x *BinlogDumpRequest) GetTarget() *query.Target { + if x != nil { + return x.Target + } + return nil +} + +func (x *BinlogDumpRequest) GetBinlogFilename() string { + if x != nil { + return x.BinlogFilename + } + return "" +} + +func (x *BinlogDumpRequest) GetBinlogPosition() uint32 { + if x != nil { + return x.BinlogPosition + } + return 0 +} + +// BinlogDumpGTIDRequest is the payload for COM_BINLOG_DUMP_GTID (GTID-based replication). +// This is the newer protocol (MySQL 5.6+) that uses GTID sets. 
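+// The GTID set is carried as text and converted to a SID block by vttablet.
+// Setting non_block maps to the BINLOG_DUMP_NON_BLOCK flag: the server returns
+// EOF once all pending events have been streamed instead of blocking for new ones.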
+type BinlogDumpGTIDRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // Optional binlog filename (used with BinlogThroughPosition flag) + BinlogFilename string `protobuf:"bytes,4,opt,name=binlog_filename,json=binlogFilename,proto3" json:"binlog_filename,omitempty"` + // Position within the binlog file (64-bit for COM_BINLOG_DUMP_GTID) + BinlogPosition uint64 `protobuf:"varint,5,opt,name=binlog_position,json=binlogPosition,proto3" json:"binlog_position,omitempty"` + // GTID set in string format (e.g., "uuid:1-5,uuid2:1-3") + // vttablet will convert to SIDBlock for MySQL + GtidSet string `protobuf:"bytes,6,opt,name=gtid_set,json=gtidSet,proto3" json:"gtid_set,omitempty"` + // If true, MySQL will return EOF when it reaches the end of the binlog + // instead of blocking and waiting for new events. This corresponds to + // the BINLOG_DUMP_NON_BLOCK flag (0x01) in COM_BINLOG_DUMP_GTID. + NonBlock bool `protobuf:"varint,7,opt,name=non_block,json=nonBlock,proto3" json:"non_block,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BinlogDumpGTIDRequest) Reset() { + *x = BinlogDumpGTIDRequest{} + mi := &file_binlogdata_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BinlogDumpGTIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BinlogDumpGTIDRequest) ProtoMessage() {} + +func (x *BinlogDumpGTIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_binlogdata_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BinlogDumpGTIDRequest.ProtoReflect.Descriptor instead. +func (*BinlogDumpGTIDRequest) Descriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{32} +} + +func (x *BinlogDumpGTIDRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if x != nil { + return x.EffectiveCallerId + } + return nil +} + +func (x *BinlogDumpGTIDRequest) GetImmediateCallerId() *query.VTGateCallerID { + if x != nil { + return x.ImmediateCallerId + } + return nil +} + +func (x *BinlogDumpGTIDRequest) GetTarget() *query.Target { + if x != nil { + return x.Target + } + return nil +} + +func (x *BinlogDumpGTIDRequest) GetBinlogFilename() string { + if x != nil { + return x.BinlogFilename + } + return "" +} + +func (x *BinlogDumpGTIDRequest) GetBinlogPosition() uint64 { + if x != nil { + return x.BinlogPosition + } + return 0 +} + +func (x *BinlogDumpGTIDRequest) GetGtidSet() string { + if x != nil { + return x.GtidSet + } + return "" +} + +func (x *BinlogDumpGTIDRequest) GetNonBlock() bool { + if x != nil { + return x.NonBlock + } + return false +} + +// BinlogDumpResponse streams raw MySQL packet payloads. +// Used by both BinlogDump and BinlogDumpGTID RPCs. +type BinlogDumpResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // A single raw MySQL packet payload from the binlog stream. 
+ // Packets are streamed exactly as received from MySQL, including + // zero-length packets that terminate multi-packet sequences. + // Only the first packet of a sequence contains the status byte + // (0x00 for event data, 0xFE for EOF, 0xFF for error). + Packet []byte `protobuf:"bytes,1,opt,name=packet,proto3" json:"packet,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BinlogDumpResponse) Reset() { + *x = BinlogDumpResponse{} + mi := &file_binlogdata_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BinlogDumpResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BinlogDumpResponse) ProtoMessage() {} + +func (x *BinlogDumpResponse) ProtoReflect() protoreflect.Message { + mi := &file_binlogdata_proto_msgTypes[33] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BinlogDumpResponse.ProtoReflect.Descriptor instead. +func (*BinlogDumpResponse) Descriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{33} +} + +func (x *BinlogDumpResponse) GetPacket() []byte { + if x != nil { + return x.Packet + } + return nil +} + type BinlogTransaction_Statement struct { state protoimpl.MessageState `protogen:"open.v1"` // what type of statement is this? @@ -2983,7 +3215,7 @@ type BinlogTransaction_Statement struct { func (x *BinlogTransaction_Statement) Reset() { *x = BinlogTransaction_Statement{} - mi := &file_binlogdata_proto_msgTypes[31] + mi := &file_binlogdata_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2995,7 +3227,7 @@ func (x *BinlogTransaction_Statement) String() string { func (*BinlogTransaction_Statement) ProtoMessage() {} func (x *BinlogTransaction_Statement) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[31] + mi := &file_binlogdata_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3042,7 +3274,7 @@ type RowChange_Bitmap struct { func (x *RowChange_Bitmap) Reset() { *x = RowChange_Bitmap{} - mi := &file_binlogdata_proto_msgTypes[35] + mi := &file_binlogdata_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3054,7 +3286,7 @@ func (x *RowChange_Bitmap) String() string { func (*RowChange_Bitmap) ProtoMessage() {} func (x *RowChange_Bitmap) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[35] + mi := &file_binlogdata_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3320,7 +3552,23 @@ const file_binlogdata_proto_rawDesc = "" + "\x06fields\x18\x01 \x03(\v2\f.query.FieldR\x06fields\x12\x12\n" + "\x04gtid\x18\x03 \x01(\tR\x04gtid\x12\x1e\n" + "\x04rows\x18\x04 \x03(\v2\n" + - ".query.RowR\x04rows*>\n" + + ".query.RowR\x04rows\"\x94\x02\n" + + "\x11BinlogDumpRequest\x12?\n" + + "\x13effective_caller_id\x18\x01 \x01(\v2\x0f.vtrpc.CallerIDR\x11effectiveCallerId\x12E\n" + + "\x13immediate_caller_id\x18\x02 \x01(\v2\x15.query.VTGateCallerIDR\x11immediateCallerId\x12%\n" + + "\x06target\x18\x03 \x01(\v2\r.query.TargetR\x06target\x12'\n" + + "\x0fbinlog_filename\x18\x04 \x01(\tR\x0ebinlogFilename\x12'\n" + + "\x0fbinlog_position\x18\x05 
\x01(\rR\x0ebinlogPosition\"\xd0\x02\n" + + "\x15BinlogDumpGTIDRequest\x12?\n" + + "\x13effective_caller_id\x18\x01 \x01(\v2\x0f.vtrpc.CallerIDR\x11effectiveCallerId\x12E\n" + + "\x13immediate_caller_id\x18\x02 \x01(\v2\x15.query.VTGateCallerIDR\x11immediateCallerId\x12%\n" + + "\x06target\x18\x03 \x01(\v2\r.query.TargetR\x06target\x12'\n" + + "\x0fbinlog_filename\x18\x04 \x01(\tR\x0ebinlogFilename\x12'\n" + + "\x0fbinlog_position\x18\x05 \x01(\x04R\x0ebinlogPosition\x12\x19\n" + + "\bgtid_set\x18\x06 \x01(\tR\agtidSet\x12\x1b\n" + + "\tnon_block\x18\a \x01(\bR\bnonBlock\",\n" + + "\x12BinlogDumpResponse\x12\x16\n" + + "\x06packet\x18\x01 \x01(\fR\x06packet*>\n" + "\vOnDDLAction\x12\n" + "\n" + "\x06IGNORE\x10\x00\x12\b\n" + @@ -3397,7 +3645,7 @@ func file_binlogdata_proto_rawDescGZIP() []byte { } var file_binlogdata_proto_enumTypes = make([]protoimpl.EnumInfo, 8) -var file_binlogdata_proto_msgTypes = make([]protoimpl.MessageInfo, 37) +var file_binlogdata_proto_msgTypes = make([]protoimpl.MessageInfo, 40) var file_binlogdata_proto_goTypes = []any{ (OnDDLAction)(0), // 0: binlogdata.OnDDLAction (VReplicationWorkflowType)(0), // 1: binlogdata.VReplicationWorkflowType @@ -3438,45 +3686,48 @@ var file_binlogdata_proto_goTypes = []any{ (*TableLastPK)(nil), // 36: binlogdata.TableLastPK (*VStreamResultsRequest)(nil), // 37: binlogdata.VStreamResultsRequest (*VStreamResultsResponse)(nil), // 38: binlogdata.VStreamResultsResponse - (*BinlogTransaction_Statement)(nil), // 39: binlogdata.BinlogTransaction.Statement - nil, // 40: binlogdata.Rule.ConvertEnumToTextEntry - nil, // 41: binlogdata.Rule.ConvertCharsetEntry - nil, // 42: binlogdata.Rule.ConvertIntToEnumEntry - (*RowChange_Bitmap)(nil), // 43: binlogdata.RowChange.Bitmap - nil, // 44: binlogdata.VStreamOptions.ConfigOverridesEntry - (*query.EventToken)(nil), // 45: query.EventToken - (*topodata.KeyRange)(nil), // 46: topodata.KeyRange - (topodata.TabletType)(0), // 47: topodata.TabletType - (*query.Row)(nil), // 48: query.Row - (*query.Field)(nil), // 49: query.Field - (*vtrpc.CallerID)(nil), // 50: vtrpc.CallerID - (*query.VTGateCallerID)(nil), // 51: query.VTGateCallerID - (*query.Target)(nil), // 52: query.Target - (*query.QueryResult)(nil), // 53: query.QueryResult + (*BinlogDumpRequest)(nil), // 39: binlogdata.BinlogDumpRequest + (*BinlogDumpGTIDRequest)(nil), // 40: binlogdata.BinlogDumpGTIDRequest + (*BinlogDumpResponse)(nil), // 41: binlogdata.BinlogDumpResponse + (*BinlogTransaction_Statement)(nil), // 42: binlogdata.BinlogTransaction.Statement + nil, // 43: binlogdata.Rule.ConvertEnumToTextEntry + nil, // 44: binlogdata.Rule.ConvertCharsetEntry + nil, // 45: binlogdata.Rule.ConvertIntToEnumEntry + (*RowChange_Bitmap)(nil), // 46: binlogdata.RowChange.Bitmap + nil, // 47: binlogdata.VStreamOptions.ConfigOverridesEntry + (*query.EventToken)(nil), // 48: query.EventToken + (*topodata.KeyRange)(nil), // 49: topodata.KeyRange + (topodata.TabletType)(0), // 50: topodata.TabletType + (*query.Row)(nil), // 51: query.Row + (*query.Field)(nil), // 52: query.Field + (*vtrpc.CallerID)(nil), // 53: vtrpc.CallerID + (*query.VTGateCallerID)(nil), // 54: query.VTGateCallerID + (*query.Target)(nil), // 55: query.Target + (*query.QueryResult)(nil), // 56: query.QueryResult } var file_binlogdata_proto_depIdxs = []int32{ - 39, // 0: binlogdata.BinlogTransaction.statements:type_name -> binlogdata.BinlogTransaction.Statement - 45, // 1: binlogdata.BinlogTransaction.event_token:type_name -> query.EventToken - 46, // 2: 
binlogdata.StreamKeyRangeRequest.key_range:type_name -> topodata.KeyRange + 42, // 0: binlogdata.BinlogTransaction.statements:type_name -> binlogdata.BinlogTransaction.Statement + 48, // 1: binlogdata.BinlogTransaction.event_token:type_name -> query.EventToken + 49, // 2: binlogdata.StreamKeyRangeRequest.key_range:type_name -> topodata.KeyRange 8, // 3: binlogdata.StreamKeyRangeRequest.charset:type_name -> binlogdata.Charset 9, // 4: binlogdata.StreamKeyRangeResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction 8, // 5: binlogdata.StreamTablesRequest.charset:type_name -> binlogdata.Charset 9, // 6: binlogdata.StreamTablesResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction - 40, // 7: binlogdata.Rule.convert_enum_to_text:type_name -> binlogdata.Rule.ConvertEnumToTextEntry - 41, // 8: binlogdata.Rule.convert_charset:type_name -> binlogdata.Rule.ConvertCharsetEntry - 42, // 9: binlogdata.Rule.convert_int_to_enum:type_name -> binlogdata.Rule.ConvertIntToEnumEntry + 43, // 7: binlogdata.Rule.convert_enum_to_text:type_name -> binlogdata.Rule.ConvertEnumToTextEntry + 44, // 8: binlogdata.Rule.convert_charset:type_name -> binlogdata.Rule.ConvertCharsetEntry + 45, // 9: binlogdata.Rule.convert_int_to_enum:type_name -> binlogdata.Rule.ConvertIntToEnumEntry 15, // 10: binlogdata.Filter.rules:type_name -> binlogdata.Rule 7, // 11: binlogdata.Filter.field_event_mode:type_name -> binlogdata.Filter.FieldEventMode - 47, // 12: binlogdata.BinlogSource.tablet_type:type_name -> topodata.TabletType - 46, // 13: binlogdata.BinlogSource.key_range:type_name -> topodata.KeyRange + 50, // 12: binlogdata.BinlogSource.tablet_type:type_name -> topodata.TabletType + 49, // 13: binlogdata.BinlogSource.key_range:type_name -> topodata.KeyRange 16, // 14: binlogdata.BinlogSource.filter:type_name -> binlogdata.Filter 0, // 15: binlogdata.BinlogSource.on_ddl:type_name -> binlogdata.OnDDLAction - 48, // 16: binlogdata.RowChange.before:type_name -> query.Row - 48, // 17: binlogdata.RowChange.after:type_name -> query.Row - 43, // 18: binlogdata.RowChange.data_columns:type_name -> binlogdata.RowChange.Bitmap - 43, // 19: binlogdata.RowChange.json_partial_values:type_name -> binlogdata.RowChange.Bitmap + 51, // 16: binlogdata.RowChange.before:type_name -> query.Row + 51, // 17: binlogdata.RowChange.after:type_name -> query.Row + 46, // 18: binlogdata.RowChange.data_columns:type_name -> binlogdata.RowChange.Bitmap + 46, // 19: binlogdata.RowChange.json_partial_values:type_name -> binlogdata.RowChange.Bitmap 18, // 20: binlogdata.RowEvent.row_changes:type_name -> binlogdata.RowChange - 49, // 21: binlogdata.FieldEvent.fields:type_name -> query.Field + 52, // 21: binlogdata.FieldEvent.fields:type_name -> query.Field 36, // 22: binlogdata.ShardGtid.table_p_ks:type_name -> binlogdata.TableLastPK 21, // 23: binlogdata.VGtid.shard_gtids:type_name -> binlogdata.ShardGtid 5, // 24: binlogdata.Journal.migration_type:type_name -> binlogdata.MigrationType @@ -3488,48 +3739,54 @@ var file_binlogdata_proto_depIdxs = []int32{ 22, // 30: binlogdata.VEvent.vgtid:type_name -> binlogdata.VGtid 24, // 31: binlogdata.VEvent.journal:type_name -> binlogdata.Journal 35, // 32: binlogdata.VEvent.last_p_k_event:type_name -> binlogdata.LastPKEvent - 49, // 33: binlogdata.MinimalTable.fields:type_name -> query.Field + 52, // 33: binlogdata.MinimalTable.fields:type_name -> query.Field 26, // 34: binlogdata.MinimalSchema.tables:type_name -> binlogdata.MinimalTable - 44, // 35: 
binlogdata.VStreamOptions.config_overrides:type_name -> binlogdata.VStreamOptions.ConfigOverridesEntry - 50, // 36: binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID - 51, // 37: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 52, // 38: binlogdata.VStreamRequest.target:type_name -> query.Target + 47, // 35: binlogdata.VStreamOptions.config_overrides:type_name -> binlogdata.VStreamOptions.ConfigOverridesEntry + 53, // 36: binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID + 54, // 37: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 55, // 38: binlogdata.VStreamRequest.target:type_name -> query.Target 16, // 39: binlogdata.VStreamRequest.filter:type_name -> binlogdata.Filter 36, // 40: binlogdata.VStreamRequest.table_last_p_ks:type_name -> binlogdata.TableLastPK 28, // 41: binlogdata.VStreamRequest.options:type_name -> binlogdata.VStreamOptions 25, // 42: binlogdata.VStreamResponse.events:type_name -> binlogdata.VEvent - 50, // 43: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID - 51, // 44: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 52, // 45: binlogdata.VStreamRowsRequest.target:type_name -> query.Target - 53, // 46: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult + 53, // 43: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID + 54, // 44: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 55, // 45: binlogdata.VStreamRowsRequest.target:type_name -> query.Target + 56, // 46: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult 28, // 47: binlogdata.VStreamRowsRequest.options:type_name -> binlogdata.VStreamOptions - 49, // 48: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field - 49, // 49: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field - 48, // 50: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row - 48, // 51: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row - 50, // 52: binlogdata.VStreamTablesRequest.effective_caller_id:type_name -> vtrpc.CallerID - 51, // 53: binlogdata.VStreamTablesRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 52, // 54: binlogdata.VStreamTablesRequest.target:type_name -> query.Target + 52, // 48: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field + 52, // 49: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field + 51, // 50: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row + 51, // 51: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row + 53, // 52: binlogdata.VStreamTablesRequest.effective_caller_id:type_name -> vtrpc.CallerID + 54, // 53: binlogdata.VStreamTablesRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 55, // 54: binlogdata.VStreamTablesRequest.target:type_name -> query.Target 28, // 55: binlogdata.VStreamTablesRequest.options:type_name -> binlogdata.VStreamOptions - 49, // 56: binlogdata.VStreamTablesResponse.fields:type_name -> query.Field - 49, // 57: binlogdata.VStreamTablesResponse.pkfields:type_name -> query.Field - 48, // 58: binlogdata.VStreamTablesResponse.rows:type_name -> query.Row - 48, // 59: binlogdata.VStreamTablesResponse.lastpk:type_name -> query.Row + 52, // 56: binlogdata.VStreamTablesResponse.fields:type_name -> query.Field + 52, // 57: binlogdata.VStreamTablesResponse.pkfields:type_name -> query.Field + 51, // 58: 
binlogdata.VStreamTablesResponse.rows:type_name -> query.Row + 51, // 59: binlogdata.VStreamTablesResponse.lastpk:type_name -> query.Row 36, // 60: binlogdata.LastPKEvent.table_last_p_k:type_name -> binlogdata.TableLastPK - 53, // 61: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult - 50, // 62: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID - 51, // 63: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 52, // 64: binlogdata.VStreamResultsRequest.target:type_name -> query.Target - 49, // 65: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field - 48, // 66: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row - 6, // 67: binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category - 8, // 68: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset - 14, // 69: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> binlogdata.CharsetConversion - 70, // [70:70] is the sub-list for method output_type - 70, // [70:70] is the sub-list for method input_type - 70, // [70:70] is the sub-list for extension type_name - 70, // [70:70] is the sub-list for extension extendee - 0, // [0:70] is the sub-list for field type_name + 56, // 61: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult + 53, // 62: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID + 54, // 63: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 55, // 64: binlogdata.VStreamResultsRequest.target:type_name -> query.Target + 52, // 65: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field + 51, // 66: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row + 53, // 67: binlogdata.BinlogDumpRequest.effective_caller_id:type_name -> vtrpc.CallerID + 54, // 68: binlogdata.BinlogDumpRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 55, // 69: binlogdata.BinlogDumpRequest.target:type_name -> query.Target + 53, // 70: binlogdata.BinlogDumpGTIDRequest.effective_caller_id:type_name -> vtrpc.CallerID + 54, // 71: binlogdata.BinlogDumpGTIDRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 55, // 72: binlogdata.BinlogDumpGTIDRequest.target:type_name -> query.Target + 6, // 73: binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category + 8, // 74: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset + 14, // 75: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> binlogdata.CharsetConversion + 76, // [76:76] is the sub-list for method output_type + 76, // [76:76] is the sub-list for method input_type + 76, // [76:76] is the sub-list for extension type_name + 76, // [76:76] is the sub-list for extension extendee + 0, // [0:76] is the sub-list for field type_name } func init() { file_binlogdata_proto_init() } @@ -3543,7 +3800,7 @@ func file_binlogdata_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_binlogdata_proto_rawDesc), len(file_binlogdata_proto_rawDesc)), NumEnums: 8, - NumMessages: 37, + NumMessages: 40, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go index d2b489a4e75..4d19ae9e3f1 100644 --- a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go +++ b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go @@ -876,6 +876,71 
@@ func (m *VStreamResultsResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *BinlogDumpRequest) CloneVT() *BinlogDumpRequest { + if m == nil { + return (*BinlogDumpRequest)(nil) + } + r := new(BinlogDumpRequest) + r.EffectiveCallerId = m.EffectiveCallerId.CloneVT() + r.ImmediateCallerId = m.ImmediateCallerId.CloneVT() + r.Target = m.Target.CloneVT() + r.BinlogFilename = m.BinlogFilename + r.BinlogPosition = m.BinlogPosition + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BinlogDumpRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BinlogDumpGTIDRequest) CloneVT() *BinlogDumpGTIDRequest { + if m == nil { + return (*BinlogDumpGTIDRequest)(nil) + } + r := new(BinlogDumpGTIDRequest) + r.EffectiveCallerId = m.EffectiveCallerId.CloneVT() + r.ImmediateCallerId = m.ImmediateCallerId.CloneVT() + r.Target = m.Target.CloneVT() + r.BinlogFilename = m.BinlogFilename + r.BinlogPosition = m.BinlogPosition + r.GtidSet = m.GtidSet + r.NonBlock = m.NonBlock + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BinlogDumpGTIDRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BinlogDumpResponse) CloneVT() *BinlogDumpResponse { + if m == nil { + return (*BinlogDumpResponse)(nil) + } + r := new(BinlogDumpResponse) + if rhs := m.Packet; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Packet = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BinlogDumpResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Charset) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -3292,6 +3357,213 @@ func (m *VStreamResultsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error return len(dAtA) - i, nil } +func (m *BinlogDumpRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BinlogDumpRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BinlogDumpRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BinlogPosition != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BinlogPosition)) + i-- + dAtA[i] = 0x28 + } + if len(m.BinlogFilename) > 0 { + i -= len(m.BinlogFilename) + copy(dAtA[i:], m.BinlogFilename) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BinlogFilename))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + 
size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BinlogDumpGTIDRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BinlogDumpGTIDRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BinlogDumpGTIDRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NonBlock { + i-- + if m.NonBlock { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.GtidSet) > 0 { + i -= len(m.GtidSet) + copy(dAtA[i:], m.GtidSet) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.GtidSet))) + i-- + dAtA[i] = 0x32 + } + if m.BinlogPosition != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BinlogPosition)) + i-- + dAtA[i] = 0x28 + } + if len(m.BinlogFilename) > 0 { + i -= len(m.BinlogFilename) + copy(dAtA[i:], m.BinlogFilename) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BinlogFilename))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BinlogDumpResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BinlogDumpResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BinlogDumpResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Packet) > 0 { + i -= len(m.Packet) + copy(dAtA[i:], m.Packet) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Packet))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + var vtprotoPool_VStreamRowsResponse = sync.Pool{ New: func() interface{} { return &VStreamRowsResponse{} @@ -4325,41 +4597,120 @@ func (m *VStreamResultsResponse) SizeVT() (n int) { return n } -func (m *Charset) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Charset: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Charset: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Client", wireType) - } - m.Client = 0 - for shift := uint(0); ; shift += 7 { +func (m *BinlogDumpRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Target != nil { + l = m.Target.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.BinlogFilename) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BinlogPosition != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BinlogPosition)) + } + n += len(m.unknownFields) + return n +} + +func (m *BinlogDumpGTIDRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Target != nil { + l = m.Target.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.BinlogFilename) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BinlogPosition != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BinlogPosition)) + } + l = len(m.GtidSet) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NonBlock { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *BinlogDumpResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Packet) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Charset) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Charset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Charset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Client", wireType) + } + m.Client = 0 + for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow } @@ -10975,3 +11326,560 @@ func (m *VStreamResultsResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *BinlogDumpRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinlogDumpRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinlogDumpRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &query.VTGateCallerID{} + } + if err := m.ImmediateCallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &query.Target{} + } + if err := m.Target.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogFilename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinlogFilename = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field BinlogPosition", wireType) + } + m.BinlogPosition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BinlogPosition |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinlogDumpGTIDRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinlogDumpGTIDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinlogDumpGTIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &query.VTGateCallerID{} + } + if err := m.ImmediateCallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + 
if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &query.Target{} + } + if err := m.Target.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogFilename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinlogFilename = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogPosition", wireType) + } + m.BinlogPosition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BinlogPosition |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GtidSet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GtidSet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NonBlock", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NonBlock = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinlogDumpResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinlogDumpResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinlogDumpResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Packet = append(m.Packet[:0], dAtA[iNdEx:postIndex]...) + if m.Packet == nil { + m.Packet = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go index fc19fd9e66b..6d18c723c67 100644 --- a/go/vt/proto/queryservice/queryservice.pb.go +++ b/go/vt/proto/queryservice/queryservice.pb.go @@ -43,7 +43,7 @@ var File_queryservice_proto protoreflect.FileDescriptor const file_queryservice_proto_rawDesc = "" + "\n" + - "\x12queryservice.proto\x12\fqueryservice\x1a\vquery.proto\x1a\x10binlogdata.proto2\x95\x12\n" + + "\x12queryservice.proto\x12\fqueryservice\x1a\vquery.proto\x1a\x10binlogdata.proto2\xbf\x13\n" + "\x05Query\x12:\n" + "\aExecute\x12\x15.query.ExecuteRequest\x1a\x16.query.ExecuteResponse\"\x00\x12N\n" + "\rStreamExecute\x12\x1b.query.StreamExecuteRequest\x1a\x1c.query.StreamExecuteResponse\"\x000\x01\x124\n" + @@ -74,7 +74,10 @@ const file_queryservice_proto_rawDesc = "" + "\vVStreamRows\x12\x1e.binlogdata.VStreamRowsRequest\x1a\x1f.binlogdata.VStreamRowsResponse\"\x000\x01\x12X\n" + "\rVStreamTables\x12 .binlogdata.VStreamTablesRequest\x1a!.binlogdata.VStreamTablesResponse\"\x000\x01\x12[\n" + "\x0eVStreamResults\x12!.binlogdata.VStreamResultsRequest\x1a\".binlogdata.VStreamResultsResponse\"\x000\x01\x12B\n" + - "\tGetSchema\x12\x17.query.GetSchemaRequest\x1a\x18.query.GetSchemaResponse\"\x000\x01B+Z)vitess.io/vitess/go/vt/proto/queryserviceb\x06proto3" + "\tGetSchema\x12\x17.query.GetSchemaRequest\x1a\x18.query.GetSchemaResponse\"\x000\x01\x12O\n" + + "\n" + + "BinlogDump\x12\x1d.binlogdata.BinlogDumpRequest\x1a\x1e.binlogdata.BinlogDumpResponse\"\x000\x01\x12W\n" + + "\x0eBinlogDumpGTID\x12!.binlogdata.BinlogDumpGTIDRequest\x1a\x1e.binlogdata.BinlogDumpResponse\"\x000\x01B+Z)vitess.io/vitess/go/vt/proto/queryserviceb\x06proto3" var file_queryservice_proto_goTypes = []any{ (*query.ExecuteRequest)(nil), // 0: query.ExecuteRequest @@ -106,35 +109,38 @@ var file_queryservice_proto_goTypes = []any{ (*binlogdata.VStreamTablesRequest)(nil), // 26: binlogdata.VStreamTablesRequest (*binlogdata.VStreamResultsRequest)(nil), // 27: binlogdata.VStreamResultsRequest (*query.GetSchemaRequest)(nil), // 28: query.GetSchemaRequest - (*query.ExecuteResponse)(nil), // 29: query.ExecuteResponse - (*query.StreamExecuteResponse)(nil), // 30: query.StreamExecuteResponse - (*query.BeginResponse)(nil), // 31: query.BeginResponse - (*query.CommitResponse)(nil), // 32: query.CommitResponse - (*query.RollbackResponse)(nil), // 33: query.RollbackResponse - (*query.PrepareResponse)(nil), // 34: query.PrepareResponse - (*query.CommitPreparedResponse)(nil), // 35: query.CommitPreparedResponse - (*query.RollbackPreparedResponse)(nil), // 36: query.RollbackPreparedResponse - (*query.CreateTransactionResponse)(nil), // 37: query.CreateTransactionResponse - (*query.StartCommitResponse)(nil), // 38: query.StartCommitResponse - (*query.SetRollbackResponse)(nil), // 39: query.SetRollbackResponse - (*query.ConcludeTransactionResponse)(nil), // 40: query.ConcludeTransactionResponse - (*query.ReadTransactionResponse)(nil), // 41: query.ReadTransactionResponse - (*query.UnresolvedTransactionsResponse)(nil), // 42: query.UnresolvedTransactionsResponse - (*query.BeginExecuteResponse)(nil), // 43: query.BeginExecuteResponse - (*query.BeginStreamExecuteResponse)(nil), // 44: query.BeginStreamExecuteResponse - (*query.MessageStreamResponse)(nil), // 45: query.MessageStreamResponse - (*query.MessageAckResponse)(nil), // 46: query.MessageAckResponse - 
(*query.ReserveExecuteResponse)(nil), // 47: query.ReserveExecuteResponse - (*query.ReserveBeginExecuteResponse)(nil), // 48: query.ReserveBeginExecuteResponse - (*query.ReserveStreamExecuteResponse)(nil), // 49: query.ReserveStreamExecuteResponse - (*query.ReserveBeginStreamExecuteResponse)(nil), // 50: query.ReserveBeginStreamExecuteResponse - (*query.ReleaseResponse)(nil), // 51: query.ReleaseResponse - (*query.StreamHealthResponse)(nil), // 52: query.StreamHealthResponse - (*binlogdata.VStreamResponse)(nil), // 53: binlogdata.VStreamResponse - (*binlogdata.VStreamRowsResponse)(nil), // 54: binlogdata.VStreamRowsResponse - (*binlogdata.VStreamTablesResponse)(nil), // 55: binlogdata.VStreamTablesResponse - (*binlogdata.VStreamResultsResponse)(nil), // 56: binlogdata.VStreamResultsResponse - (*query.GetSchemaResponse)(nil), // 57: query.GetSchemaResponse + (*binlogdata.BinlogDumpRequest)(nil), // 29: binlogdata.BinlogDumpRequest + (*binlogdata.BinlogDumpGTIDRequest)(nil), // 30: binlogdata.BinlogDumpGTIDRequest + (*query.ExecuteResponse)(nil), // 31: query.ExecuteResponse + (*query.StreamExecuteResponse)(nil), // 32: query.StreamExecuteResponse + (*query.BeginResponse)(nil), // 33: query.BeginResponse + (*query.CommitResponse)(nil), // 34: query.CommitResponse + (*query.RollbackResponse)(nil), // 35: query.RollbackResponse + (*query.PrepareResponse)(nil), // 36: query.PrepareResponse + (*query.CommitPreparedResponse)(nil), // 37: query.CommitPreparedResponse + (*query.RollbackPreparedResponse)(nil), // 38: query.RollbackPreparedResponse + (*query.CreateTransactionResponse)(nil), // 39: query.CreateTransactionResponse + (*query.StartCommitResponse)(nil), // 40: query.StartCommitResponse + (*query.SetRollbackResponse)(nil), // 41: query.SetRollbackResponse + (*query.ConcludeTransactionResponse)(nil), // 42: query.ConcludeTransactionResponse + (*query.ReadTransactionResponse)(nil), // 43: query.ReadTransactionResponse + (*query.UnresolvedTransactionsResponse)(nil), // 44: query.UnresolvedTransactionsResponse + (*query.BeginExecuteResponse)(nil), // 45: query.BeginExecuteResponse + (*query.BeginStreamExecuteResponse)(nil), // 46: query.BeginStreamExecuteResponse + (*query.MessageStreamResponse)(nil), // 47: query.MessageStreamResponse + (*query.MessageAckResponse)(nil), // 48: query.MessageAckResponse + (*query.ReserveExecuteResponse)(nil), // 49: query.ReserveExecuteResponse + (*query.ReserveBeginExecuteResponse)(nil), // 50: query.ReserveBeginExecuteResponse + (*query.ReserveStreamExecuteResponse)(nil), // 51: query.ReserveStreamExecuteResponse + (*query.ReserveBeginStreamExecuteResponse)(nil), // 52: query.ReserveBeginStreamExecuteResponse + (*query.ReleaseResponse)(nil), // 53: query.ReleaseResponse + (*query.StreamHealthResponse)(nil), // 54: query.StreamHealthResponse + (*binlogdata.VStreamResponse)(nil), // 55: binlogdata.VStreamResponse + (*binlogdata.VStreamRowsResponse)(nil), // 56: binlogdata.VStreamRowsResponse + (*binlogdata.VStreamTablesResponse)(nil), // 57: binlogdata.VStreamTablesResponse + (*binlogdata.VStreamResultsResponse)(nil), // 58: binlogdata.VStreamResultsResponse + (*query.GetSchemaResponse)(nil), // 59: query.GetSchemaResponse + (*binlogdata.BinlogDumpResponse)(nil), // 60: binlogdata.BinlogDumpResponse } var file_queryservice_proto_depIdxs = []int32{ 0, // 0: queryservice.Query.Execute:input_type -> query.ExecuteRequest @@ -166,37 +172,41 @@ var file_queryservice_proto_depIdxs = []int32{ 26, // 26: queryservice.Query.VStreamTables:input_type -> 
binlogdata.VStreamTablesRequest 27, // 27: queryservice.Query.VStreamResults:input_type -> binlogdata.VStreamResultsRequest 28, // 28: queryservice.Query.GetSchema:input_type -> query.GetSchemaRequest - 29, // 29: queryservice.Query.Execute:output_type -> query.ExecuteResponse - 30, // 30: queryservice.Query.StreamExecute:output_type -> query.StreamExecuteResponse - 31, // 31: queryservice.Query.Begin:output_type -> query.BeginResponse - 32, // 32: queryservice.Query.Commit:output_type -> query.CommitResponse - 33, // 33: queryservice.Query.Rollback:output_type -> query.RollbackResponse - 34, // 34: queryservice.Query.Prepare:output_type -> query.PrepareResponse - 35, // 35: queryservice.Query.CommitPrepared:output_type -> query.CommitPreparedResponse - 36, // 36: queryservice.Query.RollbackPrepared:output_type -> query.RollbackPreparedResponse - 37, // 37: queryservice.Query.CreateTransaction:output_type -> query.CreateTransactionResponse - 38, // 38: queryservice.Query.StartCommit:output_type -> query.StartCommitResponse - 39, // 39: queryservice.Query.SetRollback:output_type -> query.SetRollbackResponse - 40, // 40: queryservice.Query.ConcludeTransaction:output_type -> query.ConcludeTransactionResponse - 41, // 41: queryservice.Query.ReadTransaction:output_type -> query.ReadTransactionResponse - 42, // 42: queryservice.Query.UnresolvedTransactions:output_type -> query.UnresolvedTransactionsResponse - 43, // 43: queryservice.Query.BeginExecute:output_type -> query.BeginExecuteResponse - 44, // 44: queryservice.Query.BeginStreamExecute:output_type -> query.BeginStreamExecuteResponse - 45, // 45: queryservice.Query.MessageStream:output_type -> query.MessageStreamResponse - 46, // 46: queryservice.Query.MessageAck:output_type -> query.MessageAckResponse - 47, // 47: queryservice.Query.ReserveExecute:output_type -> query.ReserveExecuteResponse - 48, // 48: queryservice.Query.ReserveBeginExecute:output_type -> query.ReserveBeginExecuteResponse - 49, // 49: queryservice.Query.ReserveStreamExecute:output_type -> query.ReserveStreamExecuteResponse - 50, // 50: queryservice.Query.ReserveBeginStreamExecute:output_type -> query.ReserveBeginStreamExecuteResponse - 51, // 51: queryservice.Query.Release:output_type -> query.ReleaseResponse - 52, // 52: queryservice.Query.StreamHealth:output_type -> query.StreamHealthResponse - 53, // 53: queryservice.Query.VStream:output_type -> binlogdata.VStreamResponse - 54, // 54: queryservice.Query.VStreamRows:output_type -> binlogdata.VStreamRowsResponse - 55, // 55: queryservice.Query.VStreamTables:output_type -> binlogdata.VStreamTablesResponse - 56, // 56: queryservice.Query.VStreamResults:output_type -> binlogdata.VStreamResultsResponse - 57, // 57: queryservice.Query.GetSchema:output_type -> query.GetSchemaResponse - 29, // [29:58] is the sub-list for method output_type - 0, // [0:29] is the sub-list for method input_type + 29, // 29: queryservice.Query.BinlogDump:input_type -> binlogdata.BinlogDumpRequest + 30, // 30: queryservice.Query.BinlogDumpGTID:input_type -> binlogdata.BinlogDumpGTIDRequest + 31, // 31: queryservice.Query.Execute:output_type -> query.ExecuteResponse + 32, // 32: queryservice.Query.StreamExecute:output_type -> query.StreamExecuteResponse + 33, // 33: queryservice.Query.Begin:output_type -> query.BeginResponse + 34, // 34: queryservice.Query.Commit:output_type -> query.CommitResponse + 35, // 35: queryservice.Query.Rollback:output_type -> query.RollbackResponse + 36, // 36: queryservice.Query.Prepare:output_type -> 
query.PrepareResponse + 37, // 37: queryservice.Query.CommitPrepared:output_type -> query.CommitPreparedResponse + 38, // 38: queryservice.Query.RollbackPrepared:output_type -> query.RollbackPreparedResponse + 39, // 39: queryservice.Query.CreateTransaction:output_type -> query.CreateTransactionResponse + 40, // 40: queryservice.Query.StartCommit:output_type -> query.StartCommitResponse + 41, // 41: queryservice.Query.SetRollback:output_type -> query.SetRollbackResponse + 42, // 42: queryservice.Query.ConcludeTransaction:output_type -> query.ConcludeTransactionResponse + 43, // 43: queryservice.Query.ReadTransaction:output_type -> query.ReadTransactionResponse + 44, // 44: queryservice.Query.UnresolvedTransactions:output_type -> query.UnresolvedTransactionsResponse + 45, // 45: queryservice.Query.BeginExecute:output_type -> query.BeginExecuteResponse + 46, // 46: queryservice.Query.BeginStreamExecute:output_type -> query.BeginStreamExecuteResponse + 47, // 47: queryservice.Query.MessageStream:output_type -> query.MessageStreamResponse + 48, // 48: queryservice.Query.MessageAck:output_type -> query.MessageAckResponse + 49, // 49: queryservice.Query.ReserveExecute:output_type -> query.ReserveExecuteResponse + 50, // 50: queryservice.Query.ReserveBeginExecute:output_type -> query.ReserveBeginExecuteResponse + 51, // 51: queryservice.Query.ReserveStreamExecute:output_type -> query.ReserveStreamExecuteResponse + 52, // 52: queryservice.Query.ReserveBeginStreamExecute:output_type -> query.ReserveBeginStreamExecuteResponse + 53, // 53: queryservice.Query.Release:output_type -> query.ReleaseResponse + 54, // 54: queryservice.Query.StreamHealth:output_type -> query.StreamHealthResponse + 55, // 55: queryservice.Query.VStream:output_type -> binlogdata.VStreamResponse + 56, // 56: queryservice.Query.VStreamRows:output_type -> binlogdata.VStreamRowsResponse + 57, // 57: queryservice.Query.VStreamTables:output_type -> binlogdata.VStreamTablesResponse + 58, // 58: queryservice.Query.VStreamResults:output_type -> binlogdata.VStreamResultsResponse + 59, // 59: queryservice.Query.GetSchema:output_type -> query.GetSchemaResponse + 60, // 60: queryservice.Query.BinlogDump:output_type -> binlogdata.BinlogDumpResponse + 60, // 61: queryservice.Query.BinlogDumpGTID:output_type -> binlogdata.BinlogDumpResponse + 31, // [31:62] is the sub-list for method output_type + 0, // [0:31] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/go/vt/proto/queryservice/queryservice_grpc.pb.go b/go/vt/proto/queryservice/queryservice_grpc.pb.go index ebf52ce4dcf..9ad511b9ac9 100644 --- a/go/vt/proto/queryservice/queryservice_grpc.pb.go +++ b/go/vt/proto/queryservice/queryservice_grpc.pb.go @@ -87,6 +87,10 @@ type QueryClient interface { VStreamResults(ctx context.Context, in *binlogdata.VStreamResultsRequest, opts ...grpc.CallOption) (Query_VStreamResultsClient, error) // GetSchema returns the schema information. GetSchema(ctx context.Context, in *query.GetSchemaRequest, opts ...grpc.CallOption) (Query_GetSchemaClient, error) + // BinlogDump streams raw binlog events from MySQL using COM_BINLOG_DUMP (file/position-based). + BinlogDump(ctx context.Context, in *binlogdata.BinlogDumpRequest, opts ...grpc.CallOption) (Query_BinlogDumpClient, error) + // BinlogDumpGTID streams raw binlog events from MySQL using COM_BINLOG_DUMP_GTID (GTID-based). 
+ BinlogDumpGTID(ctx context.Context, in *binlogdata.BinlogDumpGTIDRequest, opts ...grpc.CallOption) (Query_BinlogDumpGTIDClient, error) } type queryClient struct { @@ -611,6 +615,70 @@ func (x *queryGetSchemaClient) Recv() (*query.GetSchemaResponse, error) { return m, nil } +func (c *queryClient) BinlogDump(ctx context.Context, in *binlogdata.BinlogDumpRequest, opts ...grpc.CallOption) (Query_BinlogDumpClient, error) { + stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[11], "/queryservice.Query/BinlogDump", opts...) + if err != nil { + return nil, err + } + x := &queryBinlogDumpClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_BinlogDumpClient interface { + Recv() (*binlogdata.BinlogDumpResponse, error) + grpc.ClientStream +} + +type queryBinlogDumpClient struct { + grpc.ClientStream +} + +func (x *queryBinlogDumpClient) Recv() (*binlogdata.BinlogDumpResponse, error) { + m := new(binlogdata.BinlogDumpResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *queryClient) BinlogDumpGTID(ctx context.Context, in *binlogdata.BinlogDumpGTIDRequest, opts ...grpc.CallOption) (Query_BinlogDumpGTIDClient, error) { + stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[12], "/queryservice.Query/BinlogDumpGTID", opts...) + if err != nil { + return nil, err + } + x := &queryBinlogDumpGTIDClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_BinlogDumpGTIDClient interface { + Recv() (*binlogdata.BinlogDumpResponse, error) + grpc.ClientStream +} + +type queryBinlogDumpGTIDClient struct { + grpc.ClientStream +} + +func (x *queryBinlogDumpGTIDClient) Recv() (*binlogdata.BinlogDumpResponse, error) { + m := new(binlogdata.BinlogDumpResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // QueryServer is the server API for Query service. // All implementations must embed UnimplementedQueryServer // for forward compatibility @@ -678,6 +746,10 @@ type QueryServer interface { VStreamResults(*binlogdata.VStreamResultsRequest, Query_VStreamResultsServer) error // GetSchema returns the schema information. GetSchema(*query.GetSchemaRequest, Query_GetSchemaServer) error + // BinlogDump streams raw binlog events from MySQL using COM_BINLOG_DUMP (file/position-based). + BinlogDump(*binlogdata.BinlogDumpRequest, Query_BinlogDumpServer) error + // BinlogDumpGTID streams raw binlog events from MySQL using COM_BINLOG_DUMP_GTID (GTID-based). 
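As a reading aid for the generated streaming plumbing in this hunk, here is a minimal sketch of how the new BinlogDumpGTID RPC could be consumed directly through the generated stubs. The address, dial options, and request values are illustrative assumptions; production callers go through grpctabletconn (added later in this diff), and NewQueryClient is the constructor that protoc-gen-go-grpc normally emits for this service.

package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
	queryservicepb "vitess.io/vitess/go/vt/proto/queryservice"
)

func main() {
	// "tablet:16010" is a placeholder address, not part of this change.
	cc, err := grpc.Dial("tablet:16010", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	stream, err := queryservicepb.NewQueryClient(cc).BinlogDumpGTID(context.Background(),
		&binlogdatapb.BinlogDumpGTIDRequest{
			// Target would normally be set as well; values here are illustrative.
			GtidSet: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-100",
		})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return // server closed the stream
		}
		if err != nil {
			log.Fatal(err)
		}
		// Each response carries one raw MySQL replication packet.
		log.Printf("received %d-byte packet", len(resp.Packet))
	}
}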
+ BinlogDumpGTID(*binlogdata.BinlogDumpGTIDRequest, Query_BinlogDumpGTIDServer) error mustEmbedUnimplementedQueryServer() } @@ -772,6 +844,12 @@ func (UnimplementedQueryServer) VStreamResults(*binlogdata.VStreamResultsRequest func (UnimplementedQueryServer) GetSchema(*query.GetSchemaRequest, Query_GetSchemaServer) error { return status.Errorf(codes.Unimplemented, "method GetSchema not implemented") } +func (UnimplementedQueryServer) BinlogDump(*binlogdata.BinlogDumpRequest, Query_BinlogDumpServer) error { + return status.Errorf(codes.Unimplemented, "method BinlogDump not implemented") +} +func (UnimplementedQueryServer) BinlogDumpGTID(*binlogdata.BinlogDumpGTIDRequest, Query_BinlogDumpGTIDServer) error { + return status.Errorf(codes.Unimplemented, "method BinlogDumpGTID not implemented") +} func (UnimplementedQueryServer) mustEmbedUnimplementedQueryServer() {} // UnsafeQueryServer may be embedded to opt out of forward compatibility for this service. @@ -1340,6 +1418,48 @@ func (x *queryGetSchemaServer) Send(m *query.GetSchemaResponse) error { return x.ServerStream.SendMsg(m) } +func _Query_BinlogDump_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.BinlogDumpRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).BinlogDump(m, &queryBinlogDumpServer{stream}) +} + +type Query_BinlogDumpServer interface { + Send(*binlogdata.BinlogDumpResponse) error + grpc.ServerStream +} + +type queryBinlogDumpServer struct { + grpc.ServerStream +} + +func (x *queryBinlogDumpServer) Send(m *binlogdata.BinlogDumpResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Query_BinlogDumpGTID_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.BinlogDumpGTIDRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).BinlogDumpGTID(m, &queryBinlogDumpGTIDServer{stream}) +} + +type Query_BinlogDumpGTIDServer interface { + Send(*binlogdata.BinlogDumpResponse) error + grpc.ServerStream +} + +type queryBinlogDumpGTIDServer struct { + grpc.ServerStream +} + +func (x *queryBinlogDumpGTIDServer) Send(m *binlogdata.BinlogDumpResponse) error { + return x.ServerStream.SendMsg(m) +} + // Query_ServiceDesc is the grpc.ServiceDesc for Query service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1476,6 +1596,16 @@ var Query_ServiceDesc = grpc.ServiceDesc{ Handler: _Query_GetSchema_Handler, ServerStreams: true, }, + { + StreamName: "BinlogDump", + Handler: _Query_BinlogDump_Handler, + ServerStreams: true, + }, + { + StreamName: "BinlogDumpGTID", + Handler: _Query_BinlogDumpGTID_Handler, + ServerStreams: true, + }, }, Metadata: "queryservice.proto", } diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 0185ab00d42..b36993ce4b6 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -759,6 +759,26 @@ func (itc *internalTabletConn) VStreamResults( return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } +// BinlogDump is part of the QueryService interface. +func (itc *internalTabletConn) BinlogDump( + ctx context.Context, + request *binlogdatapb.BinlogDumpRequest, + send func(*binlogdatapb.BinlogDumpResponse) error, +) error { + err := itc.tablet.qsc.QueryService().BinlogDump(ctx, request, send) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) +} + +// BinlogDumpGTID is part of the QueryService interface. 
+func (itc *internalTabletConn) BinlogDumpGTID( + ctx context.Context, + request *binlogdatapb.BinlogDumpGTIDRequest, + send func(*binlogdatapb.BinlogDumpResponse) error, +) error { + err := itc.tablet.qsc.QueryService().BinlogDumpGTID(ctx, request, send) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) +} + // // TabletManagerClient implementation // diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index 67d94b5ef6e..ca1a283656b 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -32,19 +32,23 @@ import ( "github.com/google/uuid" "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" - - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/callinfo" + "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/utils" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" @@ -469,13 +473,280 @@ func (vh *vtgateHandler) ComRegisterReplica(c *mysql.Conn, replicaHost string, r } // ComBinlogDump is part of the mysql.Handler interface. +// It handles file/position-based binlog dump requests by forwarding them to a targeted vttablet. +// The target tablet is determined from the session's TargetString, which can be set via: +// 1. A USE statement (e.g., "USE `keyspace:shard@type|alias`"), or +// 2. The username during connection (format: "user|keyspace:shard@type|alias") +// The target format is "keyspace:shard@tablet_type|tablet_alias" (e.g., "commerce:-80@replica|zone1-100"). func (vh *vtgateHandler) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos uint32) error { - return vterrors.VT12001("ComBinlogDump for the VTGate handler") + // Check for shutdown before starting a long-lived stream + if c.IsShuttingDown() { + c.MarkForClose() + return sqlerror.NewSQLError(sqlerror.ERServerShutdown, sqlerror.SSNetError, "Server shutdown in progress") + } + + // Track this connection as busy for graceful shutdown + vh.busyConnections.Add(1) + defer vh.busyConnections.Add(-1) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Add call info for observability + ctx = callinfo.MysqlCallInfo(ctx, c) + + // Fill in the ImmediateCallerID with the UserData returned by + // the AuthServer plugin for that user. If nothing was + // returned, use the User. This lets the plugin map a MySQL + // user used for authentication to a Vitess User used for + // Table ACLs and Vitess authentication in general. 
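Before the handler builds its caller-ID context below, it may help to see what the documented target format is expected to produce when it reaches the topoproto.ParseDestination call further down in this function. A hedged sketch mirroring that call; the commented values are what the format description implies, not verified output.

package main

import (
	"fmt"
	"log"

	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
	"vitess.io/vitess/go/vt/topo/topoproto"
)

func main() {
	// Same call the handler makes, on the example target from its doc comment.
	keyspace, tabletType, dest, tabletAlias, err := topoproto.ParseDestination(
		"commerce:-80@replica|zone1-100", topodatapb.TabletType_UNKNOWN)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(keyspace)                                 // expected: commerce
	fmt.Println(tabletType)                               // expected: REPLICA
	fmt.Println(dest)                                     // expected: a key.DestinationShard for "-80"
	fmt.Println(topoproto.TabletAliasString(tabletAlias)) // expected: zone1-100
}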
+ im := c.UserData.Get() + ef := callerid.NewEffectiveCallerID( + c.User, /* principal: who */ + c.RemoteAddr().String(), /* component: running client process */ + "VTGate MySQL Connector" /* subcomponent: part of the client */) + ctx = callerid.NewContext(ctx, ef, im) + + // Get the target from the session (set by USE statement or parsed from username during handshake) + session := vh.session(c) + targetString := session.TargetString + + if targetString == "" { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no target specified for binlog dump; use 'USE keyspace:shard@type|alias' or connect with username 'user|keyspace:shard@type|alias'") + } + + // Parse the target string to extract the tablet alias + keyspace, tabletType, dest, tabletAlias, err := topoproto.ParseDestination(targetString, topodatapb.TabletType_UNKNOWN) + if err != nil { + return vterrors.Wrapf(err, "failed to parse target: %s", targetString) + } + if tabletAlias == nil { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "target must include tablet alias (e.g., 'keyspace:shard@type|zone1-100'): %s", targetString) + } + + // Build the target for the tablet connection + var target *querypb.Target + if keyspace != "" { + target = &querypb.Target{ + Keyspace: keyspace, + TabletType: tabletType, + } + if dest != nil { + // Extract shard from destination - need to type assert to get the raw shard name + if ds, ok := dest.(key.DestinationShard); ok { + target.Shard = string(ds) + } else { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "binlog dump requires a specific shard, got: %s", dest.String()) + } + } + } + + // Get the query service connection to the tablet + qs, err := vh.vtg.Gateway().QueryServiceByAlias(ctx, tabletAlias, target) + if err != nil { + return vterrors.Wrapf(err, "failed to get connection to tablet %s", topoproto.TabletAliasString(tabletAlias)) + } + + // Build the BinlogDump request (file/position-based) + request := &binlogdatapb.BinlogDumpRequest{ + BinlogFilename: logFile, + BinlogPosition: binlogPos, + } + if target != nil { + request.Target = target + } + + // Track streaming state for proper error handling. + // If an error occurs mid-message (after sending a max-size packet fragment), + // we can't send a clean error packet - we must just close the connection. + var inProgressMessage bool + var streamingStarted bool + + // Stream binlog packets from the tablet + err = qs.BinlogDump(ctx, request, func(response *binlogdatapb.BinlogDumpResponse) error { + streamingStarted = true + packet := response.Packet + + if err := c.WritePacketDirect(packet); err != nil { + return err + } + + // A packet of exactly MaxPacketSize indicates more fragments follow + inProgressMessage = len(packet) == mysql.MaxPacketSize + return nil + }) + if err != nil { + // If streaming never started, return the error normally so the + // handler framework can send a proper error packet to the client. + if !streamingStarted { + return vterrors.Wrapf(err, "binlog dump failed") + } + + // Streaming started. We need to handle the error carefully. + if inProgressMessage { + // We're mid-message (sent a max-size fragment). We can't send + // a clean error packet since the client is expecting more data. + // Just close the connection. + c.MarkForClose() + log.Errorf("ComBinlogDump: error mid-packet, closing connection: %v", err) + return nil + } + + // At a message boundary - we can send a proper error packet. 
+ if writeErr := c.WriteErrorPacketFromError(err); writeErr != nil { + log.Errorf("ComBinlogDump: failed to write error packet: %v", writeErr) + } + c.MarkForClose() + log.Errorf("ComBinlogDump: %v", err) + return nil + } + + return nil } // ComBinlogDumpGTID is part of the mysql.Handler interface. -func (vh *vtgateHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { - return vterrors.VT12001("ComBinlogDumpGTID for the VTGate handler") +// It handles binlog dump requests by forwarding them to a targeted vttablet. +// The target tablet is determined from the session's TargetString, which can be set via: +// 1. A USE statement (e.g., "USE `keyspace:shard@type|alias`"), or +// 2. The username during connection (format: "user|keyspace:shard@type|alias") +// The target format is "keyspace:shard@tablet_type|tablet_alias" (e.g., "commerce:-80@replica|zone1-100"). +func (vh *vtgateHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet, nonBlock bool) error { + // Check for shutdown before starting a long-lived stream + if c.IsShuttingDown() { + c.MarkForClose() + return sqlerror.NewSQLError(sqlerror.ERServerShutdown, sqlerror.SSNetError, "Server shutdown in progress") + } + + // Track this connection as busy for graceful shutdown + vh.busyConnections.Add(1) + defer vh.busyConnections.Add(-1) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Add call info for observability + ctx = callinfo.MysqlCallInfo(ctx, c) + + // Fill in the ImmediateCallerID with the UserData returned by + // the AuthServer plugin for that user. If nothing was + // returned, use the User. This lets the plugin map a MySQL + // user used for authentication to a Vitess User used for + // Table ACLs and Vitess authentication in general. 
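The GtidSet field that this handler fills in further down (request.GtidSet = gtidSet.String()) travels as a plain string and is parsed back on the vttablet side (see TabletServer.BinlogDumpGTID later in this diff). A small round-trip sketch of that convention, using only functions already referenced elsewhere in this change:

package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/mysql/replication"
)

func main() {
	// vtgate side: the GTID set parsed from the COM_BINLOG_DUMP_GTID packet is
	// forwarded as its string form in BinlogDumpGTIDRequest.GtidSet.
	gtidSet, err := replication.ParseMysql56GTIDSet("16b1039f-22b6-11ed-b765-0a43f95f28a3:1-100")
	if err != nil {
		log.Fatal(err)
	}
	wire := gtidSet.String()

	// vttablet side: the string is parsed back and wrapped in a Position before
	// the dump command is sent to MySQL.
	parsed, err := replication.ParseMysql56GTIDSet(wire)
	if err != nil {
		log.Fatal(err)
	}
	startPos := replication.Position{GTIDSet: parsed}
	fmt.Println(startPos.GTIDSet.String()) // 16b1039f-22b6-11ed-b765-0a43f95f28a3:1-100
}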
+ im := c.UserData.Get() + ef := callerid.NewEffectiveCallerID( + c.User, /* principal: who */ + c.RemoteAddr().String(), /* component: running client process */ + "VTGate MySQL Connector" /* subcomponent: part of the client */) + ctx = callerid.NewContext(ctx, ef, im) + + // Get the target from the session (set by USE statement or parsed from username during handshake) + session := vh.session(c) + targetString := session.TargetString + + if targetString == "" { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no target specified for binlog dump; use 'USE keyspace:shard@type|alias' or connect with username 'user|keyspace:shard@type|alias'") + } + + // Parse the target string to extract the tablet alias + keyspace, tabletType, dest, tabletAlias, err := topoproto.ParseDestination(targetString, topodatapb.TabletType_UNKNOWN) + if err != nil { + return vterrors.Wrapf(err, "failed to parse target: %s", targetString) + } + if tabletAlias == nil { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "target must include tablet alias (e.g., 'keyspace:shard@type|zone1-100'): %s", targetString) + } + + // Build the target for the tablet connection + var target *querypb.Target + if keyspace != "" { + target = &querypb.Target{ + Keyspace: keyspace, + TabletType: tabletType, + } + if dest != nil { + // Extract shard from destination - need to type assert to get the raw shard name + if ds, ok := dest.(key.DestinationShard); ok { + target.Shard = string(ds) + } else { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "binlog dump requires a specific shard, got: %s", dest.String()) + } + } + } + + // Get the query service connection to the tablet + qs, err := vh.vtg.Gateway().QueryServiceByAlias(ctx, tabletAlias, target) + if err != nil { + return vterrors.Wrapf(err, "failed to get connection to tablet %s", topoproto.TabletAliasString(tabletAlias)) + } + + // Build the BinlogDumpGTID request + request := &binlogdatapb.BinlogDumpGTIDRequest{ + BinlogFilename: logFile, + BinlogPosition: logPos, + NonBlock: nonBlock, + } + if gtidSet != nil { + request.GtidSet = gtidSet.String() + } + if target != nil { + request.Target = target + } + + // TODO: Add support for replication session variables (for Fivetran MySQL adapter compatibility): + // - @master_heartbeat_period / @source_heartbeat_period: Controls heartbeat frequency + // - @master_binlog_checksum / @source_binlog_checksum: Controls checksum algorithm + // Implementation requires: + // 1. Add heartbeat_period_ns and binlog_checksum fields to BinlogDumpRequest proto + // 2. Extract user-defined variables from session.UserDefinedVariables here + // 3. Apply variables in vttablet's BinlogDump before sending COM_BINLOG_DUMP_GTID + // See: https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html + + // Track streaming state for proper error handling. + // If an error occurs mid-message (after sending a max-size packet fragment), + // we can't send a clean error packet - we must just close the connection. 
+ var inProgressMessage bool + var streamingStarted bool + + // Stream binlog packets from the tablet + err = qs.BinlogDumpGTID(ctx, request, func(response *binlogdatapb.BinlogDumpResponse) error { + streamingStarted = true + packet := response.Packet + + if err := c.WritePacketDirect(packet); err != nil { + return err + } + + // A packet of exactly MaxPacketSize indicates more fragments follow + inProgressMessage = len(packet) == mysql.MaxPacketSize + return nil + }) + if err != nil { + // If streaming never started, return the error normally so the + // handler framework can send a proper error packet to the client. + if !streamingStarted { + return vterrors.Wrapf(err, "binlog dump failed") + } + + // Streaming started. We need to handle the error carefully. + if inProgressMessage { + // We're mid-message (sent a max-size fragment). We can't send + // a clean error packet since the client is expecting more data. + // Just close the connection. + c.MarkForClose() + log.Errorf("ComBinlogDumpGTID: error mid-packet, closing connection: %v", err) + return nil + } + + // At a message boundary - we can send a proper error packet. + if writeErr := c.WriteErrorPacketFromError(err); writeErr != nil { + log.Errorf("ComBinlogDumpGTID: failed to write error packet: %v", writeErr) + } + c.MarkForClose() + log.Errorf("ComBinlogDumpGTID: %v", err) + return nil + } + + return nil } // KillConnection closes an open connection by connection ID. diff --git a/go/vt/vtgate/plugin_mysql_server_test.go b/go/vt/vtgate/plugin_mysql_server_test.go index 4891fc3461f..4fccf753506 100644 --- a/go/vt/vtgate/plugin_mysql_server_test.go +++ b/go/vt/vtgate/plugin_mysql_server_test.go @@ -37,9 +37,11 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/trace" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tlstest" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtenv" ) @@ -96,7 +98,7 @@ func (th *testHandler) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos ui return nil } -func (th *testHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { +func (th *testHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet, nonBlock bool) error { return nil } @@ -861,6 +863,121 @@ func TestGracefulShutdown(t *testing.T) { require.True(t, mysqlConn.IsMarkedForClose()) } +func TestComBinlogDumpGTID(t *testing.T) { + // Create executor environment with sandbox connections + executor, sbc1, _, _, _ := createExecutorEnv(t) + + // Create VTGate with the gateway + vtg := newVTGate(executor, executor.resolver, nil, nil, executor.scatterConn.gateway) + + // Get the tablet alias from the sandbox connection + tabletAlias := sbc1.Tablet().Alias + + // Create the vtgate handler + vh := newVtgateHandler(vtg) + th := &testHandler{} + listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0, false) + require.NoError(t, err) + defer listener.Close() + + // Create a connection + mysqlConn := mysql.GetTestServerConn(listener) + mysqlConn.ConnectionID = 1 + mysqlConn.UserData = &mysql.StaticUserData{} + vh.connections[1] = mysqlConn + + t.Run("no target specified", func(t *testing.T) { + // Clear any previous target + vh.session(mysqlConn).TargetString = "" + mysqlConn.User = "testuser" + + err := 
vh.ComBinlogDumpGTID(mysqlConn, "", 0, nil, false) + require.Error(t, err) + assert.Contains(t, err.Error(), "no target specified") + }) + + t.Run("target from session TargetString", func(t *testing.T) { + // Set up empty responses + sbc1.BinlogDumpError = nil + sbc1.BinlogDumpResponses = []*binlogdatapb.BinlogDumpResponse{} + + // Set the session target (normally set by USE statement or parsed from username) + targetString := "TestExecutor:-20@primary|" + topoproto.TabletAliasString(tabletAlias) + vh.session(mysqlConn).TargetString = targetString + mysqlConn.User = "testuser" + + err := vh.ComBinlogDumpGTID(mysqlConn, "", 0, nil, false) + require.NoError(t, err) + }) + + t.Run("target without tablet alias", func(t *testing.T) { + vh.session(mysqlConn).TargetString = "TestExecutor:-20@primary" + + err := vh.ComBinlogDumpGTID(mysqlConn, "", 0, nil, false) + require.Error(t, err) + assert.Contains(t, err.Error(), "target must include tablet alias") + }) + + t.Run("binlog dump with error from tablet", func(t *testing.T) { + // Set up an error response + sbc1.BinlogDumpError = errors.New("test binlog error") + defer func() { sbc1.BinlogDumpError = nil }() + + targetString := "TestExecutor:-20@primary|" + topoproto.TabletAliasString(tabletAlias) + vh.session(mysqlConn).TargetString = targetString + + err := vh.ComBinlogDumpGTID(mysqlConn, "", 0, nil, false) + require.Error(t, err) + assert.Contains(t, err.Error(), "test binlog error") + }) + + t.Run("binlog dump with empty response succeeds", func(t *testing.T) { + // Reset error and set up empty responses (no events to write) + sbc1.BinlogDumpError = nil + sbc1.BinlogDumpResponses = []*binlogdatapb.BinlogDumpResponse{} + + targetString := "TestExecutor:-20@primary|" + topoproto.TabletAliasString(tabletAlias) + vh.session(mysqlConn).TargetString = targetString + + err := vh.ComBinlogDumpGTID(mysqlConn, "binlog.000001", 4, nil, false) + require.NoError(t, err) + }) + + t.Run("binlog dump with GTID set and empty response", func(t *testing.T) { + // Reset error + sbc1.BinlogDumpError = nil + sbc1.BinlogDumpResponses = []*binlogdatapb.BinlogDumpResponse{} + + targetString := "TestExecutor:-20@primary|" + topoproto.TabletAliasString(tabletAlias) + vh.session(mysqlConn).TargetString = targetString + + gtidSet, err := replication.ParseMysql56GTIDSet("16b1039f-22b6-11ed-b765-0a43f95f28a3:1-100") + require.NoError(t, err) + + err = vh.ComBinlogDumpGTID(mysqlConn, "", 0, gtidSet, false) + require.NoError(t, err) + }) + + t.Run("invalid tablet alias in target", func(t *testing.T) { + vh.session(mysqlConn).TargetString = "TestExecutor:-20@primary|invalid-alias" + + err := vh.ComBinlogDumpGTID(mysqlConn, "", 0, nil, false) + require.Error(t, err) + // The error could be about parsing the alias or not finding the tablet + assert.True(t, strings.Contains(err.Error(), "invalid") || strings.Contains(err.Error(), "not found"), + "Expected error about invalid alias or tablet not found, got: %v", err) + }) + + t.Run("nonexistent tablet alias", func(t *testing.T) { + // Use a valid format but non-existent alias + vh.session(mysqlConn).TargetString = "TestExecutor:-20@primary|aa-9999999" + + err := vh.ComBinlogDumpGTID(mysqlConn, "", 0, nil, false) + require.Error(t, err) + assert.Contains(t, err.Error(), "not found") + }) +} + func TestGracefulShutdownWithTransaction(t *testing.T) { executor, _, _, _, _ := createExecutorEnv(t) diff --git a/go/vt/vttablet/grpcqueryservice/server.go b/go/vt/vttablet/grpcqueryservice/server.go index 44bef54523c..05c3aca647b 100644 --- 
a/go/vt/vttablet/grpcqueryservice/server.go +++ b/go/vt/vttablet/grpcqueryservice/server.go @@ -385,6 +385,28 @@ func (q *query) VStreamResults(request *binlogdatapb.VStreamResultsRequest, stre return vterrors.ToGRPC(err) } +// BinlogDump is part of the queryservice.QueryServer interface +func (q *query) BinlogDump(request *binlogdatapb.BinlogDumpRequest, stream queryservicepb.Query_BinlogDumpServer) (err error) { + defer q.server.HandlePanic(&err) + ctx := callerid.NewContext(callinfo.GRPCCallInfo(stream.Context()), + request.EffectiveCallerId, + request.ImmediateCallerId, + ) + err = q.server.BinlogDump(ctx, request, stream.Send) + return vterrors.ToGRPC(err) +} + +// BinlogDumpGTID is part of the queryservice.QueryServer interface +func (q *query) BinlogDumpGTID(request *binlogdatapb.BinlogDumpGTIDRequest, stream queryservicepb.Query_BinlogDumpGTIDServer) (err error) { + defer q.server.HandlePanic(&err) + ctx := callerid.NewContext(callinfo.GRPCCallInfo(stream.Context()), + request.EffectiveCallerId, + request.ImmediateCallerId, + ) + err = q.server.BinlogDumpGTID(ctx, request, stream.Send) + return vterrors.ToGRPC(err) +} + // ReserveExecute implements the QueryServer interface func (q *query) ReserveExecute(ctx context.Context, request *querypb.ReserveExecuteRequest) (response *querypb.ReserveExecuteResponse, err error) { defer q.server.HandlePanic(&err) diff --git a/go/vt/vttablet/grpctabletconn/conn.go b/go/vt/vttablet/grpctabletconn/conn.go index 156b4ceae10..345bbf9fa73 100644 --- a/go/vt/vttablet/grpctabletconn/conn.go +++ b/go/vt/vttablet/grpctabletconn/conn.go @@ -857,6 +857,102 @@ func (conn *gRPCQueryClient) VStreamResults(ctx context.Context, target *querypb } } +// BinlogDump streams raw binlog events from MySQL using COM_BINLOG_DUMP (file/position-based). +func (conn *gRPCQueryClient) BinlogDump(ctx context.Context, request *binlogdatapb.BinlogDumpRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + // Please see comments in StreamExecute to see how this works. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := func() (queryservicepb.Query_BinlogDumpClient, error) { + conn.mu.RLock() + defer conn.mu.RUnlock() + if conn.cc == nil { + return nil, tabletconn.ConnClosed + } + + req := &binlogdatapb.BinlogDumpRequest{ + Target: request.Target, + EffectiveCallerId: callerid.EffectiveCallerIDFromContext(ctx), + ImmediateCallerId: callerid.ImmediateCallerIDFromContext(ctx), + BinlogFilename: request.BinlogFilename, + BinlogPosition: request.BinlogPosition, + } + stream, err := conn.c.BinlogDump(ctx, req) + if err != nil { + return nil, tabletconn.ErrorFromGRPC(err) + } + return stream, nil + }() + if err != nil { + return err + } + for { + r, err := stream.Recv() + if err != nil { + return tabletconn.ErrorFromGRPC(err) + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := send(r); err != nil { + if err == io.EOF { + return nil + } + return err + } + } +} + +// BinlogDumpGTID streams raw binlog events from MySQL using COM_BINLOG_DUMP_GTID (GTID-based). +func (conn *gRPCQueryClient) BinlogDumpGTID(ctx context.Context, request *binlogdatapb.BinlogDumpGTIDRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + // Please see comments in StreamExecute to see how this works. 
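The receive loop in BinlogDump above, and the identical one in this method, treats io.EOF returned by the send callback as a request to stop the stream cleanly in this gRPC client implementation, mirroring the other streaming methods in the file. A hedged sketch of a caller that uses this to cap how many packets it consumes; dumpSome and its packet handling are illustrative, not part of this change.

package example

import (
	"context"
	"io"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
	"vitess.io/vitess/go/vt/vttablet/queryservice"
)

// dumpSome consumes at most max packets and then stops the stream by returning
// io.EOF from the send callback, which this client's receive loop turns into a
// normal (nil) return.
func dumpSome(ctx context.Context, qs queryservice.QueryService, req *binlogdatapb.BinlogDumpGTIDRequest, max int) error {
	seen := 0
	return qs.BinlogDumpGTID(ctx, req, func(r *binlogdatapb.BinlogDumpResponse) error {
		// Handle r.Packet here (relay it, buffer it, etc.).
		seen++
		if seen >= max {
			return io.EOF
		}
		return nil
	})
}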
+ ctx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := func() (queryservicepb.Query_BinlogDumpGTIDClient, error) { + conn.mu.RLock() + defer conn.mu.RUnlock() + if conn.cc == nil { + return nil, tabletconn.ConnClosed + } + + req := &binlogdatapb.BinlogDumpGTIDRequest{ + Target: request.Target, + EffectiveCallerId: callerid.EffectiveCallerIDFromContext(ctx), + ImmediateCallerId: callerid.ImmediateCallerIDFromContext(ctx), + BinlogFilename: request.BinlogFilename, + BinlogPosition: request.BinlogPosition, + GtidSet: request.GtidSet, + NonBlock: request.NonBlock, + } + stream, err := conn.c.BinlogDumpGTID(ctx, req) + if err != nil { + return nil, tabletconn.ErrorFromGRPC(err) + } + return stream, nil + }() + if err != nil { + return err + } + for { + r, err := stream.Recv() + if err != nil { + return tabletconn.ErrorFromGRPC(err) + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := send(r); err != nil { + if err == io.EOF { + return nil + } + return err + } + } +} + // HandlePanic is a no-op. func (conn *gRPCQueryClient) HandlePanic(err *error) { } diff --git a/go/vt/vttablet/grpctabletconn/conn_test.go b/go/vt/vttablet/grpctabletconn/conn_test.go index 6f501e0ad90..8eb738d5633 100644 --- a/go/vt/vttablet/grpctabletconn/conn_test.go +++ b/go/vt/vttablet/grpctabletconn/conn_test.go @@ -181,6 +181,16 @@ func (m *mockQueryClient) GetSchema(ctx context.Context, in *querypb.GetSchemaRe return nil, errors.New("A general error") } +func (m *mockQueryClient) BinlogDump(ctx context.Context, in *binlogdatapb.BinlogDumpRequest, opts ...grpc.CallOption) (queryservicepb.Query_BinlogDumpClient, error) { + m.lastCallCtx = ctx + return nil, errors.New("A general error") +} + +func (m *mockQueryClient) BinlogDumpGTID(ctx context.Context, in *binlogdatapb.BinlogDumpGTIDRequest, opts ...grpc.CallOption) (queryservicepb.Query_BinlogDumpGTIDClient, error) { + m.lastCallCtx = ctx + return nil, errors.New("A general error") +} + var _ queryservicepb.QueryClient = (*mockQueryClient)(nil) // TestGoRoutineLeakPrevention tests that after all the RPCs that stream queries, we end up closing the context that was passed to it, to prevent go routines from being leaked. diff --git a/go/vt/vttablet/queryservice/queryservice.go b/go/vt/vttablet/queryservice/queryservice.go index 2faf70226c4..3bf5411c4d3 100644 --- a/go/vt/vttablet/queryservice/queryservice.go +++ b/go/vt/vttablet/queryservice/queryservice.go @@ -114,6 +114,12 @@ type QueryService interface { // VStreamResults streams results along with the gtid of the snapshot. VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error + // BinlogDump streams raw binlog events from MySQL using COM_BINLOG_DUMP (file/position-based). + BinlogDump(ctx context.Context, request *binlogdatapb.BinlogDumpRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error + + // BinlogDumpGTID streams raw binlog events from MySQL using COM_BINLOG_DUMP_GTID (GTID-based). + BinlogDumpGTID(ctx context.Context, request *binlogdatapb.BinlogDumpGTIDRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error + // StreamHealth streams health status. 
StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error diff --git a/go/vt/vttablet/queryservice/wrapped.go b/go/vt/vttablet/queryservice/wrapped.go index 4964b7b5b91..c0a395a0f43 100644 --- a/go/vt/vttablet/queryservice/wrapped.go +++ b/go/vt/vttablet/queryservice/wrapped.go @@ -341,6 +341,22 @@ func (ws *wrappedService) VStreamResults(ctx context.Context, target *querypb.Ta }) } +func (ws *wrappedService) BinlogDump(ctx context.Context, request *binlogdatapb.BinlogDumpRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + opts := WrapOpts{InTransaction: false} + return ws.wrapper(ctx, request.Target, ws.impl, "BinlogDump", opts, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { + innerErr := conn.BinlogDump(ctx, request, send) + return false, innerErr + }) +} + +func (ws *wrappedService) BinlogDumpGTID(ctx context.Context, request *binlogdatapb.BinlogDumpGTIDRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + opts := WrapOpts{InTransaction: false} + return ws.wrapper(ctx, request.Target, ws.impl, "BinlogDumpGTID", opts, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { + innerErr := conn.BinlogDumpGTID(ctx, request, send) + return false, innerErr + }) +} + func (ws *wrappedService) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { opts := WrapOpts{InTransaction: false} return ws.wrapper(ctx, nil, ws.impl, "StreamHealth", opts, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index 49e057fff07..2e70f03b26a 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -116,6 +116,10 @@ type SandboxConn struct { VStreamCh chan *binlogdatapb.VEvent VStreamEventDelay time.Duration // Any sleep that should be introduced before each event is streamed + // BinlogDump expectations. + BinlogDumpResponses []*binlogdatapb.BinlogDumpResponse + BinlogDumpError error + // transaction id generator TransactionID atomic.Int64 @@ -654,6 +658,32 @@ func (sbc *SandboxConn) VStreamResults(ctx context.Context, target *querypb.Targ return errors.New("not implemented in test") } +// BinlogDump is part of the QueryService interface. +func (sbc *SandboxConn) BinlogDump(ctx context.Context, request *binlogdatapb.BinlogDumpRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + if sbc.BinlogDumpError != nil { + return sbc.BinlogDumpError + } + for _, response := range sbc.BinlogDumpResponses { + if err := send(response); err != nil { + return err + } + } + return nil +} + +// BinlogDumpGTID is part of the QueryService interface. +func (sbc *SandboxConn) BinlogDumpGTID(ctx context.Context, request *binlogdatapb.BinlogDumpGTIDRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + if sbc.BinlogDumpError != nil { + return sbc.BinlogDumpError + } + for _, response := range sbc.BinlogDumpResponses { + if err := send(response); err != nil { + return err + } + } + return nil +} + // QueryServiceByAlias is part of the Gateway interface. 
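Since the sandbox fake above backs most unit tests, here is a minimal sketch of how a test could drive its new BinlogDump hooks, assuming the existing NewSandboxConn constructor; the test name and fake packet bytes are illustrative.

package sandboxconn_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
	"vitess.io/vitess/go/vt/vttablet/sandboxconn"
)

func TestBinlogDumpGTIDFake(t *testing.T) {
	sbc := sandboxconn.NewSandboxConn(&topodatapb.Tablet{})
	sbc.BinlogDumpResponses = []*binlogdatapb.BinlogDumpResponse{
		{Packet: []byte{0xfe, 0x00, 0x00}}, // fake raw packet payload
	}

	var got [][]byte
	err := sbc.BinlogDumpGTID(context.Background(), &binlogdatapb.BinlogDumpGTIDRequest{},
		func(r *binlogdatapb.BinlogDumpResponse) error {
			got = append(got, r.Packet)
			return nil
		})
	require.NoError(t, err)
	require.Len(t, got, 1)
}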
func (sbc *SandboxConn) QueryServiceByAlias(_ context.Context, _ *topodatapb.TabletAlias, _ *querypb.Target) (queryservice.QueryService, error) { return sbc, nil diff --git a/go/vt/vttablet/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go index eb22d655e88..a30c97264b4 100644 --- a/go/vt/vttablet/tabletconntest/fakequeryservice.go +++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go @@ -721,6 +721,16 @@ func (f *FakeQueryService) VStreamResults(ctx context.Context, target *querypb.T panic("not implemented") } +// BinlogDump is part of the QueryService interface. +func (f *FakeQueryService) BinlogDump(ctx context.Context, request *binlogdatapb.BinlogDumpRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + panic("not implemented") +} + +// BinlogDumpGTID is part of the QueryService interface. +func (f *FakeQueryService) BinlogDumpGTID(ctx context.Context, request *binlogdatapb.BinlogDumpGTIDRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + panic("not implemented") +} + // QueryServiceByAlias satisfies the Gateway interface func (f *FakeQueryService) QueryServiceByAlias(_ context.Context, _ *topodatapb.TabletAlias, _ *querypb.Target) (queryservice.QueryService, error) { panic("not implemented") diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index dadbfd5d619..15d597768c0 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -32,9 +32,9 @@ import ( "syscall" "time" - "vitess.io/vitess/go/vt/vttablet/tabletserver/querythrottler" - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools/smartconnpool" "vitess.io/vitess/go/sqltypes" @@ -42,6 +42,7 @@ import ( "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/binlog" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -65,6 +66,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/gc" "vitess.io/vitess/go/vt/vttablet/tabletserver/messager" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" + "vitess.io/vitess/go/vt/vttablet/tabletserver/querythrottler" "vitess.io/vitess/go/vt/vttablet/tabletserver/repltracker" "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -1338,6 +1340,137 @@ func (tsv *TabletServer) VStreamResults(ctx context.Context, target *querypb.Tar return tsv.vstreamer.StreamResults(ctx, query, send) } +// BinlogDump streams raw binlog packets from MySQL using COM_BINLOG_DUMP (file/position-based). +// It reads MySQL packets directly and forwards them to the client without parsing. +// Each packet is streamed individually, including zero-length packets that terminate +// multi-packet sequences. This provides true streaming with bounded memory usage. 
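The two entry points below hand off to streamBinlogPackets, which leans on the MySQL wire rule that a message is carried in fragments of exactly MaxPacketSize bytes, terminated by a shorter fragment (possibly zero-length). A small sketch of a reassembly helper built on that rule; readOneMessage and its reader argument are assumptions for illustration, and the server code itself forwards fragments without reassembling them.

package example

import "vitess.io/vitess/go/mysql"

// readOneMessage reassembles one logical MySQL message from its wire fragments.
// readOnePacket stands in for a ReadOnePacket-style reader; a fragment shorter
// than MaxPacketSize, including a zero-length one, ends the message.
func readOneMessage(readOnePacket func() ([]byte, error)) ([]byte, error) {
	var msg []byte
	for {
		frag, err := readOnePacket()
		if err != nil {
			return nil, err
		}
		msg = append(msg, frag...)
		if len(frag) < mysql.MaxPacketSize {
			return msg, nil
		}
	}
}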
+func (tsv *TabletServer) BinlogDump(ctx context.Context, request *binlogdatapb.BinlogDumpRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + if err := tsv.sm.VerifyTarget(ctx, request.Target); err != nil { + return err + } + + // Create a binlog connection to MySQL + conn, err := binlog.NewBinlogConnection(tsv.config.DB.FilteredWithDB()) + if err != nil { + return vterrors.Wrapf(err, "failed to create binlog connection") + } + defer conn.Close() + + // Send the binlog dump command to MySQL using file/position + if err := conn.SendBinlogDumpCommand(conn.ServerID(), request.BinlogFilename, request.BinlogPosition); err != nil { + return vterrors.Wrapf(err, "failed to send binlog dump command") + } + + return tsv.streamBinlogPackets(ctx, conn, send) +} + +// BinlogDumpGTID streams raw binlog packets from MySQL using COM_BINLOG_DUMP_GTID (GTID-based). +// It reads MySQL packets directly and forwards them to the client without parsing. +// Each packet is streamed individually, including zero-length packets that terminate +// multi-packet sequences. This provides true streaming with bounded memory usage. +func (tsv *TabletServer) BinlogDumpGTID(ctx context.Context, request *binlogdatapb.BinlogDumpGTIDRequest, send func(*binlogdatapb.BinlogDumpResponse) error) error { + if err := tsv.sm.VerifyTarget(ctx, request.Target); err != nil { + return err + } + + // Create a binlog connection to MySQL + conn, err := binlog.NewBinlogConnection(tsv.config.DB.FilteredWithDB()) + if err != nil { + return vterrors.Wrapf(err, "failed to create binlog connection") + } + defer conn.Close() + + // Parse the GTID set from the request + var startPos replication.Position + if request.GtidSet != "" { + gtidSet, err := replication.ParseMysql56GTIDSet(request.GtidSet) + if err != nil { + return vterrors.Wrapf(err, "failed to parse GTID set: %s", request.GtidSet) + } + startPos = replication.Position{GTIDSet: gtidSet} + } else { + // If no GTID set is provided, get the current position from MySQL. + // This means we'll stream events starting from "now". + currentPos, err := conn.PrimaryPosition() + if err != nil { + return vterrors.Wrapf(err, "failed to get current position") + } + startPos = currentPos + } + + // Send the binlog dump command to MySQL using GTID + if err := conn.SendBinlogDumpGTIDCommand(conn.ServerID(), request.BinlogFilename, startPos, request.NonBlock); err != nil { + return vterrors.Wrapf(err, "failed to send binlog dump command") + } + + return tsv.streamBinlogPackets(ctx, conn, send) +} + +// streamBinlogPackets streams binlog packets from the connection to the client. +// This is shared by both BinlogDump and BinlogDumpGTID. +// +// TODO: Optimize for zero-copy streaming using gRPC's mem.BufferSlice. +// Currently, packet data is copied during protobuf marshaling. To eliminate this: +// 1. Use mem.BufferPool to allocate read buffers (ReadOnePacketPooled) +// 2. Return mem.Buffer with reference counting +// 3. Implement BufferSliceMarshaler interface for BinlogDumpResponse +// 4. Update grpc_codec.go to check for BufferSliceMarshaler before vtprotoMessage +// 5. Build BufferSlice with [protobuf header, packet buffer] - no copy needed +// gRPC's mem.Buffer reference counting handles buffer lifecycle automatically. 
+// See: google.golang.org/grpc/mem and go/vt/servenv/grpc_codec.go +func (tsv *TabletServer) streamBinlogPackets(ctx context.Context, conn *binlog.BinlogConnection, send func(*binlogdatapb.BinlogDumpResponse) error) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Read first fragment of a message + packet, err := conn.ReadOnePacket() + if err != nil { + return vterrors.Wrapf(err, "failed to read binlog packet") + } + + // First fragment must have content + if len(packet) == 0 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected zero-length packet at start of message") + } + + // Send first fragment + if err := send(&binlogdatapb.BinlogDumpResponse{Packet: packet}); err != nil { + return err + } + + // Check status byte on first fragment + isEOF := packet[0] == mysql.EOFPacket + isError := packet[0] == mysql.ErrPacket + + // Read remaining fragments if this is a multi-packet message + for len(packet) == mysql.MaxPacketSize { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + packet, err = conn.ReadOnePacket() + if err != nil { + return vterrors.Wrapf(err, "failed to read continuation packet") + } + + if err := send(&binlogdatapb.BinlogDumpResponse{Packet: packet}); err != nil { + return err + } + } + + // Message complete - check if we should stop + if isEOF || isError { + return nil + } + } +} + // ReserveBeginExecute implements the QueryService interface func (tsv *TabletServer) ReserveBeginExecute(ctx context.Context, session queryservice.Session, target *querypb.Target, settings []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions) (state queryservice.ReservedTransactionState, result *sqltypes.Result, err error) { state, result, err = tsv.beginExecuteWithSettings(ctx, target, settings, postBeginQueries, sql, bindVariables, options) diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index ed1762f78d2..3b2f2c43cfe 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -604,3 +604,48 @@ message VStreamResultsResponse { string gtid = 3; repeated query.Row rows = 4; } + +// BinlogDumpRequest is the payload for COM_BINLOG_DUMP (file/position-based replication). +// This is the older protocol that uses binlog filename and position. +message BinlogDumpRequest { + vtrpc.CallerID effective_caller_id = 1; + query.VTGateCallerID immediate_caller_id = 2; + query.Target target = 3; + + // Binlog filename to start streaming from (e.g., "binlog.000001") + string binlog_filename = 4; + // Position within the binlog file (32-bit for COM_BINLOG_DUMP) + uint32 binlog_position = 5; +} + +// BinlogDumpGTIDRequest is the payload for COM_BINLOG_DUMP_GTID (GTID-based replication). +// This is the newer protocol (MySQL 5.6+) that uses GTID sets. +message BinlogDumpGTIDRequest { + vtrpc.CallerID effective_caller_id = 1; + query.VTGateCallerID immediate_caller_id = 2; + query.Target target = 3; + + // Optional binlog filename (used with BinlogThroughPosition flag) + string binlog_filename = 4; + // Position within the binlog file (64-bit for COM_BINLOG_DUMP_GTID) + uint64 binlog_position = 5; + // GTID set in string format (e.g., "uuid:1-5,uuid2:1-3") + // vttablet will convert to SIDBlock for MySQL + string gtid_set = 6; + + // If true, MySQL will return EOF when it reaches the end of the binlog + // instead of blocking and waiting for new events. This corresponds to + // the BINLOG_DUMP_NON_BLOCK flag (0x01) in COM_BINLOG_DUMP_GTID. 
+ bool non_block = 7; +} + +// BinlogDumpResponse streams raw MySQL packet payloads. +// Used by both BinlogDump and BinlogDumpGTID RPCs. +message BinlogDumpResponse { + // A single raw MySQL packet payload from the binlog stream. + // Packets are streamed exactly as received from MySQL, including + // zero-length packets that terminate multi-packet sequences. + // Only the first packet of a sequence contains the status byte + // (0x00 for event data, 0xFE for EOF, 0xFF for error). + bytes packet = 1; +} diff --git a/proto/queryservice.proto b/proto/queryservice.proto index b987f692289..207eabbc5fa 100644 --- a/proto/queryservice.proto +++ b/proto/queryservice.proto @@ -117,4 +117,10 @@ service Query { // GetSchema returns the schema information. rpc GetSchema(query.GetSchemaRequest) returns (stream query.GetSchemaResponse) {}; + + // BinlogDump streams raw binlog events from MySQL using COM_BINLOG_DUMP (file/position-based). + rpc BinlogDump(binlogdata.BinlogDumpRequest) returns (stream binlogdata.BinlogDumpResponse) {}; + + // BinlogDumpGTID streams raw binlog events from MySQL using COM_BINLOG_DUMP_GTID (GTID-based). + rpc BinlogDumpGTID(binlogdata.BinlogDumpGTIDRequest) returns (stream binlogdata.BinlogDumpResponse) {}; } \ No newline at end of file diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json index 26f0afd9615..339e08d229d 100644 --- a/web/vtadmin/package-lock.json +++ b/web/vtadmin/package-lock.json @@ -18,7 +18,7 @@ "dayjs": "^1.11.7", "downshift": "^7.2.0", "history": "^5.3.0", - "lodash-es": "^4.17.23", + "lodash-es": "^4.17.21", "path-to-regexp": "^8.1.0", "postcss-flexbugs-fixes": "^5.0.2", "postcss-preset-env": "^8.0.1", @@ -11502,9 +11502,9 @@ "license": "MIT" }, "node_modules/lodash-es": { - "version": "4.17.23", - "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.23.tgz", - "integrity": "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg==", + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", "license": "MIT" }, "node_modules/lodash.debounce": { diff --git a/web/vtadmin/package.json b/web/vtadmin/package.json index 656560fcaf7..db6b0bec4dc 100644 --- a/web/vtadmin/package.json +++ b/web/vtadmin/package.json @@ -17,7 +17,7 @@ "dayjs": "^1.11.7", "downshift": "^7.2.0", "history": "^5.3.0", - "lodash-es": "^4.17.23", + "lodash-es": "^4.17.21", "path-to-regexp": "^8.1.0", "postcss-flexbugs-fixes": "^5.0.2", "postcss-preset-env": "^8.0.1", diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index 3d7db5f3430..1706f2b9723 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -42098,6 +42098,357 @@ export namespace binlogdata { */ public static getTypeUrl(typeUrlPrefix?: string): string; } + + /** Properties of a BinlogDumpRequest. */ + interface IBinlogDumpRequest { + + /** BinlogDumpRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); + + /** BinlogDumpRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); + + /** BinlogDumpRequest target */ + target?: (query.ITarget|null); + + /** BinlogDumpRequest binlog_filename */ + binlog_filename?: (string|null); + + /** BinlogDumpRequest binlog_position */ + binlog_position?: (number|null); + } + + /** Represents a BinlogDumpRequest. 
*/ + class BinlogDumpRequest implements IBinlogDumpRequest { + + /** + * Constructs a new BinlogDumpRequest. + * @param [properties] Properties to set + */ + constructor(properties?: binlogdata.IBinlogDumpRequest); + + /** BinlogDumpRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); + + /** BinlogDumpRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); + + /** BinlogDumpRequest target. */ + public target?: (query.ITarget|null); + + /** BinlogDumpRequest binlog_filename. */ + public binlog_filename: string; + + /** BinlogDumpRequest binlog_position. */ + public binlog_position: number; + + /** + * Creates a new BinlogDumpRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns BinlogDumpRequest instance + */ + public static create(properties?: binlogdata.IBinlogDumpRequest): binlogdata.BinlogDumpRequest; + + /** + * Encodes the specified BinlogDumpRequest message. Does not implicitly {@link binlogdata.BinlogDumpRequest.verify|verify} messages. + * @param message BinlogDumpRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: binlogdata.IBinlogDumpRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified BinlogDumpRequest message, length delimited. Does not implicitly {@link binlogdata.BinlogDumpRequest.verify|verify} messages. + * @param message BinlogDumpRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: binlogdata.IBinlogDumpRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a BinlogDumpRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns BinlogDumpRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.BinlogDumpRequest; + + /** + * Decodes a BinlogDumpRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns BinlogDumpRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.BinlogDumpRequest; + + /** + * Verifies a BinlogDumpRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a BinlogDumpRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns BinlogDumpRequest + */ + public static fromObject(object: { [k: string]: any }): binlogdata.BinlogDumpRequest; + + /** + * Creates a plain object from a BinlogDumpRequest message. Also converts values to other types if specified. 
+ * @param message BinlogDumpRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: binlogdata.BinlogDumpRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this BinlogDumpRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for BinlogDumpRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a BinlogDumpGTIDRequest. */ + interface IBinlogDumpGTIDRequest { + + /** BinlogDumpGTIDRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); + + /** BinlogDumpGTIDRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); + + /** BinlogDumpGTIDRequest target */ + target?: (query.ITarget|null); + + /** BinlogDumpGTIDRequest binlog_filename */ + binlog_filename?: (string|null); + + /** BinlogDumpGTIDRequest binlog_position */ + binlog_position?: (number|Long|null); + + /** BinlogDumpGTIDRequest gtid_set */ + gtid_set?: (string|null); + + /** BinlogDumpGTIDRequest non_block */ + non_block?: (boolean|null); + } + + /** Represents a BinlogDumpGTIDRequest. */ + class BinlogDumpGTIDRequest implements IBinlogDumpGTIDRequest { + + /** + * Constructs a new BinlogDumpGTIDRequest. + * @param [properties] Properties to set + */ + constructor(properties?: binlogdata.IBinlogDumpGTIDRequest); + + /** BinlogDumpGTIDRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); + + /** BinlogDumpGTIDRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); + + /** BinlogDumpGTIDRequest target. */ + public target?: (query.ITarget|null); + + /** BinlogDumpGTIDRequest binlog_filename. */ + public binlog_filename: string; + + /** BinlogDumpGTIDRequest binlog_position. */ + public binlog_position: (number|Long); + + /** BinlogDumpGTIDRequest gtid_set. */ + public gtid_set: string; + + /** BinlogDumpGTIDRequest non_block. */ + public non_block: boolean; + + /** + * Creates a new BinlogDumpGTIDRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns BinlogDumpGTIDRequest instance + */ + public static create(properties?: binlogdata.IBinlogDumpGTIDRequest): binlogdata.BinlogDumpGTIDRequest; + + /** + * Encodes the specified BinlogDumpGTIDRequest message. Does not implicitly {@link binlogdata.BinlogDumpGTIDRequest.verify|verify} messages. + * @param message BinlogDumpGTIDRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: binlogdata.IBinlogDumpGTIDRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified BinlogDumpGTIDRequest message, length delimited. Does not implicitly {@link binlogdata.BinlogDumpGTIDRequest.verify|verify} messages. + * @param message BinlogDumpGTIDRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: binlogdata.IBinlogDumpGTIDRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a BinlogDumpGTIDRequest message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns BinlogDumpGTIDRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.BinlogDumpGTIDRequest; + + /** + * Decodes a BinlogDumpGTIDRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns BinlogDumpGTIDRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.BinlogDumpGTIDRequest; + + /** + * Verifies a BinlogDumpGTIDRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a BinlogDumpGTIDRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns BinlogDumpGTIDRequest + */ + public static fromObject(object: { [k: string]: any }): binlogdata.BinlogDumpGTIDRequest; + + /** + * Creates a plain object from a BinlogDumpGTIDRequest message. Also converts values to other types if specified. + * @param message BinlogDumpGTIDRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: binlogdata.BinlogDumpGTIDRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this BinlogDumpGTIDRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for BinlogDumpGTIDRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a BinlogDumpResponse. */ + interface IBinlogDumpResponse { + + /** BinlogDumpResponse packet */ + packet?: (Uint8Array|null); + } + + /** Represents a BinlogDumpResponse. */ + class BinlogDumpResponse implements IBinlogDumpResponse { + + /** + * Constructs a new BinlogDumpResponse. + * @param [properties] Properties to set + */ + constructor(properties?: binlogdata.IBinlogDumpResponse); + + /** BinlogDumpResponse packet. */ + public packet: Uint8Array; + + /** + * Creates a new BinlogDumpResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns BinlogDumpResponse instance + */ + public static create(properties?: binlogdata.IBinlogDumpResponse): binlogdata.BinlogDumpResponse; + + /** + * Encodes the specified BinlogDumpResponse message. Does not implicitly {@link binlogdata.BinlogDumpResponse.verify|verify} messages. + * @param message BinlogDumpResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: binlogdata.IBinlogDumpResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified BinlogDumpResponse message, length delimited. Does not implicitly {@link binlogdata.BinlogDumpResponse.verify|verify} messages. 
+ * @param message BinlogDumpResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: binlogdata.IBinlogDumpResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a BinlogDumpResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns BinlogDumpResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.BinlogDumpResponse; + + /** + * Decodes a BinlogDumpResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns BinlogDumpResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.BinlogDumpResponse; + + /** + * Verifies a BinlogDumpResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a BinlogDumpResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns BinlogDumpResponse + */ + public static fromObject(object: { [k: string]: any }): binlogdata.BinlogDumpResponse; + + /** + * Creates a plain object from a BinlogDumpResponse message. Also converts values to other types if specified. + * @param message BinlogDumpResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: binlogdata.BinlogDumpResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this BinlogDumpResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for BinlogDumpResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } } /** Namespace query. */ diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index df1cdc0f375..e7b97b208b3 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -100206,6 +100206,900 @@ export const binlogdata = $root.binlogdata = (() => { return VStreamResultsResponse; })(); + binlogdata.BinlogDumpRequest = (function() { + + /** + * Properties of a BinlogDumpRequest. + * @memberof binlogdata + * @interface IBinlogDumpRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] BinlogDumpRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] BinlogDumpRequest immediate_caller_id + * @property {query.ITarget|null} [target] BinlogDumpRequest target + * @property {string|null} [binlog_filename] BinlogDumpRequest binlog_filename + * @property {number|null} [binlog_position] BinlogDumpRequest binlog_position + */ + + /** + * Constructs a new BinlogDumpRequest. + * @memberof binlogdata + * @classdesc Represents a BinlogDumpRequest. 
+ * @implements IBinlogDumpRequest + * @constructor + * @param {binlogdata.IBinlogDumpRequest=} [properties] Properties to set + */ + function BinlogDumpRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * BinlogDumpRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof binlogdata.BinlogDumpRequest + * @instance + */ + BinlogDumpRequest.prototype.effective_caller_id = null; + + /** + * BinlogDumpRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof binlogdata.BinlogDumpRequest + * @instance + */ + BinlogDumpRequest.prototype.immediate_caller_id = null; + + /** + * BinlogDumpRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof binlogdata.BinlogDumpRequest + * @instance + */ + BinlogDumpRequest.prototype.target = null; + + /** + * BinlogDumpRequest binlog_filename. + * @member {string} binlog_filename + * @memberof binlogdata.BinlogDumpRequest + * @instance + */ + BinlogDumpRequest.prototype.binlog_filename = ""; + + /** + * BinlogDumpRequest binlog_position. + * @member {number} binlog_position + * @memberof binlogdata.BinlogDumpRequest + * @instance + */ + BinlogDumpRequest.prototype.binlog_position = 0; + + /** + * Creates a new BinlogDumpRequest instance using the specified properties. + * @function create + * @memberof binlogdata.BinlogDumpRequest + * @static + * @param {binlogdata.IBinlogDumpRequest=} [properties] Properties to set + * @returns {binlogdata.BinlogDumpRequest} BinlogDumpRequest instance + */ + BinlogDumpRequest.create = function create(properties) { + return new BinlogDumpRequest(properties); + }; + + /** + * Encodes the specified BinlogDumpRequest message. Does not implicitly {@link binlogdata.BinlogDumpRequest.verify|verify} messages. + * @function encode + * @memberof binlogdata.BinlogDumpRequest + * @static + * @param {binlogdata.IBinlogDumpRequest} message BinlogDumpRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BinlogDumpRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.binlog_filename != null && Object.hasOwnProperty.call(message, "binlog_filename")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.binlog_filename); + if (message.binlog_position != null && Object.hasOwnProperty.call(message, "binlog_position")) + writer.uint32(/* id 5, wireType 0 =*/40).uint32(message.binlog_position); + return writer; + }; + + /** + * Encodes the specified BinlogDumpRequest message, length delimited. Does not implicitly {@link binlogdata.BinlogDumpRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof binlogdata.BinlogDumpRequest + * @static + * @param {binlogdata.IBinlogDumpRequest} message BinlogDumpRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BinlogDumpRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a BinlogDumpRequest message from the specified reader or buffer. + * @function decode + * @memberof binlogdata.BinlogDumpRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {binlogdata.BinlogDumpRequest} BinlogDumpRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BinlogDumpRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.BinlogDumpRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + break; + } + case 2: { + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + break; + } + case 3: { + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + case 4: { + message.binlog_filename = reader.string(); + break; + } + case 5: { + message.binlog_position = reader.uint32(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a BinlogDumpRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof binlogdata.BinlogDumpRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {binlogdata.BinlogDumpRequest} BinlogDumpRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BinlogDumpRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a BinlogDumpRequest message. + * @function verify + * @memberof binlogdata.BinlogDumpRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + BinlogDumpRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." 
+ error; + } + if (message.binlog_filename != null && message.hasOwnProperty("binlog_filename")) + if (!$util.isString(message.binlog_filename)) + return "binlog_filename: string expected"; + if (message.binlog_position != null && message.hasOwnProperty("binlog_position")) + if (!$util.isInteger(message.binlog_position)) + return "binlog_position: integer expected"; + return null; + }; + + /** + * Creates a BinlogDumpRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof binlogdata.BinlogDumpRequest + * @static + * @param {Object.} object Plain object + * @returns {binlogdata.BinlogDumpRequest} BinlogDumpRequest + */ + BinlogDumpRequest.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.BinlogDumpRequest) + return object; + let message = new $root.binlogdata.BinlogDumpRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".binlogdata.BinlogDumpRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".binlogdata.BinlogDumpRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".binlogdata.BinlogDumpRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.binlog_filename != null) + message.binlog_filename = String(object.binlog_filename); + if (object.binlog_position != null) + message.binlog_position = object.binlog_position >>> 0; + return message; + }; + + /** + * Creates a plain object from a BinlogDumpRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof binlogdata.BinlogDumpRequest + * @static + * @param {binlogdata.BinlogDumpRequest} message BinlogDumpRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + BinlogDumpRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.binlog_filename = ""; + object.binlog_position = 0; + } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.binlog_filename != null && message.hasOwnProperty("binlog_filename")) + object.binlog_filename = message.binlog_filename; + if (message.binlog_position != null && message.hasOwnProperty("binlog_position")) + object.binlog_position = message.binlog_position; + return object; + }; + + /** + * Converts this BinlogDumpRequest to JSON. 
+ * @function toJSON + * @memberof binlogdata.BinlogDumpRequest + * @instance + * @returns {Object.} JSON object + */ + BinlogDumpRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for BinlogDumpRequest + * @function getTypeUrl + * @memberof binlogdata.BinlogDumpRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + BinlogDumpRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/binlogdata.BinlogDumpRequest"; + }; + + return BinlogDumpRequest; + })(); + + binlogdata.BinlogDumpGTIDRequest = (function() { + + /** + * Properties of a BinlogDumpGTIDRequest. + * @memberof binlogdata + * @interface IBinlogDumpGTIDRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] BinlogDumpGTIDRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] BinlogDumpGTIDRequest immediate_caller_id + * @property {query.ITarget|null} [target] BinlogDumpGTIDRequest target + * @property {string|null} [binlog_filename] BinlogDumpGTIDRequest binlog_filename + * @property {number|Long|null} [binlog_position] BinlogDumpGTIDRequest binlog_position + * @property {string|null} [gtid_set] BinlogDumpGTIDRequest gtid_set + * @property {boolean|null} [non_block] BinlogDumpGTIDRequest non_block + */ + + /** + * Constructs a new BinlogDumpGTIDRequest. + * @memberof binlogdata + * @classdesc Represents a BinlogDumpGTIDRequest. + * @implements IBinlogDumpGTIDRequest + * @constructor + * @param {binlogdata.IBinlogDumpGTIDRequest=} [properties] Properties to set + */ + function BinlogDumpGTIDRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * BinlogDumpGTIDRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof binlogdata.BinlogDumpGTIDRequest + * @instance + */ + BinlogDumpGTIDRequest.prototype.effective_caller_id = null; + + /** + * BinlogDumpGTIDRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof binlogdata.BinlogDumpGTIDRequest + * @instance + */ + BinlogDumpGTIDRequest.prototype.immediate_caller_id = null; + + /** + * BinlogDumpGTIDRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof binlogdata.BinlogDumpGTIDRequest + * @instance + */ + BinlogDumpGTIDRequest.prototype.target = null; + + /** + * BinlogDumpGTIDRequest binlog_filename. + * @member {string} binlog_filename + * @memberof binlogdata.BinlogDumpGTIDRequest + * @instance + */ + BinlogDumpGTIDRequest.prototype.binlog_filename = ""; + + /** + * BinlogDumpGTIDRequest binlog_position. + * @member {number|Long} binlog_position + * @memberof binlogdata.BinlogDumpGTIDRequest + * @instance + */ + BinlogDumpGTIDRequest.prototype.binlog_position = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * BinlogDumpGTIDRequest gtid_set. + * @member {string} gtid_set + * @memberof binlogdata.BinlogDumpGTIDRequest + * @instance + */ + BinlogDumpGTIDRequest.prototype.gtid_set = ""; + + /** + * BinlogDumpGTIDRequest non_block. 
+ * @member {boolean} non_block + * @memberof binlogdata.BinlogDumpGTIDRequest + * @instance + */ + BinlogDumpGTIDRequest.prototype.non_block = false; + + /** + * Creates a new BinlogDumpGTIDRequest instance using the specified properties. + * @function create + * @memberof binlogdata.BinlogDumpGTIDRequest + * @static + * @param {binlogdata.IBinlogDumpGTIDRequest=} [properties] Properties to set + * @returns {binlogdata.BinlogDumpGTIDRequest} BinlogDumpGTIDRequest instance + */ + BinlogDumpGTIDRequest.create = function create(properties) { + return new BinlogDumpGTIDRequest(properties); + }; + + /** + * Encodes the specified BinlogDumpGTIDRequest message. Does not implicitly {@link binlogdata.BinlogDumpGTIDRequest.verify|verify} messages. + * @function encode + * @memberof binlogdata.BinlogDumpGTIDRequest + * @static + * @param {binlogdata.IBinlogDumpGTIDRequest} message BinlogDumpGTIDRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BinlogDumpGTIDRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.binlog_filename != null && Object.hasOwnProperty.call(message, "binlog_filename")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.binlog_filename); + if (message.binlog_position != null && Object.hasOwnProperty.call(message, "binlog_position")) + writer.uint32(/* id 5, wireType 0 =*/40).uint64(message.binlog_position); + if (message.gtid_set != null && Object.hasOwnProperty.call(message, "gtid_set")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.gtid_set); + if (message.non_block != null && Object.hasOwnProperty.call(message, "non_block")) + writer.uint32(/* id 7, wireType 0 =*/56).bool(message.non_block); + return writer; + }; + + /** + * Encodes the specified BinlogDumpGTIDRequest message, length delimited. Does not implicitly {@link binlogdata.BinlogDumpGTIDRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof binlogdata.BinlogDumpGTIDRequest + * @static + * @param {binlogdata.IBinlogDumpGTIDRequest} message BinlogDumpGTIDRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BinlogDumpGTIDRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a BinlogDumpGTIDRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof binlogdata.BinlogDumpGTIDRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {binlogdata.BinlogDumpGTIDRequest} BinlogDumpGTIDRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BinlogDumpGTIDRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.BinlogDumpGTIDRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + break; + } + case 2: { + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + break; + } + case 3: { + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + case 4: { + message.binlog_filename = reader.string(); + break; + } + case 5: { + message.binlog_position = reader.uint64(); + break; + } + case 6: { + message.gtid_set = reader.string(); + break; + } + case 7: { + message.non_block = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a BinlogDumpGTIDRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof binlogdata.BinlogDumpGTIDRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {binlogdata.BinlogDumpGTIDRequest} BinlogDumpGTIDRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BinlogDumpGTIDRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a BinlogDumpGTIDRequest message. + * @function verify + * @memberof binlogdata.BinlogDumpGTIDRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + BinlogDumpGTIDRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." 
+ error; + } + if (message.binlog_filename != null && message.hasOwnProperty("binlog_filename")) + if (!$util.isString(message.binlog_filename)) + return "binlog_filename: string expected"; + if (message.binlog_position != null && message.hasOwnProperty("binlog_position")) + if (!$util.isInteger(message.binlog_position) && !(message.binlog_position && $util.isInteger(message.binlog_position.low) && $util.isInteger(message.binlog_position.high))) + return "binlog_position: integer|Long expected"; + if (message.gtid_set != null && message.hasOwnProperty("gtid_set")) + if (!$util.isString(message.gtid_set)) + return "gtid_set: string expected"; + if (message.non_block != null && message.hasOwnProperty("non_block")) + if (typeof message.non_block !== "boolean") + return "non_block: boolean expected"; + return null; + }; + + /** + * Creates a BinlogDumpGTIDRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof binlogdata.BinlogDumpGTIDRequest + * @static + * @param {Object.} object Plain object + * @returns {binlogdata.BinlogDumpGTIDRequest} BinlogDumpGTIDRequest + */ + BinlogDumpGTIDRequest.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.BinlogDumpGTIDRequest) + return object; + let message = new $root.binlogdata.BinlogDumpGTIDRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".binlogdata.BinlogDumpGTIDRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".binlogdata.BinlogDumpGTIDRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".binlogdata.BinlogDumpGTIDRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.binlog_filename != null) + message.binlog_filename = String(object.binlog_filename); + if (object.binlog_position != null) + if ($util.Long) + (message.binlog_position = $util.Long.fromValue(object.binlog_position)).unsigned = true; + else if (typeof object.binlog_position === "string") + message.binlog_position = parseInt(object.binlog_position, 10); + else if (typeof object.binlog_position === "number") + message.binlog_position = object.binlog_position; + else if (typeof object.binlog_position === "object") + message.binlog_position = new $util.LongBits(object.binlog_position.low >>> 0, object.binlog_position.high >>> 0).toNumber(true); + if (object.gtid_set != null) + message.gtid_set = String(object.gtid_set); + if (object.non_block != null) + message.non_block = Boolean(object.non_block); + return message; + }; + + /** + * Creates a plain object from a BinlogDumpGTIDRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof binlogdata.BinlogDumpGTIDRequest + * @static + * @param {binlogdata.BinlogDumpGTIDRequest} message BinlogDumpGTIDRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + BinlogDumpGTIDRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.binlog_filename = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, true); + object.binlog_position = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.binlog_position = options.longs === String ? "0" : 0; + object.gtid_set = ""; + object.non_block = false; + } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.binlog_filename != null && message.hasOwnProperty("binlog_filename")) + object.binlog_filename = message.binlog_filename; + if (message.binlog_position != null && message.hasOwnProperty("binlog_position")) + if (typeof message.binlog_position === "number") + object.binlog_position = options.longs === String ? String(message.binlog_position) : message.binlog_position; + else + object.binlog_position = options.longs === String ? $util.Long.prototype.toString.call(message.binlog_position) : options.longs === Number ? new $util.LongBits(message.binlog_position.low >>> 0, message.binlog_position.high >>> 0).toNumber(true) : message.binlog_position; + if (message.gtid_set != null && message.hasOwnProperty("gtid_set")) + object.gtid_set = message.gtid_set; + if (message.non_block != null && message.hasOwnProperty("non_block")) + object.non_block = message.non_block; + return object; + }; + + /** + * Converts this BinlogDumpGTIDRequest to JSON. + * @function toJSON + * @memberof binlogdata.BinlogDumpGTIDRequest + * @instance + * @returns {Object.} JSON object + */ + BinlogDumpGTIDRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for BinlogDumpGTIDRequest + * @function getTypeUrl + * @memberof binlogdata.BinlogDumpGTIDRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + BinlogDumpGTIDRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/binlogdata.BinlogDumpGTIDRequest"; + }; + + return BinlogDumpGTIDRequest; + })(); + + binlogdata.BinlogDumpResponse = (function() { + + /** + * Properties of a BinlogDumpResponse. + * @memberof binlogdata + * @interface IBinlogDumpResponse + * @property {Uint8Array|null} [packet] BinlogDumpResponse packet + */ + + /** + * Constructs a new BinlogDumpResponse. + * @memberof binlogdata + * @classdesc Represents a BinlogDumpResponse. 
+ * @implements IBinlogDumpResponse + * @constructor + * @param {binlogdata.IBinlogDumpResponse=} [properties] Properties to set + */ + function BinlogDumpResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * BinlogDumpResponse packet. + * @member {Uint8Array} packet + * @memberof binlogdata.BinlogDumpResponse + * @instance + */ + BinlogDumpResponse.prototype.packet = $util.newBuffer([]); + + /** + * Creates a new BinlogDumpResponse instance using the specified properties. + * @function create + * @memberof binlogdata.BinlogDumpResponse + * @static + * @param {binlogdata.IBinlogDumpResponse=} [properties] Properties to set + * @returns {binlogdata.BinlogDumpResponse} BinlogDumpResponse instance + */ + BinlogDumpResponse.create = function create(properties) { + return new BinlogDumpResponse(properties); + }; + + /** + * Encodes the specified BinlogDumpResponse message. Does not implicitly {@link binlogdata.BinlogDumpResponse.verify|verify} messages. + * @function encode + * @memberof binlogdata.BinlogDumpResponse + * @static + * @param {binlogdata.IBinlogDumpResponse} message BinlogDumpResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BinlogDumpResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.packet != null && Object.hasOwnProperty.call(message, "packet")) + writer.uint32(/* id 1, wireType 2 =*/10).bytes(message.packet); + return writer; + }; + + /** + * Encodes the specified BinlogDumpResponse message, length delimited. Does not implicitly {@link binlogdata.BinlogDumpResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof binlogdata.BinlogDumpResponse + * @static + * @param {binlogdata.IBinlogDumpResponse} message BinlogDumpResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BinlogDumpResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a BinlogDumpResponse message from the specified reader or buffer. + * @function decode + * @memberof binlogdata.BinlogDumpResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {binlogdata.BinlogDumpResponse} BinlogDumpResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BinlogDumpResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.BinlogDumpResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.packet = reader.bytes(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a BinlogDumpResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof binlogdata.BinlogDumpResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {binlogdata.BinlogDumpResponse} BinlogDumpResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BinlogDumpResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a BinlogDumpResponse message. + * @function verify + * @memberof binlogdata.BinlogDumpResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + BinlogDumpResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.packet != null && message.hasOwnProperty("packet")) + if (!(message.packet && typeof message.packet.length === "number" || $util.isString(message.packet))) + return "packet: buffer expected"; + return null; + }; + + /** + * Creates a BinlogDumpResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof binlogdata.BinlogDumpResponse + * @static + * @param {Object.} object Plain object + * @returns {binlogdata.BinlogDumpResponse} BinlogDumpResponse + */ + BinlogDumpResponse.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.BinlogDumpResponse) + return object; + let message = new $root.binlogdata.BinlogDumpResponse(); + if (object.packet != null) + if (typeof object.packet === "string") + $util.base64.decode(object.packet, message.packet = $util.newBuffer($util.base64.length(object.packet)), 0); + else if (object.packet.length >= 0) + message.packet = object.packet; + return message; + }; + + /** + * Creates a plain object from a BinlogDumpResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof binlogdata.BinlogDumpResponse + * @static + * @param {binlogdata.BinlogDumpResponse} message BinlogDumpResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + BinlogDumpResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + if (options.bytes === String) + object.packet = ""; + else { + object.packet = []; + if (options.bytes !== Array) + object.packet = $util.newBuffer(object.packet); + } + if (message.packet != null && message.hasOwnProperty("packet")) + object.packet = options.bytes === String ? $util.base64.encode(message.packet, 0, message.packet.length) : options.bytes === Array ? Array.prototype.slice.call(message.packet) : message.packet; + return object; + }; + + /** + * Converts this BinlogDumpResponse to JSON. 
+ * @function toJSON + * @memberof binlogdata.BinlogDumpResponse + * @instance + * @returns {Object.} JSON object + */ + BinlogDumpResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for BinlogDumpResponse + * @function getTypeUrl + * @memberof binlogdata.BinlogDumpResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + BinlogDumpResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/binlogdata.BinlogDumpResponse"; + }; + + return BinlogDumpResponse; + })(); + return binlogdata; })();
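Taken together, the new messages and RPCs forward MySQL packets verbatim, so a consumer has to undo the wire-level fragmentation itself. As a closing, hedged sketch (not part of the diff) of that client-side reassembly, mirroring the rules documented on BinlogDumpResponse; recv and handle are assumed callbacks, for example wrapping a gRPC stream's Recv method:

package example

import (
	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)

// maxPacketSize is the MySQL wire limit for a single packet payload (2^24 - 1).
const maxPacketSize = 1<<24 - 1

// reassembleBinlogMessages reads packets via recv, glues continuation fragments
// back together, hands each complete message to handle, and stops once MySQL
// signals EOF (0xFE) or an error (0xFF) in the first byte of a message.
func reassembleBinlogMessages(
	recv func() (*binlogdatapb.BinlogDumpResponse, error),
	handle func(msg []byte) error,
) error {
	for {
		resp, err := recv()
		if err != nil {
			return err // includes io.EOF when the stream is closed
		}
		msg := append([]byte(nil), resp.Packet...)
		// A fragment of exactly maxPacketSize bytes is always followed by another
		// fragment (possibly zero-length) belonging to the same message.
		for len(resp.Packet) == maxPacketSize {
			if resp, err = recv(); err != nil {
				return err
			}
			msg = append(msg, resp.Packet...)
		}
		if err := handle(msg); err != nil {
			return err
		}
		if len(msg) > 0 && (msg[0] == 0xFE || msg[0] == 0xFF) {
			return nil // EOF or error packet ends the dump
		}
	}
}

Checking only the first byte of the first fragment matches what streamBinlogPackets does on the server side; a stricter client could also validate packet lengths before treating 0xFE as EOF.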