HN6 (Fetch with an unknown topic)

Hello everyone and thank you for helping check my issue
Currently, I am in HN6 of challenge Kafka, I receive the error about reduced byte after decoding the FetchResponse. It is from bytes 12 to 27 which is the location of the topic ID
The error:


The body creates

func (r *FetchResponse) ToBytes() ([]byte, error) {
	buf := new(bytes.Buffer)
	// tagged_fields
	if err := binary.Write(buf, binary.BigEndian, int8(0)); err != nil {
		return nil, err
	}

	// write throttle_time_ms
	if err := binary.Write(buf, binary.BigEndian, r.ThrottleTimeMs); err != nil {
		return nil, err
	}

	// write error_code
	if err := binary.Write(buf, binary.BigEndian, r.ErrorCode); err != nil {
		return nil, err
	}

	// write session_id
	if err := binary.Write(buf, binary.BigEndian, r.SessionID); err != nil {
		return nil, err
	}

	// write num_of_responses
	if err := binary.Write(buf, binary.BigEndian, int8(len(r.Topics)+1)); err != nil {
		return nil, err
	}

	for _, topic := range r.Topics {
		// write topic_id

		if err := binary.Write(buf, binary.BigEndian, topic.TopicID); err != nil {
			return nil, err
		}

		// write num_of_partitions
		if err := binary.Write(buf, binary.BigEndian, int8(len(topic.Partitions)+1)); err != nil {
			return nil, err
		}

		for _, part := range topic.Partitions {
			// write partition_index
			if err := binary.Write(buf, binary.BigEndian, part.PartitionIndex); err != nil {
				return nil, err
			}

			// write error_code
			if err := binary.Write(buf, binary.BigEndian, part.ErrorCode); err != nil {
				return nil, err
			}

			// // write high_watermark
			// if err := binary.Write(buf, binary.BigEndian, int64(0)); err != nil {
			// 	return nil, err
			// }

			// // write last_stable_offset
			// if err := binary.Write(buf, binary.BigEndian, int64(0)); err != nil {
			// 	return nil, err
			// }

			// // write log_start_offset
			// if err := binary.Write(buf, binary.BigEndian, int64(0)); err != nil {
			// 	return nil, err
			// }

			// write aborted_transactions - length 0
			// if err := binary.Write(buf, binary.BigEndian, int8(1)); err != nil {
			// 	return nil, err
			// }

			// write producer_id
			// if err := binary.Write(buf, binary.BigEndian, int64(0)); err != nil {
			// 	return nil, err
			// }

			// write first_offset
			// if err := binary.Write(buf, binary.BigEndian, int64(0)); err != nil {
			// 	return nil, err
			// }

			// // _tagged_fields
			// if err := binary.Write(buf, binary.BigEndian, int8(0)); err != nil {
			// 	return nil, err
			// }

			// // write preferred_read_replica
			// if err := binary.Write(buf, binary.BigEndian, int32(0)); err != nil {
			// 	return nil, err
			// }

			// // write records_size
			// if err := binary.Write(buf, binary.BigEndian, int8(0)); err != nil {
			// 	return nil, err
			// }

			// _tagged_fields
			if err := binary.Write(buf, binary.BigEndian, int8(0)); err != nil {
				return nil, err
			}
		}

		// tag_buffer 1 byte - end of topic
		if err := binary.Write(buf, binary.BigEndian, int8(0)); err != nil {
			return nil, err
		}
	}

	// tag_buffer
	if err := binary.Write(buf, binary.BigEndian, int8(0)); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

about the topic id uuid, I get it from the request body from buf[23: 23+16]

// parse body
	maxWaitMs := req.body[0:4]
	minBytes := req.body[4:8]
	maxBytes := req.body[8:12]
	isolationLevel := req.body[12:13]
	sessionID := req.body[13:17]
	sessionEpoch := req.body[17:21]
	topicLength := req.body[21:23]
	topicID := req.body[23 : 23+16]

How can I fix the error, please help.
thanks again and sorry if my English wasn’t good

@ntquang98 Could you put your code on GitHub and share a link? It’s easier to debug if I can run your code. :handshake:

1 Like

Hi @andy1li, thanks for replyed. Here is my code codecrafters-kafka/app/server.go at main · ntquang98/codecrafters-kafka · GitHub

@ntquang98 Got it. I’ll take a look on Monday.

1 Like

@ntquang98 I tried running your code, but unfortunately, it couldn’t pass the 2nd stage anymore. Fixing this first might help with the issue in the later stages:

Suggestion: use our CLI to run tests against previous stages like this:

codecrafters test --previous
1 Like

Thank you, I found the problem

1 Like

This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.