BLD: use unsigned instead of signed for lengths, avoid build warnings… · pandas-dev/pandas@2b9b58d
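The hunks below are from the parser's C tokenizer. The change swaps `int64_t` for `uint64_t` on length, capacity, and index variables so that they match the unsigned quantities they are compared against and assigned from, which is presumably what silences the build warnings named in the title (signed/unsigned comparison warnings such as `-Wsign-compare`). As a minimal standalone sketch of that warning class, not taken from the pandas sources (the function and variable names are invented): on a typical 64-bit build, comparing a signed `int64_t` index against an unsigned `size_t` length warns under `gcc -Wall -Wextra`, while the all-unsigned version compiles cleanly.

```c
/* sign_warning_sketch.c -- illustrative only, not taken from the pandas
 * sources; the function and variable names are invented.
 *
 * Compile with: gcc -std=c99 -Wall -Wextra -c sign_warning_sketch.c
 */
#include <stddef.h>
#include <stdint.h>

/* Signed index vs. unsigned length: on a typical 64-bit build this compares
 * an int64_t against a size_t, and -Wsign-compare (part of -Wextra) fires. */
size_t count_nonzero_signed(const char *buf, size_t len) {
    size_t hits = 0;
    for (int64_t i = 0; i < len; ++i) {
        if (buf[i] != 0) hits++;
    }
    return hits;
}

/* Unsigned index throughout: same logic, no signedness warning. This is the
 * direction the commit takes for the tokenizer's lengths and capacities. */
size_t count_nonzero_unsigned(const char *buf, size_t len) {
    size_t hits = 0;
    for (uint64_t i = 0; i < len; ++i) {
        if (buf[i] != 0) hits++;
    }
    return hits;
}
```

The hunks below apply this change throughout `grow_buffer`, `make_stream_space`, `end_line`, `tokenize_bytes`, `parser_consume_rows`, and `parser_trim_buffers`.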
```diff
@@ -71,9 +71,9 @@ static void free_if_not_null(void **ptr) {
 
 */
 
-static void *grow_buffer(void *buffer, int64_t length, int64_t *capacity,
+static void *grow_buffer(void *buffer, uint64_t length, uint64_t *capacity,
                          int64_t space, int64_t elsize, int *error) {
-    int64_t cap = *capacity;
+    uint64_t cap = *capacity;
     void *newbuffer = buffer;
 
     // Can we fit potentially nbytes tokens (+ null terminators) in the stream?
```
```diff
@@ -248,7 +248,7 @@ void parser_del(parser_t *self) {
 }
 
 static int make_stream_space(parser_t *self, size_t nbytes) {
-    int64_t i, cap, length;
+    uint64_t i, cap, length;
     int status;
     void *orig_ptr, *newptr;
 
@@ -263,7 +263,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) {
         ("\n\nmake_stream_space: nbytes = %zu. grow_buffer(self->stream...)\n",
          nbytes))
     self->stream = (char *)grow_buffer((void *)self->stream, self->stream_len,
-                                       (int64_t*)&self->stream_cap, nbytes * 2,
+                                       &self->stream_cap, nbytes * 2,
                                        sizeof(char), &status);
     TRACE(
         ("make_stream_space: self->stream=%p, self->stream_len = %zu, "
@@ -305,7 +305,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) {
 
     self->words =
         (char **)grow_buffer((void *)self->words, length,
-                             (int64_t*)&self->words_cap, nbytes,
+                             &self->words_cap, nbytes,
                              sizeof(char *), &status);
     TRACE(
         ("make_stream_space: grow_buffer(self->self->words, %zu, %zu, %zu, "
@@ -336,7 +336,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) {
     cap = self->lines_cap;
     self->line_start =
         (int64_t *)grow_buffer((void *)self->line_start, self->lines + 1,
-                               (int64_t*)&self->lines_cap, nbytes,
+                               &self->lines_cap, nbytes,
                                sizeof(int64_t), &status);
     TRACE((
         "make_stream_space: grow_buffer(self->line_start, %zu, %zu, %zu, %d)\n",
```
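Dropping the `(int64_t*)` casts on `&self->stream_cap`, `&self->words_cap`, and `&self->lines_cap` only compiles cleanly because `grow_buffer` now takes `uint64_t *capacity` and the capacity fields presumably become `uint64_t` as well (in `tokenizer.h`, which is not shown in these hunks). A small illustrative sketch of the design choice, with invented names: when pointee types disagree, the compiler emits an incompatible-pointer-type diagnostic, and a cast merely hides the disagreement rather than resolving it, so matching the types and removing the casts is the more robust fix.

```c
/* capacity_pointer_sketch.c -- illustrative only; holder_t and grow() are
 * invented stand-ins, not the pandas definitions.
 *
 * Compile with: gcc -std=c99 -Wall -Wextra -c capacity_pointer_sketch.c
 */
#include <stdint.h>

typedef struct {
    int64_t  cap_signed;    /* old-style signed capacity field   */
    uint64_t cap_unsigned;  /* new-style unsigned capacity field */
} holder_t;

/* Plays the role of grow_buffer's uint64_t *capacity parameter. */
static void grow(uint64_t *capacity) {
    *capacity = (*capacity == 0) ? 1 : *capacity * 2;
}

void demo(holder_t *h) {
    /* grow(&h->cap_signed);  <-- incompatible-pointer-type diagnostic   */
    grow((uint64_t *)&h->cap_signed);  /* a cast silences the diagnostic
                                          but only hides the mismatch    */
    grow(&h->cap_unsigned);            /* matching types: no cast needed */
}
```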
```diff
@@ -471,7 +471,7 @@ static int end_line(parser_t *self) {
         return 0;
     }
 
-    if (!(self->lines <= (int64_t) self->header_end + 1) &&
+    if (!(self->lines <= self->header_end + 1) &&
         (self->expected_fields < 0 && fields > ex_fields) && !(self->usecols)) {
         // increment file line count
         self->file_lines++;
@@ -507,7 +507,7 @@ static int end_line(parser_t *self) {
             }
         } else {
             // missing trailing delimiters
-            if ((self->lines >= (int64_t) self->header_end + 1) &&
+            if ((self->lines >= self->header_end + 1) &&
                 fields < ex_fields) {
                 // might overrun the buffer when closing fields
                 if (make_stream_space(self, ex_fields - fields) < 0) {
```
```diff
@@ -651,7 +651,7 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes) {
     stream = self->stream + self->stream_len; \
     slen = self->stream_len; \
     self->state = STATE; \
-    if (line_limit > 0 && self->lines == start_lines + (int64_t)line_limit) { \
+    if (line_limit > 0 && self->lines == start_lines + line_limit) { \
         goto linelimit; \
     }
 
@@ -666,7 +666,7 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes) {
     stream = self->stream + self->stream_len; \
     slen = self->stream_len; \
     self->state = STATE; \
-    if (line_limit > 0 && self->lines == start_lines + (int64_t)line_limit) { \
+    if (line_limit > 0 && self->lines == start_lines + line_limit) { \
         goto linelimit; \
     }
 
```
```diff
@@ -737,7 +737,8 @@ int skip_this_line(parser_t *self, int64_t rownum) {
 
 int tokenize_bytes(parser_t *self,
                    size_t line_limit, int64_t start_lines) {
-    int64_t i, slen;
+    int64_t i;
+    uint64_t slen;
     int should_skip;
     char c;
     char *stream;
```
```diff
@@ -1203,7 +1204,8 @@ static int parser_handle_eof(parser_t *self) {
 }
 
 int parser_consume_rows(parser_t *self, size_t nrows) {
-    int64_t i, offset, word_deletions, char_count;
+    int64_t offset, word_deletions;
+    uint64_t char_count, i;
 
     if (nrows > self->lines) {
         nrows = self->lines;
@@ -1229,6 +1231,8 @@ int parser_consume_rows(parser_t *self, size_t nrows) {
     self->stream_len -= char_count;
 
     /* move token metadata */
+    // Note: We should always have words_len < word_deletions, so this
+    // subtraction will remain appropriately-typed.
     for (i = 0; i < self->words_len - word_deletions; ++i) {
         offset = i + word_deletions;
 
@@ -1242,6 +1246,8 @@ int parser_consume_rows(parser_t *self, size_t nrows) {
     self->word_start -= char_count;
 
     /* move line metadata */
+    // Note: We should always have self->lines - nrows + 1 >= 0, so this
+    // subtraction will remain appropriately-typed.
     for (i = 0; i < self->lines - nrows + 1; ++i) {
         offset = i + nrows;
         self->line_start[i] = self->line_start[offset] - word_deletions;
```
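The two `Note:` comments added above document invariants rather than add guards. With the counters now unsigned, the loop bounds `self->words_len - word_deletions` and `self->lines - nrows + 1` only behave if the subtractions cannot go below zero: the first loop relies on `word_deletions` never exceeding `self->words_len`, and the second on `nrows`, which the hunk above clamps to `self->lines`, never exceeding `self->lines + 1`. The sketch below (standalone, not from the pandas sources) shows what happens when such an invariant breaks: unsigned arithmetic wraps modulo 2^64 instead of going negative, and the loop bound becomes astronomically large.

```c
/* unsigned_wrap_sketch.c -- illustrative only, not from the pandas sources.
 * Demonstrates why the loop bounds above depend on the stated invariants:
 * uint64_t arithmetic is modulo 2^64, so a subtraction never goes negative,
 * it wraps around to a huge value.
 *
 * Compile with: gcc -std=c99 -Wall -Wextra unsigned_wrap_sketch.c
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t words_len = 3, word_deletions = 5;  /* invariant violated */

    /* Wraps to 2^64 - 2; a loop bounded by this would run far past the
     * end of any real buffer. */
    uint64_t bound = words_len - word_deletions;
    printf("wrapped bound: %" PRIu64 "\n", bound);

    /* An explicit guard avoids the wrap; the tokenizer instead relies on
     * the invariant documented in the Note comments. */
    uint64_t guarded = (words_len > word_deletions)
                           ? words_len - word_deletions
                           : 0;
    printf("guarded bound: %" PRIu64 "\n", guarded);
    return 0;
}
```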
```diff
@@ -1265,7 +1271,7 @@ int parser_trim_buffers(parser_t *self) {
     size_t new_cap;
     void *newptr;
 
-    int64_t i;
+    uint64_t i;
 
     /**
      * Before we free up space and trim, we should
```