PostgreSQL Source Code: src/backend/access/transam/xloginsert.c Source File


#ifdef USE_LZ4
#include <lz4.h>
#endif

#ifdef USE_ZSTD
#include <zstd.h>
#endif

#ifdef USE_LZ4
#define LZ4_MAX_BLCKSZ		LZ4_COMPRESSBOUND(BLCKSZ)
#else
#define LZ4_MAX_BLCKSZ		0
#endif

#ifdef USE_ZSTD
#define ZSTD_MAX_BLCKSZ		ZSTD_COMPRESSBOUND(BLCKSZ)
#else
#define ZSTD_MAX_BLCKSZ		0
#endif

#define PGLZ_MAX_BLCKSZ		PGLZ_MAX_OUTPUT(BLCKSZ)

#define COMPRESS_BUFSIZE	Max(Max(PGLZ_MAX_BLCKSZ, LZ4_MAX_BLCKSZ), ZSTD_MAX_BLCKSZ)
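/*
 * Illustrative sketch (not part of xloginsert.c): COMPRESS_BUFSIZE has to
 * cover the worst-case output of whichever compression method is in use,
 * because a bounded compressor can expand incompressible input.  The helper
 * name below is hypothetical; the bound macros and the WAL_COMPRESSION_*
 * enum values are the real ones.
 */
static inline size_t
example_compress_bound(int method)
{
    switch (method)
    {
        case WAL_COMPRESSION_PGLZ:
            return PGLZ_MAX_OUTPUT(BLCKSZ);
#ifdef USE_LZ4
        case WAL_COMPRESSION_LZ4:
            return LZ4_COMPRESSBOUND(BLCKSZ);
#endif
#ifdef USE_ZSTD
        case WAL_COMPRESSION_ZSTD:
            return ZSTD_COMPRESSBOUND(BLCKSZ);
#endif
        default:
            return 0;           /* compression disabled or unsupported */
    }
}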

/*
 * For each block reference registered with XLogRegisterBuffer, we fill in
 * a registered_buffer struct.
 */
typedef struct
{
    bool        in_use;         /* is this slot in use? */
    /* ... REGBUF_* flags, relation/fork/block identity, page pointer and
     * the per-block rdata chain are elided ... */

    XLogRecData bkp_rdatas[2];  /* temporary rdatas used to hold references
                                 * to backup block data in
                                 * XLogRecordAssemble() */

    /* buffer to store a compressed version of backup block image */
    char        compressed_page[COMPRESS_BUFSIZE];
} registered_buffer;

/* ... static state for the record being constructed (the registered_buffers
 * array, the main-data chain, the header scratch buffer, curinsert_flags,
 * begininsert_called, the xloginsert memory context) is elided ... */

#define SizeOfXlogOrigin	(sizeof(RepOriginId) + sizeof(char))
#define SizeOfXLogTransactionId	(sizeof(TransactionId) + sizeof(char))

#define HEADER_SCRATCH_SIZE \
	(SizeOfXLogRecord + \
	 MaxSizeOfXLogRecordBlockHeader * (XLR_MAX_BLOCK_ID + 1) + \
	 SizeOfXLogRecordDataHeaderLong + SizeOfXlogOrigin + \
	 SizeOfXLogTransactionId)

/* An array of XLogRecData structs, to hold registered data. */
static XLogRecData *rdatas;
static int	num_rdatas;			/* entries currently used */
static int	max_rdatas;			/* allocated size */

/* prototypes for private functions */
static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
										XLogRecPtr RedoRecPtr, bool doPageWrites,
										XLogRecPtr *fpw_lsn, int *num_fpi,
										bool *topxid_included);
static bool XLogCompressBackupBlock(const PageData *page, uint16 hole_offset,
									uint16 hole_length, void *dest, uint16 *dlen);

/*
 * Begin constructing a WAL record. This must be called before the
 * XLogRegister* functions and XLogInsert().
 */
void
XLogBeginInsert(void)
{
	/* ... */

	/* cross-check on whether we should be here or not */
	if (!XLogInsertAllowed())
		elog(ERROR, "cannot make new WAL entries during recovery");

	if (begininsert_called)
		elog(ERROR, "XLogBeginInsert was already called");

	begininsert_called = true;
}
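/*
 * Illustrative sketch (not part of xloginsert.c): the typical calling
 * pattern for this API in a resource manager, using the registration
 * functions defined below.  xl_my_record, RM_MY_ID and XLOG_MY_OP are
 * hypothetical placeholders; the buffer is assumed to be exclusive-locked.
 */
static void
example_emit_wal_record(Buffer buffer, xl_my_record *xlrec)
{
    XLogRecPtr  recptr;

    START_CRIT_SECTION();

    /* ... modify the page held in buffer ... */
    MarkBufferDirty(buffer);

    XLogBeginInsert();
    XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); /* block reference 0 */
    XLogRegisterData(xlrec, sizeof(*xlrec));        /* record's main data */

    recptr = XLogInsert(RM_MY_ID, XLOG_MY_OP);      /* hypothetical rmgr/info */

    PageSetLSN(BufferGetPage(buffer), recptr);

    END_CRIT_SECTION();
}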

/*
 * Ensure that there are enough buffer and data slots in the working area
 * for subsequent XLogRegisterBuffer, XLogRegisterData and
 * XLogRegisterBufData calls.  There is always space for a small number of
 * block references and data chunks (XLR_NORMAL_MAX_BLOCK_ID and
 * XLR_NORMAL_RDATAS); this is only needed for the exceptional cases.
 */
void
XLogEnsureRecordSpace(int max_block_id, int ndatas)
{
	int			nbuffers;

	/*
	 * This must be called before entering a critical section, because
	 * allocating memory inside a critical section can fail.
	 */
	Assert(CritSectionCount == 0);

	/* ... clamp the requests to the XLR_NORMAL_* minimums ... */

	if (max_block_id > XLR_MAX_BLOCK_ID)
		elog(ERROR, "maximum number of WAL record block references exceeded");
	nbuffers = max_block_id + 1;

	if (nbuffers > max_registered_buffers)
	{
		/* ... enlarge (repalloc) and zero-fill the registered_buffers array,
		 * then update max_registered_buffers ... */
	}

	if (ndatas > max_rdatas)
	{
		/* ... enlarge (repalloc) the rdatas array and update max_rdatas ... */
	}
}
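/*
 * Illustrative sketch (not part of xloginsert.c): a caller that will
 * register more block references than the XLR_NORMAL_MAX_BLOCK_ID default
 * reserves space up front, outside the critical section, so the
 * XLogRegister* calls cannot fail on allocation later.  The buffers[] array
 * and nblocks (assumed <= XLR_MAX_BLOCK_ID + 1) are hypothetical.
 */
static void
example_register_many_blocks(Buffer *buffers, int nblocks)
{
    int         i;

    /* must run before START_CRIT_SECTION(); it may allocate memory */
    XLogEnsureRecordSpace(nblocks - 1, 0);

    START_CRIT_SECTION();

    XLogBeginInsert();
    for (i = 0; i < nblocks; i++)
    {
        MarkBufferDirty(buffers[i]);
        XLogRegisterBuffer(i, buffers[i], REGBUF_STANDARD);
    }
    /* ... XLogInsert(), PageSetLSN() on each page, END_CRIT_SECTION() ... */
}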

/*
 * Reset WAL record construction buffers.
 */
void
XLogResetInsertion(void)
{
	int			i;

	for (i = 0; i < max_registered_block_id; i++)
		registered_buffers[i].in_use = false;

	/* ... reset the rdata counters, the main-data chain, curinsert_flags
	 * and begininsert_called ... */
}

/*
 * Register a reference to a buffer with the WAL record being constructed.
 * This must be called for every page that the WAL-logged operation modifies.
 */
void
XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
{
	registered_buffer *regbuf;

	Assert(begininsert_called);

	/* ... sanity checks on the flags; ordinarily the buffer must be
	 * exclusive-locked and dirty at this point ... */
#ifdef USE_ASSERT_CHECKING
	/* ... */
#endif

	if (block_id >= max_registered_block_id)
	{
		if (block_id >= max_registered_buffers)
			elog(ERROR, "too many registered buffers");
		max_registered_block_id = block_id + 1;
	}

	regbuf = &registered_buffers[block_id];

	BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
	regbuf->page = BufferGetPage(buffer);
	regbuf->flags = flags;
	/* ... reset the block's rdata chain ... */

	/*
	 * Check that this page hasn't already been registered with some other
	 * block_id.
	 */
#ifdef USE_ASSERT_CHECKING
	{
		int			i;

		for (i = 0; i < max_registered_block_id; i++)
		{
			registered_buffer *regbuf_old = &registered_buffers[i];

			if (i == block_id || !regbuf_old->in_use)
				continue;

			Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
				   regbuf_old->forkno != regbuf->forkno ||
				   regbuf_old->block != regbuf->block);
		}
	}
#endif

	regbuf->in_use = true;
}

/*
 * Like XLogRegisterBuffer, but for registering a block that is not in the
 * shared buffer pool (i.e. when the caller has no Buffer for it); the
 * relation, fork, block number and page contents are supplied directly.
 */
void
XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum,
				  BlockNumber blknum, const PageData *page, uint8 flags)
{
	registered_buffer *regbuf;

	Assert(begininsert_called);

	if (block_id >= max_registered_block_id)
		max_registered_block_id = block_id + 1;

	if (block_id >= max_registered_buffers)
		elog(ERROR, "too many registered buffers");

	regbuf = &registered_buffers[block_id];

	regbuf->rlocator = *rlocator;
	regbuf->forkno = forknum;
	regbuf->block = blknum;
	regbuf->page = page;
	regbuf->flags = flags;
	/* ... reset the block's rdata chain ... */

	/*
	 * Check that this page hasn't already been registered with some other
	 * block_id.
	 */
#ifdef USE_ASSERT_CHECKING
	{
		int			i;

		for (i = 0; i < max_registered_block_id; i++)
		{
			registered_buffer *regbuf_old = &registered_buffers[i];

			if (i == block_id || !regbuf_old->in_use)
				continue;

			Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
				   regbuf_old->forkno != regbuf->forkno ||
				   regbuf_old->block != regbuf->block);
		}
	}
#endif

	regbuf->in_use = true;
}

/*
 * Add data to the WAL record that is being constructed.
 *
 * The data is appended to the "main chunk", available at replay with
 * XLogRecGetData().
 */
void
XLogRegisterData(const void *data, uint32 len)
{
	XLogRecData *rdata;

	Assert(begininsert_called);

	/* ... error out if the rdatas[] array is already full ... */
	rdata = &rdatas[num_rdatas++];

	rdata->data = data;
	rdata->len = len;

	mainrdata_last->next = rdata;
	mainrdata_last = rdata;

	mainrdata_len += len;
}

/*
 * Add buffer-specific data to the WAL record that is being constructed.
 *
 * block_id must reference a block previously registered with
 * XLogRegisterBuffer().  If this is called more than once for the same
 * block_id, the data is appended.  The maximum amount of data that can be
 * registered per block is 65535 bytes; the "main data" that is not
 * associated with a block is not limited in the same way.
 */
void
XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
{
	registered_buffer *regbuf;
	XLogRecData *rdata;

	Assert(begininsert_called);

	/* find the registered buffer struct */
	regbuf = &registered_buffers[block_id];
	if (!regbuf->in_use)
		elog(ERROR, "no block with id %d registered with WAL insertion",
			 block_id);

	/* ... error out if the rdatas[] array is already full ... */
	if (regbuf->rdata_len + len > UINT16_MAX || len > UINT16_MAX)
		ereport(ERROR,
				(errmsg_internal("too much WAL data"),
				 errdetail_internal("Registering more than maximum %u bytes allowed to block %u: current %u bytes, adding %u bytes.",
									UINT16_MAX, block_id, regbuf->rdata_len, len)));

	rdata = &rdatas[num_rdatas++];

	rdata->data = data;
	rdata->len = len;

	/* ... append rdata to the block's data chain and add len to
	 * regbuf->rdata_len ... */
}
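/*
 * Illustrative sketch (not part of xloginsert.c): per-block data registered
 * with XLogRegisterBufData() is normally omitted from the record when a
 * full-page image of that block is taken instead, so redo code must cope
 * with both cases; a caller that needs the data even alongside an image
 * passes REGBUF_KEEP_DATA.  The tuple/offset parameters are hypothetical.
 */
static void
example_register_block_data(Buffer buffer, void *tuple, uint32 tuplen,
                            OffsetNumber offnum)
{
    XLogBeginInsert();
    XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

    /* data needed to redo the change when no full-page image is present */
    XLogRegisterBufData(0, &offnum, sizeof(OffsetNumber));
    XLogRegisterBufData(0, tuple, tuplen);

    /* ... XLogInsert(), PageSetLSN() etc. as in the earlier sketch ... */
}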

/*
 * Set insert status flags for the upcoming WAL record.
 *
 * The flags that can be used here are:
 * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
 *   included in the record.
 * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
 *   durability, which allows avoiding unnecessary WAL activity.
 */
void
XLogSetRecordFlags(uint8 flags)
{
	Assert(begininsert_called);
	curinsert_flags |= flags;
}

/*
 * Insert an XLOG record having the specified RMID and info bytes, with the
 * body of the record being the data and buffer references registered earlier
 * with the XLogRegister* calls.
 *
 * Returns the XLOG pointer to the end of the record (beginning of the next
 * record).  This can be used as the LSN for data pages affected by the
 * logged action: the LSN is the XLOG point up to which the XLOG must be
 * flushed before the data page may be written out ("write the log before
 * the data").
 */
XLogRecPtr
XLogInsert(RmgrId rmid, uint8 info)
{
	XLogRecPtr	EndPos;

	/* XLogBeginInsert() must have been called. */
	if (!begininsert_called)
		elog(ERROR, "XLogBeginInsert was not called");

	/*
	 * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
	 * XLR_CHECK_CONSISTENCY; the remaining info bits are reserved.
	 */
	if ((info & ~(XLR_RMGR_INFO_MASK |
				  XLR_SPECIAL_REL_UPDATE |
				  XLR_CHECK_CONSISTENCY)) != 0)
		elog(PANIC, "invalid xlog info mask %02X", info);

	TRACE_POSTGRESQL_WAL_INSERT(rmid, info);

	/*
	 * In bootstrap mode, we don't actually log anything but XLOG resources;
	 * return a phony record pointer.
	 */
	if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
	{
		XLogResetInsertion();
		EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
		return EndPos;
	}

	do
	{
		XLogRecPtr	RedoRecPtr;
		bool		doPageWrites;
		bool		topxid_included = false;
		XLogRecPtr	fpw_lsn;
		XLogRecData *rdt;
		int			num_fpi = 0;

		/*
		 * Get values needed to decide whether full-page images are needed;
		 * XLogInsertRecord() rechecks them once it holds an insertion lock.
		 */
		GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites);

		rdt = XLogRecordAssemble(rmid, info, RedoRecPtr, doPageWrites,
								 &fpw_lsn, &num_fpi, &topxid_included);

		EndPos = XLogInsertRecord(rdt, fpw_lsn, curinsert_flags, num_fpi,
								  topxid_included);
	} while (EndPos == InvalidXLogRecPtr);

	XLogResetInsertion();

	return EndPos;
}

/*
 * Assemble a WAL record from the registered data and buffers into an
 * XLogRecData chain, ready for insertion with XLogInsertRecord().
 *
 * The record header fields are filled in, except for the prev-link.
 *
 * If there are any registered buffers, and a full-page image was not taken
 * of all of them, *fpw_lsn is set to the lowest LSN among such pages.  This
 * signals that the assembled record is only good for insertion on the
 * assumption that the RedoRecPtr and doPageWrites values were up-to-date.
 *
 * *topxid_included is set if the topmost transaction ID is logged with the
 * current subtransaction.
 */
static XLogRecData *
XLogRecordAssemble(RmgrId rmid, uint8 info,
				   XLogRecPtr RedoRecPtr, bool doPageWrites,
				   XLogRecPtr *fpw_lsn, int *num_fpi, bool *topxid_included)
{
	XLogRecord *rechdr;
	uint64		total_len = 0;
	int			block_id;
	pg_crc32c	rdata_crc;
	registered_buffer *prev_regbuf = NULL;
	XLogRecData *rdt_datas_last;
	char	   *scratch = hdr_scratch;

	/* The record begins with the fixed-size header */
	rechdr = (XLogRecord *) scratch;
	scratch += SizeOfXLogRecord;

	hdr_rdt.next = NULL;
	rdt_datas_last = &hdr_rdt;
	hdr_rdt.data = hdr_scratch;

	/* Enforce consistency checks for this record if the user asked for it */
	if (wal_consistency_checking[rmid])
		info |= XLR_CHECK_CONSISTENCY;

	/*
	 * Make an rdata chain containing all the data portions of all block
	 * references, including the data for full-page images, and append the
	 * block reference headers to the scratch buffer.
	 */
	*fpw_lsn = InvalidXLogRecPtr;
	for (block_id = 0; block_id < max_registered_block_id; block_id++)
	{
		registered_buffer *regbuf = &registered_buffers[block_id];
		bool		needs_backup;
		bool		needs_data;
		XLogRecordBlockHeader bkpb;
		XLogRecordBlockImageHeader bimg;
		XLogRecordBlockCompressHeader cbimg = {0};
		bool		samerel;
		bool		is_compressed = false;
		bool		include_image;

		if (!regbuf->in_use)
			continue;

		/* Determine if this block needs to be backed up */
		if (regbuf->flags & REGBUF_FORCE_IMAGE)
			needs_backup = true;
		else if (regbuf->flags & REGBUF_NO_IMAGE)
			needs_backup = false;
		else if (!doPageWrites)
			needs_backup = false;
		else
		{
			/*
			 * We assume the page LSN is first data on all pages except new
			 * ones, so that we can compare it against the redo pointer.
			 */
			XLogRecPtr	page_lsn = PageGetLSN(regbuf->page);

			needs_backup = (page_lsn <= RedoRecPtr);
			if (!needs_backup)
			{
				if (*fpw_lsn == InvalidXLogRecPtr || page_lsn < *fpw_lsn)
					*fpw_lsn = page_lsn;
			}
		}

		/* Determine if the buffer data needs to be included */
		if (regbuf->rdata_len == 0)
			needs_data = false;
		else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
			needs_data = true;
		else
			needs_data = !needs_backup;

		bkpb.id = block_id;
		bkpb.fork_flags = regbuf->forkno;
		bkpb.data_length = 0;

		if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
			bkpb.fork_flags |= BKPBLOCK_WILL_INIT;

		/*
		 * If needs_backup is true or WAL consistency checking is enabled for
		 * the current resource manager, log a full-page write for the block.
		 */
		include_image = needs_backup || (info & XLR_CHECK_CONSISTENCY) != 0;

		if (include_image)
		{
			const PageData *page = regbuf->page;
			uint16		compressed_len = 0;

			/* ... compute the page's "hole" (the unused space between
			 * pd_lower and pd_upper) for pages with a standard layout;
			 * otherwise cbimg.hole_offset and cbimg.hole_length stay 0 ... */

			/* Try to compress the block image if wal_compression is enabled */
			if (wal_compression != WAL_COMPRESSION_NONE)
			{
				is_compressed =
					XLogCompressBackupBlock(page, cbimg.hole_offset,
											cbimg.hole_length,
											regbuf->compressed_page,
											&compressed_len);
			}

			/* Report a full-page image constructed for the WAL record */
			*num_fpi += 1;

			/* Construct XLogRecData entries for the page content */
			rdt_datas_last->next = &regbuf->bkp_rdatas[0];
			rdt_datas_last = rdt_datas_last->next;

			bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;

			/*
			 * During redo, the full-page image is applied only if
			 * BKPIMAGE_APPLY is set.
			 */
			bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
			bimg.hole_offset = cbimg.hole_offset;

			if (needs_backup)
				bimg.bimg_info |= BKPIMAGE_APPLY;

			if (is_compressed)
			{
				/* The compressed image is stored in the WAL record */
				bimg.length = compressed_len;

				/* Set the compression method used for this block */
				switch ((WalCompression) wal_compression)
				{
					case WAL_COMPRESSION_PGLZ:
						bimg.bimg_info |= BKPIMAGE_COMPRESS_PGLZ;
						break;

					case WAL_COMPRESSION_LZ4:
#ifdef USE_LZ4
						bimg.bimg_info |= BKPIMAGE_COMPRESS_LZ4;
#else
						elog(ERROR, "LZ4 is not supported by this build");
#endif
						break;

					case WAL_COMPRESSION_ZSTD:
#ifdef USE_ZSTD
						bimg.bimg_info |= BKPIMAGE_COMPRESS_ZSTD;
#else
						elog(ERROR, "zstd is not supported by this build");
#endif
						break;

					case WAL_COMPRESSION_NONE:
						Assert(false);	/* cannot happen */
						break;
				}

				rdt_datas_last->data = regbuf->compressed_page;
				rdt_datas_last->len = compressed_len;
			}
			else
			{
				bimg.length = BLCKSZ - cbimg.hole_length;

				if (cbimg.hole_length == 0)
				{
					rdt_datas_last->data = page;
					rdt_datas_last->len = BLCKSZ;
				}
				else
				{
					/* must skip the hole, so use two rdata entries */
					rdt_datas_last->data = page;
					rdt_datas_last->len = cbimg.hole_offset;

					rdt_datas_last->next = &regbuf->bkp_rdatas[1];
					rdt_datas_last = rdt_datas_last->next;

					rdt_datas_last->data =
						page + (cbimg.hole_offset + cbimg.hole_length);
					rdt_datas_last->len =
						BLCKSZ - (cbimg.hole_offset + cbimg.hole_length);
				}
			}

			total_len += bimg.length;
		}

		if (needs_data)
		{
			/*
			 * Link the caller-supplied rdata chain for this buffer to the
			 * overall list.
			 */
			bkpb.fork_flags |= BKPBLOCK_HAS_DATA;
			bkpb.data_length = (uint16) regbuf->rdata_len;
			total_len += regbuf->rdata_len;

			/* ... append the block's registered rdata chain ... */
		}

		if (prev_regbuf &&
			RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
		{
			samerel = true;
			bkpb.fork_flags |= BKPBLOCK_SAME_REL;
		}
		else
			samerel = false;
		prev_regbuf = regbuf;

		/* Ok, copy the header to the scratch buffer */
		memcpy(scratch, &bkpb, SizeOfXLogRecordBlockHeader);
		scratch += SizeOfXLogRecordBlockHeader;
		if (include_image)
		{
			memcpy(scratch, &bimg, SizeOfXLogRecordBlockImageHeader);
			scratch += SizeOfXLogRecordBlockImageHeader;
			if (cbimg.hole_length != 0 && is_compressed)
			{
				memcpy(scratch, &cbimg,
					   SizeOfXLogRecordBlockCompressHeader);
				scratch += SizeOfXLogRecordBlockCompressHeader;
			}
		}
		if (!samerel)
		{
			memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
			scratch += sizeof(RelFileLocator);
		}
		memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
		scratch += sizeof(BlockNumber);
	}

	/* followed by the record's origin, if any */
	if ((curinsert_flags & XLOG_INCLUDE_ORIGIN) &&
		replorigin_session_origin != InvalidRepOriginId)
	{
		*(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
		memcpy(scratch, &replorigin_session_origin, sizeof(replorigin_session_origin));
		scratch += sizeof(replorigin_session_origin);
	}

	/* followed by toplevel XID, if not already included in previous record */
	if (IsSubxactTopXidLogPending())
	{
		TransactionId xid = GetTopTransactionIdIfAny();

		/* Set the flag that the top xid is included in the WAL */
		*topxid_included = true;

		*(scratch++) = (char) XLR_BLOCK_ID_TOPLEVEL_XID;
		memcpy(scratch, &xid, sizeof(TransactionId));
		scratch += sizeof(TransactionId);
	}

	/* followed by main data, if any */
	if (mainrdata_len > 0)
	{
		if (mainrdata_len > 255)
		{
			uint32		mainrdata_len_4b;

			if (mainrdata_len > PG_UINT32_MAX)
				ereport(ERROR,
						(errmsg_internal("too much WAL data"),
						 errdetail_internal("Main data length is %" PRIu64 " bytes for a maximum of %u bytes.",
											mainrdata_len,
											PG_UINT32_MAX)));

			mainrdata_len_4b = (uint32) mainrdata_len;
			*(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
			memcpy(scratch, &mainrdata_len_4b, sizeof(uint32));
			scratch += sizeof(uint32);
		}
		else
		{
			*(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
			*(scratch++) = (uint8) mainrdata_len;
		}
		/* ... append the main-data rdata chain ... */
		total_len += mainrdata_len;
	}
	rdt_datas_last->next = NULL;

	hdr_rdt.len = (scratch - hdr_scratch);
	total_len += hdr_rdt.len;

	/* ... compute rdata_crc over the assembled data; the record header
	 * itself is added to the CRC later, once the prev-link is known ... */

	/* Ensure the record is not too large for the XLogReader machinery */
	if (total_len > XLogRecordMaxSize)
		ereport(ERROR,
				(errmsg_internal("oversized WAL record"),
				 errdetail_internal("WAL record would be %" PRIu64 " bytes (of maximum %u bytes); rmid %u flags %u.",
									total_len, XLogRecordMaxSize, rmid, info)));

	/*
	 * Fill in the fields in the record header.  The prev-link is filled in
	 * later, once we know where in the WAL the record will be inserted; the
	 * CRC does not include the record header yet.
	 */
	rechdr->xl_xid = GetCurrentTransactionIdIfAny();
	rechdr->xl_tot_len = (uint32) total_len;
	rechdr->xl_info = info;
	rechdr->xl_rmid = rmid;
	rechdr->xl_prev = InvalidXLogRecPtr;
	rechdr->xl_crc = rdata_crc;

	return &hdr_rdt;
}

/*
 * Create a compressed version of a backup block image.
 *
 * Returns false if compression fails (i.e. the compressed result is actually
 * bigger than the original).  Otherwise, returns true and sets *dlen to the
 * length of the compressed block image.
 */
static bool
XLogCompressBackupBlock(const PageData *page, uint16 hole_offset,
						uint16 hole_length, void *dest, uint16 *dlen)
{
	int32		orig_len = BLCKSZ - hole_length;
	int32		len = -1;
	int32		extra_bytes = 0;
	const char *source;
	PGAlignedBlock tmp;

	if (hole_length != 0)
	{
		/* must skip the hole */
		memcpy(tmp.data, page, hole_offset);
		memcpy(tmp.data + hole_offset,
			   page + (hole_offset + hole_length),
			   BLCKSZ - (hole_length + hole_offset));
		source = tmp.data;

		/*
		 * Extra data needs to be stored in the WAL record for the compressed
		 * version of the block image if the hole exists.
		 */
		extra_bytes = SizeOfXLogRecordBlockCompressHeader;
	}
	else
		source = page;

	switch ((WalCompression) wal_compression)
	{
		case WAL_COMPRESSION_PGLZ:
			len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
			break;

		case WAL_COMPRESSION_LZ4:
#ifdef USE_LZ4
			len = LZ4_compress_default(source, dest, orig_len,
									   COMPRESS_BUFSIZE);
			if (len <= 0)
				len = -1;		/* failure */
#else
			elog(ERROR, "LZ4 is not supported by this build");
#endif
			break;

		case WAL_COMPRESSION_ZSTD:
#ifdef USE_ZSTD
			len = ZSTD_compress(dest, COMPRESS_BUFSIZE, source, orig_len,
								ZSTD_CLEVEL_DEFAULT);
			if (ZSTD_isError(len))
				len = -1;		/* failure */
#else
			elog(ERROR, "zstd is not supported by this build");
#endif
			break;

		case WAL_COMPRESSION_NONE:
			Assert(false);		/* cannot happen */
			break;
	}

	/*
	 * Check that the bytes saved by compression exceed the length of the
	 * extra data needed for the compressed version of the block image.
	 */
	if (len >= 0 &&
		len + extra_bytes < orig_len)
	{
		*dlen = (uint16) len;	/* successful compression */
		return true;
	}
	return false;
}
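/*
 * Illustrative sketch (not part of xloginsert.c): for a page with a
 * standard layout (REGBUF_STANDARD), the "hole" that can be omitted from a
 * full-page image is the unused space between pd_lower and pd_upper.  This
 * mirrors the reasoning used when assembling block images above; the helper
 * name is hypothetical.
 */
static void
example_compute_hole(const PageData *page, uint16 *hole_offset, uint16 *hole_length)
{
    uint16      lower = ((const PageHeaderData *) page)->pd_lower;
    uint16      upper = ((const PageHeaderData *) page)->pd_upper;

    if (lower >= SizeOfPageHeaderData && upper > lower && upper <= BLCKSZ)
    {
        *hole_offset = lower;
        *hole_length = upper - lower;
    }
    else
    {
        /* pd_lower/pd_upper look bogus; don't try to omit anything */
        *hole_offset = 0;
        *hole_length = 0;
    }
}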

/*
 * Determine whether the buffer referenced has to be backed up.
 *
 * Since we don't yet have the insert lock, fullPageWrites and runningBackups
 * (which forces full-page writes) could change later, so the result should
 * be used for optimization purposes only.
 */
bool
XLogCheckBufferNeedsBackup(Buffer buffer)
{
	XLogRecPtr	RedoRecPtr;
	bool		doPageWrites;
	Page		page;

	GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites);

	page = BufferGetPage(buffer);

	if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
		return true;			/* buffer requires backup */

	return false;				/* buffer does not need to be backed up */
}
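/*
 * Illustrative sketch (not part of xloginsert.c): callers can use
 * XLogCheckBufferNeedsBackup() as an optimization hint, e.g. to skip
 * including optional payload in a record when a full-page image of the
 * buffer is likely to be taken anyway.  Because the answer can change
 * before the record is actually inserted, it must only affect optional
 * content.  The helper name is hypothetical.
 */
static bool
example_should_log_extra_payload(Buffer buffer)
{
    /* if an FPI is expected, the extra payload would be redundant */
    if (XLogCheckBufferNeedsBackup(buffer))
        return false;

    return true;
}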

/*
 * Write a backup block if needed when we are setting a hint bit.  Note that
 * this may be called for a variety of page types, not just heaps.
 *
 * Callable while holding just a share lock on the buffer content.
 */
XLogRecPtr
XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
{
	XLogRecPtr	recptr = InvalidXLogRecPtr;
	XLogRecPtr	lsn;
	XLogRecPtr	RedoRecPtr;

	/* Ensure no checkpoint can change our view of RedoRecPtr */
	Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) != 0);

	/* Update RedoRecPtr so that we can make the right decision */
	RedoRecPtr = GetRedoRecPtr();

	/*
	 * We assume the page LSN is first data on *every* page that can be
	 * passed to XLogInsert, whether it has the standard page layout or not.
	 * Since we only hold a share-lock on the page, we must take the buffer
	 * header lock when we look at the LSN.
	 */
	lsn = BufferGetLSNAtomic(buffer);

	if (lsn <= RedoRecPtr)
	{
		int			flags = 0;
		PGAlignedBlock copied_buffer;
		char	   *origdata = (char *) BufferGetBlock(buffer);
		RelFileLocator rlocator;
		ForkNumber	forkno;
		BlockNumber blkno;

		/*
		 * Copy the buffer so we don't have to worry about concurrent hint
		 * bit or LSN updates.  We assume pd_lower/pd_upper cannot be changed
		 * without an exclusive lock, so the copied contents are not racy.
		 */
		if (buffer_std)
		{
			/* Assume we can omit data between pd_lower and pd_upper */
			Page		page = BufferGetPage(buffer);
			uint16		lower = ((PageHeader) page)->pd_lower;
			uint16		upper = ((PageHeader) page)->pd_upper;

			memcpy(copied_buffer.data, origdata, lower);
			memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
		}
		else
			memcpy(copied_buffer.data, origdata, BLCKSZ);

		XLogBeginInsert();

		if (buffer_std)
			flags |= REGBUF_STANDARD;

		BufferGetTag(buffer, &rlocator, &forkno, &blkno);
		XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data, flags);

		recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_FOR_HINT);
	}

	return recptr;
}
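/*
 * Illustrative sketch (not part of xloginsert.c): setting a hint bit only
 * dirties the page and is normally not WAL-logged, but when page checksums
 * or wal_log_hints are enabled, a torn hint-only write could break the
 * checksum.  The first hint change after a checkpoint therefore logs a
 * full-page image via XLogSaveBufferForHint() and stamps the page with the
 * returned LSN.  This mirrors, in simplified form, what the buffer
 * manager's MarkBufferDirtyHint() does.
 */
static void
example_set_hint_bit(Buffer buffer, bool buffer_std)
{
    if (XLogHintBitIsNeeded())
    {
        XLogRecPtr  lsn = XLogSaveBufferForHint(buffer, buffer_std);

        if (!XLogRecPtrIsInvalid(lsn))
            PageSetLSN(BufferGetPage(buffer), lsn);
    }

    /* ... now actually set the hint bit and mark the buffer dirty ... */
}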

/*
 * Write a WAL record containing a full image of a page.  The caller is
 * responsible for writing the page to disk after calling this routine.
 *
 * If the page follows the standard page layout, with a PageHeader and
 * unused space between pd_lower and pd_upper, set page_std to true.  That
 * allows the unused space to be left out of the WAL record, making it
 * smaller.
 */
XLogRecPtr
log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno,
			Page page, bool page_std)
{
	int			flags;
	XLogRecPtr	recptr;

	flags = REGBUF_FORCE_IMAGE;
	if (page_std)
		flags |= REGBUF_STANDARD;

	XLogBeginInsert();
	XLogRegisterBlock(0, rlocator, forknum, blkno, page, flags);
	recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);

	/*
	 * The page may be uninitialized.  If so, we can't set the LSN because
	 * that would corrupt the page.
	 */
	if (!PageIsNew(page))
	{
		PageSetLSN(page, recptr);
	}

	return recptr;
}

1167}

1168

1169

1170

1171

1172

1173

1174void

1177{

1178 int flags;

1180 int i;

1181 int j;

1182

1184 if (page_std)

1186

1187

1188

1189

1190

1191

1193

1194 i = 0;

1195 while (i < num_pages)

1196 {

1198 int nbatch;

1199

1201

1202 nbatch = 0;

1204 {

1205 XLogRegisterBlock(nbatch, rlocator, forknum, blknos[i], pages[i], flags);

1206 i++;

1207 nbatch++;

1208 }

1209

1211

1213 {

1214

1215

1216

1217

1219 {

1221 }

1222 }

1223 }

1224}

1225

1226

1227

1228

1229

1230

1231

1232

1233

1234

1235

1238{

1243

1244

1246

1247 BufferGetTag(buffer, &rlocator, &forknum, &blkno);

1248

1249 return log_newpage(&rlocator, forknum, blkno, page, page_std);

1250}
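/*
 * Illustrative sketch (not part of xloginsert.c): logging a freshly built
 * page held in a shared buffer, e.g. a metapage written while creating a
 * new relation fork.  The initialization step is elided; MarkBufferDirty()
 * must precede the call and everything happens inside a critical section.
 * A standard page layout is assumed (page_std = true).
 */
static void
example_log_new_metapage(Buffer metabuf)
{
    START_CRIT_SECTION();

    /* ... initialize the metapage contents here ... */
    MarkBufferDirty(metabuf);

    /* emits an XLOG_FPI record and sets the page's LSN */
    (void) log_newpage_buffer(metabuf, true);

    END_CRIT_SECTION();
}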

/*
 * WAL-log a range of blocks in a relation.
 *
 * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
 * written to the WAL.  If the range is large, this is done in multiple WAL
 * records.
 *
 * NOTE: this function acquires exclusive locks on the pages.  Typically it
 * is used on a newly built relation while the caller holds
 * AccessExclusiveLock on it, so no other backend can be accessing the pages
 * at the same time.
 */
void
log_newpage_range(Relation rel, ForkNumber forknum,
				  BlockNumber startblk, BlockNumber endblk,
				  bool page_std)
{
	int			flags;
	BlockNumber blkno;

	flags = REGBUF_FORCE_IMAGE;
	if (page_std)
		flags |= REGBUF_STANDARD;

	/*
	 * Iterate over all the pages in the range.  They are collected into
	 * batches of XLR_MAX_BLOCK_ID pages, and a single WAL record is written
	 * for each batch.
	 */
	XLogEnsureRecordSpace(XLR_MAX_BLOCK_ID - 1, 0);

	blkno = startblk;
	while (blkno < endblk)
	{
		Buffer		bufpack[XLR_MAX_BLOCK_ID];
		XLogRecPtr	recptr;
		int			nbufs;
		int			i;

		CHECK_FOR_INTERRUPTS();

		/* Collect a batch of blocks. */
		nbufs = 0;
		while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
		{
			Buffer		buf = ReadBufferExtended(rel, forknum, blkno,
												 RBM_NORMAL, NULL);

			LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

			/*
			 * Completely empty pages are not WAL-logged.  Writing a WAL
			 * record would change the LSN, and we want such pages to stay
			 * empty.
			 */
			if (!PageIsNew(BufferGetPage(buf)))
				bufpack[nbufs++] = buf;
			else
				UnlockReleaseBuffer(buf);
			blkno++;
		}

		/* Nothing more to do if all remaining blocks were empty. */
		if (nbufs == 0)
			break;

		/* Write a WAL record for this batch. */
		XLogBeginInsert();

		START_CRIT_SECTION();
		for (i = 0; i < nbufs; i++)
		{
			MarkBufferDirty(bufpack[i]);
			XLogRegisterBuffer(i, bufpack[i], flags);
		}

		recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);

		for (i = 0; i < nbufs; i++)
		{
			PageSetLSN(BufferGetPage(bufpack[i]), recptr);
			UnlockReleaseBuffer(bufpack[i]);
		}
		END_CRIT_SECTION();
	}
}
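/*
 * Illustrative sketch (not part of xloginsert.c): after populating a new
 * relation fork without WAL-logging each page individually (for example
 * while holding AccessExclusiveLock on a freshly created relation), the
 * whole range of blocks can be logged in one pass.  rel and nblocks are
 * hypothetical.
 */
static void
example_log_built_fork(Relation rel, BlockNumber nblocks)
{
    /* logs full-page images for blocks [0, nblocks) in batches */
    log_newpage_range(rel, MAIN_FORKNUM, 0, nblocks, true);
}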

/*
 * Allocate working buffers needed for WAL record construction.
 */
void
InitXLogInsert(void)
{
#ifdef USE_ASSERT_CHECKING
	/*
	 * Check that any record assembled here can be decoded by the reader
	 * machinery at its maximum bound.
	 */
	size_t		max_required =
		DecodeXLogRecordRequiredSpace(XLogRecordMaxSize);

	Assert(AllocSizeIsValid(max_required));
#endif

	/* Initialize the working areas */
	if (xloginsert_cxt == NULL)
	{
		xloginsert_cxt = AllocSetContextCreate(TopMemoryContext,
											   "WAL record construction",
											   ALLOCSET_DEFAULT_SIZES);
	}

	if (registered_buffers == NULL)
	{
		registered_buffers = (registered_buffer *)
			MemoryContextAllocZero(xloginsert_cxt,
								   sizeof(registered_buffer) * (XLR_NORMAL_MAX_BLOCK_ID + 1));
		max_registered_buffers = XLR_NORMAL_MAX_BLOCK_ID + 1;
	}
	if (rdatas == NULL)
	{
		rdatas = MemoryContextAlloc(xloginsert_cxt,
									sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
		max_rdatas = XLR_NORMAL_RDATAS;
	}

	/* Allocate a buffer to hold the header information for a WAL record. */
	if (hdr_scratch == NULL)
		hdr_scratch = MemoryContextAllocZero(xloginsert_cxt,
											 HEADER_SCRATCH_SIZE);
}
