LLVM: llvm::DecoderUInt128 Class Reference

#include "[Target/AMDGPU/Disassembler/AMDGPUDisassembler.h](AMDGPUDisassembler_8h_source.html)"

Public Member Functions
DecoderUInt128 ()=default
DecoderUInt128 (uint64_t Lo, uint64_t Hi=0)
operator bool () const
void insertBits (uint64_t SubBits, unsigned BitPosition, unsigned NumBits)
uint64_t extractBitsAsZExtValue (unsigned NumBits, unsigned BitPosition) const
DecoderUInt128 operator& (const DecoderUInt128 &RHS) const
DecoderUInt128 operator& (const uint64_t &RHS) const
DecoderUInt128 operator~ () const
bool operator== (const DecoderUInt128 &RHS)
bool operator!= (const DecoderUInt128 &RHS)
bool operator!= (const int &RHS)

Definition at line 37 of file AMDGPUDisassembler.h.

DecoderUInt128() [1/2]

llvm::DecoderUInt128::DecoderUInt128 ( ) = default

DecoderUInt128() [2/2]

extractBitsAsZExtValue()

insertBits()

operator bool()

llvm::DecoderUInt128::operator bool ( ) const inline

operator!=() [1/2]

operator!=() [2/2]

bool llvm::DecoderUInt128::operator!= ( const int & RHS) inline

operator&() [1/2]

operator&() [2/2]

operator==()

operator~()

operator<<


The documentation for this class was generated from the following file: