bpo-34060: Report system load when running test suite for Windows (GH… · python/cpython@e16467a

The change adds the following helper, which reports the system load on Windows by sampling the processor queue length with the `typeperf` command:

```python
import subprocess
import sys
import os
import _winapi
import msvcrt
import uuid
from test import support


# Max size of asynchronous reads
BUFSIZE = 8192
# Exponential damping factor (see below)
LOAD_FACTOR_1 = 0.9200444146293232478931553241
# Seconds per measurement
SAMPLING_INTERVAL = 5
COUNTER_NAME = r'\System\Processor Queue Length'
```
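The damping constant follows the standard Unix load-average recurrence: with a 5-second sample and a 1-minute averaging window, the weight kept from the previous estimate is exp(-SAMPLING_INTERVAL / 60). A minimal sketch (not part of the commit) that checks the constant and applies one update of the moving average:

```python
import math

SAMPLING_INTERVAL = 5   # seconds between typeperf samples
LOAD_FACTOR_1 = 0.9200444146293232478931553241

# LOAD_FACTOR_1 is exp(-SAMPLING_INTERVAL / 60): the weight kept from the
# previous estimate for a 1-minute exponentially weighted moving average.
assert math.isclose(LOAD_FACTOR_1, math.exp(-SAMPLING_INTERVAL / 60))

def update(load, sample, factor=LOAD_FACTOR_1):
    # One EWMA step: the old estimate decays, the new sample is blended in.
    return load * factor + sample * (1.0 - factor)

print(update(0.0, 3.0))   # a first sample of 3.0 nudges the load up from zero
```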

The tracker itself spawns `typeperf` with its stdout attached to the write end of an overlapped named pipe, then drains the pipe on demand:

```python
class WindowsLoadTracker():
    """
    This class asynchronously interacts with the typeperf command to read
    the system load on Windows. Multiprocessing and threads can't be used
    here because they interfere with the test suite's cases for those
    modules.
    """

    def __init__(self):
        self.load = 0.0
        self.start()

    def start(self):
        # Create a named pipe which allows for asynchronous IO in Windows
        pipe_name = r'\\.\pipe\typeperf_output_' + str(uuid.uuid4())

        open_mode = _winapi.PIPE_ACCESS_INBOUND
        open_mode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
        open_mode |= _winapi.FILE_FLAG_OVERLAPPED

        # This is the read end of the pipe, where we will be grabbing output
        self.pipe = _winapi.CreateNamedPipe(
            pipe_name, open_mode, _winapi.PIPE_WAIT,
            1, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
        )
        # The write end of the pipe which is passed to the created process
        pipe_write_end = _winapi.CreateFile(
            pipe_name, _winapi.GENERIC_WRITE, 0, _winapi.NULL,
            _winapi.OPEN_EXISTING, 0, _winapi.NULL
        )
        # Open up the handle as a python file object so we can pass it to
        # subprocess
        command_stdout = msvcrt.open_osfhandle(pipe_write_end, 0)

        # Connect to the read end of the pipe in overlap/async mode
        overlap = _winapi.ConnectNamedPipe(self.pipe, overlapped=True)
        overlap.GetOverlappedResult(True)

        # Spawn off the load monitor
        command = ['typeperf', COUNTER_NAME, '-si', str(SAMPLING_INTERVAL)]
        self.p = subprocess.Popen(command, stdout=command_stdout, cwd=support.SAVEDCWD)

        # Close our copy of the write end of the pipe
        os.close(command_stdout)

    def __del__(self):
        self.p.kill()
        self.p.wait()

    def read_output(self):
        import _winapi

        overlapped, _ = _winapi.ReadFile(self.pipe, BUFSIZE, True)
        bytes_read, res = overlapped.GetOverlappedResult(False)
        if res != 0:
            return

        return overlapped.getbuffer().decode()

    def getloadavg(self):
        typeperf_output = self.read_output()
        # Nothing to update, just return the current load
        if not typeperf_output:
            return self.load

        # Process the backlog of load values
        for line in typeperf_output.splitlines():
            # typeperf outputs in a CSV format like this:
            # "07/19/2018 01:32:26.605","3.000000"
            toks = line.split(',')
            # Ignore blank lines and the initial header
            if line.strip() == '' or (COUNTER_NAME in line) or len(toks) != 2:
                continue

            load = float(toks[1].replace('"', ''))
            # We use an exponentially weighted moving average, imitating the
            # load calculation on Unix systems.
            # https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation
            new_load = self.load * LOAD_FACTOR_1 + load * (1.0 - LOAD_FACTOR_1)
            self.load = new_load

        return self.load
```
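For context, a minimal usage sketch (not part of this diff; the polling loop is illustrative): the test runner can create one tracker and poll it periodically, much like `os.getloadavg()` on Unix.

```python
import sys
import time

if sys.platform == 'win32':
    tracker = WindowsLoadTracker()
    for _ in range(3):
        time.sleep(SAMPLING_INTERVAL)
        # getloadavg() drains any buffered typeperf samples and returns the
        # current exponentially weighted load estimate.
        print(f'load average: {tracker.getloadavg():.2f}')
```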