Chromium Code Reviews

Side by Side Diff: client/tests/kvm/scripts/hugepage.py

Issue 6246035: Merge remote branch 'cros/upstream' into master (Closed) Base URL: ssh://git@gitrw.chromium.org:9222/autotest.git@master
Patch Set: patch Created 9 years, 10 months ago
(New file)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Simple script to allocate enough hugepages for KVM testing purposes.
"""
import os, sys


class HugePageError(Exception):
    """
    Simple wrapper for the builtin Exception class.
    """
    pass


class HugePage:
    def __init__(self, hugepage_path=None):
        """
        Gets environment variable values and calculates the target number
        of huge memory pages.

        @param hugepage_path: Path where hugetlbfs will be mounted, if it is
                not yet configured.
        """
        self.vms = len(os.environ['KVM_TEST_vms'].split())
        self.mem = int(os.environ['KVM_TEST_mem'])
        try:
            self.max_vms = int(os.environ['KVM_TEST_max_vms'])
        except KeyError:
            self.max_vms = 0

        if hugepage_path:
            self.hugepage_path = hugepage_path
        else:
            self.hugepage_path = '/mnt/kvm_hugepage'

        self.hugepage_size = self.get_hugepage_size()
        self.target_hugepages = self.get_target_hugepages()
        print "Number of VMs this test will use: %d" % self.vms
        print "Amount of memory used by each VM: %s" % self.mem
        print ("System setting for large memory page size: %s" %
               self.hugepage_size)
        print ("Number of large memory pages needed for this test: %s" %
               self.target_hugepages)


    def get_hugepage_size(self):
        """
        Get the current system setting for huge memory page size.
        """
        # /proc/meminfo reports the hugepage size in kB, e.g.
        # "Hugepagesize:       2048 kB".
        meminfo = open('/proc/meminfo', 'r').readlines()
        huge_line_list = [h for h in meminfo if h.startswith("Hugepagesize")]
        try:
            return int(huge_line_list[0].split()[1])
        except (IndexError, ValueError), e:
            raise HugePageError("Could not get huge page size setting from "
                                "/proc/meminfo: %s" % e)


    def get_target_hugepages(self):
        """
        Calculate the target number of hugepages for testing purposes.
        """
        if self.vms < self.max_vms:
            self.vms = self.max_vms
        # memory of all VMs plus qemu overhead of 64MB per guest
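        # Illustrative numbers (assumed, not from this patch): with 2 VMs at
        # 1024 MB each and a 2048 kB hugepage size, this works out to
        # (2 * 1024 + 2 * 64) * 1024 / 2048 = 1088 hugepages.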
        vmsm = (self.vms * self.mem) + (self.vms * 64)
        return int(vmsm * 1024 / self.hugepage_size)


    def set_hugepages(self):
        """
        Sets the hugepage limit to the target hugepage value calculated.
        """
        hugepage_cfg = open("/proc/sys/vm/nr_hugepages", "r+")
        hp = int(hugepage_cfg.readline())
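        # Keep writing the target value until the kernel reports it back; if
        # the reported value stops changing before the target is reached, the
        # kernel cannot provide that many pages and we give up.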
        while int(hp) < self.target_hugepages:
            loop_hp = hp
            hugepage_cfg.write(str(self.target_hugepages))
            hugepage_cfg.flush()
            hugepage_cfg.seek(0)
            hp = int(hugepage_cfg.readline())
            if loop_hp == hp:
                raise HugePageError("Cannot set the kernel hugepage setting "
                                    "to the target value of %d hugepages." %
                                    self.target_hugepages)
        hugepage_cfg.close()
        print ("Successfully set %d large memory pages on the host" %
               self.target_hugepages)


    def mount_hugepage_fs(self):
        """
        Verify whether a hugetlbfs mount point is set up. If there is none,
        set up a hugetlbfs mount using the class attribute that defines the
        mount point.
        """
        if not os.path.ismount(self.hugepage_path):
            if not os.path.isdir(self.hugepage_path):
                os.makedirs(self.hugepage_path)
            cmd = "mount -t hugetlbfs none %s" % self.hugepage_path
            # os.system() returns a nonzero exit status on failure.
            if os.system(cmd):
                raise HugePageError("Cannot mount hugetlbfs path %s" %
                                    self.hugepage_path)


    def setup(self):
        self.set_hugepages()
        self.mount_hugepage_fs()


if __name__ == "__main__":
    if len(sys.argv) < 2:
        huge_page = HugePage()
    else:
        huge_page = HugePage(sys.argv[1])

    huge_page.setup()
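
For reference, a minimal sketch (not part of this patch) of how the script is expected to be driven: the environment variable names and the default mount path come from the code above, while the concrete values are made up for illustration.

import os
from hugepage import HugePage   # assumes this file is importable as 'hugepage'

os.environ['KVM_TEST_vms'] = 'vm1 vm2'    # two guests
os.environ['KVM_TEST_mem'] = '1024'       # 1024 MB per guest
os.environ['KVM_TEST_max_vms'] = '2'      # optional; defaults to 0 when unset
HugePage('/mnt/kvm_hugepage').setup()     # same effect as: hugepage.py /mnt/kvm_hugepage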
