From patchwork Thu May 6 18:38:25 2010
X-Patchwork-Submitter: Lucas Meneghel Rodrigues
X-Patchwork-Id: 97426
Subject: Re: [Autotest] [KVM_AUTOTEST][PATCH] KSM_overcommit: dynamic reserve calculation
From: Lucas Meneghel Rodrigues
To: Lukáš Doktor
Cc: KVM list, Autotest mailing list, Jiri Zupka, Jason Wang
In-Reply-To: <4BE1DAAA.80508@redhat.com>
References: <4BE1DAAA.80508@redhat.com>
Date: Thu, 06 May 2010 15:38:25 -0300
Message-ID: <1273171105.15652.138.camel@freedom>
X-Mailing-List: kvm@vger.kernel.org

diff --git a/client/tests/kvm/kvm_vm.py b/client/tests/kvm/kvm_vm.py
index 6bc7987..1d83120 100755
--- a/client/tests/kvm/kvm_vm.py
+++ b/client/tests/kvm/kvm_vm.py
@@ -396,7 +396,7 @@ class VM:
 
         # Find available VNC port, if needed
         if params.get("display") == "vnc":
-            self.vnc_port = kvm_utils.find_free_port(5900, 6000)
+            self.vnc_port = kvm_utils.find_free_port(5900, 6100)
 
         # Find random UUID if specified 'uuid = random' in config file
         if params.get("uuid") == "random":
diff --git a/client/tests/kvm/scripts/allocator.py b/client/tests/kvm/scripts/allocator.py
index 1036893..227745a 100755
--- a/client/tests/kvm/scripts/allocator.py
+++ b/client/tests/kvm/scripts/allocator.py
@@ -8,10 +8,12 @@
 Auxiliary script used to allocate memory on guests.
 """
 
-import os, array, sys, struct, random, copy, inspect, tempfile, datetime
+import os, array, sys, struct, random, copy, inspect, tempfile, datetime, math
 
 PAGE_SIZE = 4096 # machine page size
 
+TMPFS_OVERHEAD = 0.0022 # overhead on 1MB of write data
+
-------------------------------------------------------------------------------
Cool, how did you guys get to this constant?
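
Just to make the question concrete: I imagine it came from something like the
measurement below (only my own sketch, not necessarily how you derived 0.0022;
the mount size and the amount of data written are arbitrary, and it needs
root). For a 2048 MB guest the constant translates to
math.ceil(2048 * 0.0022) = 5 MB of tmpfs slack instead of the old fixed 25 MB.

import os, tempfile

MOUNT_SIZE_MB = 128   # arbitrary, just has to be larger than DATA_MB
DATA_MB = 100         # payload written into the tmpfs

mnt = tempfile.mkdtemp()
if os.system("mount -o size=%dM tmpfs %s -t tmpfs" % (MOUNT_SIZE_MB, mnt)) != 0:
    raise SystemExit("FAIL: unable to mount tmpfs (are you root?)")
try:
    f = open(os.path.join(mnt, "data"), "w")
    f.write("x" * (DATA_MB * 1024 * 1024))
    f.close()
    # compare what the filesystem reports as used with what was written
    st = os.statvfs(mnt)
    used_mb = float((st.f_blocks - st.f_bfree) * st.f_frsize) / (1024 * 1024)
    print("overhead per written MB: %f" % ((used_mb - DATA_MB) / DATA_MB))
finally:
    os.system("umount %s" % mnt)
    os.rmdir(mnt)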
-------------------------------------------------------------------------------
 
 class MemFill(object):
     """
@@ -32,7 +34,8 @@ class MemFill(object):
         self.tmpdp = tempfile.mkdtemp()
         ret_code = os.system("mount -o size=%dM tmpfs %s -t tmpfs" %
-                             ((mem + 25), self.tmpdp))
+                             ((mem+math.ceil(mem*TMPFS_OVERHEAD)),
+                              self.tmpdp))
         if ret_code != 0:
             if os.getuid() != 0:
                 print ("FAIL: Unable to mount tmpfs "
@@ -42,7 +45,7 @@ class MemFill(object):
         else:
             self.f = tempfile.TemporaryFile(prefix='mem', dir=self.tmpdp)
             self.allocate_by = 'L'
-            self.npages = (mem * 1024 * 1024) / PAGE_SIZE
+            self.npages = ((mem * 1024 * 1024) / PAGE_SIZE)
             self.random_key = random_key
             self.static_value = static_value
             print "PASS: Initialization"
@@ -83,7 +86,7 @@ class MemFill(object):
         @return: return array of bytes size PAGE_SIZE.
         """
         a = array.array("B")
-        for i in range(PAGE_SIZE / a.itemsize):
+        for i in range((PAGE_SIZE / a.itemsize)):
             try:
                 a.append(value)
             except:
diff --git a/client/tests/kvm/tests/ksm_overcommit.py b/client/tests/kvm/tests/ksm_overcommit.py
index 2dd46c4..31d5c61 100644
--- a/client/tests/kvm/tests/ksm_overcommit.py
+++ b/client/tests/kvm/tests/ksm_overcommit.py
@@ -142,6 +142,10 @@ def run_ksm_overcommit(test, params, env):
         session = None
         vm = None
         for i in range(1, vmsc):
+            # Check VMs
+            for j in range(0, vmsc):
+                if not lvms[i].is_alive:
+                    raise error.TestFail("one of other VMs is death")
-------------------------------------------------------------------------------
The message above should be something along the lines of:

e_msg = "VM %s died while executing static_random_fill on allocator loop" % i
-------------------------------------------------------------------------------
             vm = lvms[i]
             session = lsessions[i]
             a_cmd = "mem.static_random_fill()"
@@ -154,6 +158,8 @@ def run_ksm_overcommit(test, params, env):
             logging.debug("Watching host memory while filling vm %s memory",
                           vm.name)
             while not out.startswith("PASS") and not out.startswith("FAIL"):
+                if not vm.is_alive():
+                    raise error.TestFail("VM is death")
-------------------------------------------------------------------------------
The message above should also be something along the lines of:

e_msg = "VM %s died while executing static_random_fill on allocator loop" % i

(see the sketch at the bottom of this message)
-------------------------------------------------------------------------------
                 free_mem = int(utils.read_from_meminfo("MemFree"))
                 if (ksm_swap):
                     free_mem = (free_mem +
@@ -202,7 +208,7 @@ def run_ksm_overcommit(test, params, env):
 
     # Verify last machine with randomly generated memory
     a_cmd = "mem.static_random_verify()"
-    _execute_allocator(a_cmd, lvms[last_vm], session,
+    _execute_allocator(a_cmd, lvms[last_vm], lsessions[last_vm],
                        (mem / 200 * 50 * perf_ratio))
     logging.debug(kvm_test_utils.get_memory_info([lvms[last_vm]]))
 
@@ -338,12 +344,29 @@ def run_ksm_overcommit(test, params, env):
 
     # Main test code
     logging.info("Starting phase 0: Initialization")
+
     # host_reserve: mem reserve kept for the host system to run
-    host_reserve = int(params.get("ksm_host_reserve", 512))
+    host_reserve = int(params.get("ksm_host_reserve", -1))
+    if (host_reserve == -1):
+        # default host_reserve = MemAvailable + one_minimal_guest(128MB)
+        # later we add 64MB per additional guest
+        host_reserve = ((utils.memtotal() - utils.read_from_meminfo("MemFree"))
+                        / 1024 + 128)
+        # using default reserve
+        _host_reserve = 1
-------------------------------------------------------------------------------
Be consistent above and assign True instead of 1.
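
Something like this, I mean (the same branch from the patch, just with a
boolean; params/utils are the helpers already used by the test):

host_reserve = int(params.get("ksm_host_reserve", -1))
if host_reserve == -1:
    # default host_reserve = MemAvailable + one_minimal_guest(128MB)
    # later we add 64MB per additional guest
    host_reserve = ((utils.memtotal() - utils.read_from_meminfo("MemFree"))
                    / 1024 + 128)
    # using default reserve
    _host_reserve = True
else:
    _host_reserve = False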
-------------------------------------------------------------------------------
+    else:
+        _host_reserve = False
+
     # guest_reserve: mem reserve kept to avoid guest OS to kill processes
-    guest_reserve = int(params.get("ksm_guest_reserve", 1024))
-    logging.debug("Memory reserved for host to run: %d", host_reserve)
-    logging.debug("Memory reserved for guest to run: %d", guest_reserve)
+    guest_reserve = int(params.get("ksm_guest_reserve", -1))
+    if (guest_reserve == -1):
+        # default guest_reserve = minimal_system_mem(256MB)
+        # later we add tmpfs overhead
+        guest_reserve = 256
+        # using default reserve
+        _guest_reserve = True
+    else:
+        _guest_reserve = False
 
     max_vms = int(params.get("max_vms", 2))
     overcommit = float(params.get("ksm_overcommit_ratio", 2.0))
@@ -355,6 +378,10 @@ def run_ksm_overcommit(test, params, env):
 
     if (params['ksm_mode'] == "serial"):
         max_alloc = vmsc
+        if _host_reserve:
+            # First round of additional guest reserves
+            host_reserve += vmsc * 64
+            _host_reserve = vmsc
 
     host_mem = (int(utils.memtotal()) / 1024 -
                 host_reserve)
 
@@ -402,6 +429,10 @@ def run_ksm_overcommit(test, params, env):
         if mem - guest_reserve - 1 > 3100:
             vmsc = int(math.ceil((host_mem * overcommit) /
                                  (3100 + guest_reserve)))
+            if _host_reserve:
+                host_reserve += (vmsc - _host_reserve) * 64
+                host_mem -= (vmsc - _host_reserve) * 64
+                _host_reserve = vmsc
             mem = int(math.floor(host_mem * overcommit / vmsc))
 
     if os.popen("uname -i").readline().startswith("i386"):
@@ -410,8 +441,19 @@ def run_ksm_overcommit(test, params, env):
         if mem > 3100 - 64:
             vmsc = int(math.ceil((host_mem * overcommit) /
                                  (3100 - 64.0)))
+            if _host_reserve:
+                host_reserve += (vmsc - _host_reserve) * 64
+                host_mem -= (vmsc - _host_reserve) * 64
+                _host_reserve = vmsc
             mem = int(math.floor(host_mem * overcommit / vmsc))
 
+    # 0.055 represents OS + TMPFS additional reserve per guest ram MB
+    if _guest_reserve:
+        guest_reserve += math.ceil(mem * 0.055)
+
+    logging.debug("Memory reserved for host to run: %d", host_reserve)
+    logging.debug("Memory reserved for guest to run: %d", guest_reserve)
+
     logging.debug("Checking KSM status...")
     ksm_flag = 0
     for line in os.popen('ksmctl info').readlines():
diff --git a/client/tests/kvm/tests_base.cfg.sample b/client/tests/kvm/tests_base.cfg.sample
index ee83ac2..d3a5982 100644
--- a/client/tests/kvm/tests_base.cfg.sample
+++ b/client/tests/kvm/tests_base.cfg.sample
@@ -302,9 +302,9 @@ variants:
                 ksm_overcommit_ratio = 3
                 # Max paralel runs machine
                 ksm_parallel_ratio = 4
-                # Host memory reserve
-                ksm_host_reserve = 512
-                ksm_guest_reserve = 1024
+                # Host memory reserve (default - best fit for used mem)
+                # ksm_host_reserve = 512
+                # ksm_guest_reserve = 1024
                 variants:
                     - ksm_serial:
                         ksm_mode = "serial"
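
-------------------------------------------------------------------------------
For the record, my own arithmetic on the new guest_reserve default (not part
of the patch): with mem = 2048 MB per guest, guest_reserve ends up as
256 + math.ceil(2048 * 0.055) = 256 + 113 = 369 MB, noticeably lower than the
old fixed 1024 MB.
-------------------------------------------------------------------------------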
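
-------------------------------------------------------------------------------
About the two "death" messages flagged above, the check could look roughly
like this (only a sketch reusing lvms/vmsc/error from the test, calling
is_alive() as a method and using the VM name instead of the loop index):

for j in range(0, vmsc):
    if not lvms[j].is_alive():
        e_msg = ("VM %s died while executing static_random_fill on"
                 " allocator loop" % lvms[j].name)
        raise error.TestFail(e_msg)
-------------------------------------------------------------------------------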