diff mbox

[KVM-AUTOTEST] Adding iperf test

Message ID 1245187799-7757-1-git-send-email-lmr@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Lucas Meneghel Rodrigues June 16, 2009, 9:29 p.m. UTC
Adding iperf network performance test. Basically it tests
networking functionality, stability and performance of guest OSes.
This test is cross-platform -- i.e. it works on both Linux and
Windows VMs.

Signed-off-by: Alexey Eromenko <aeromenk@redhat.com>
---
 client/tests/kvm/kvm.py               |    1 +
 client/tests/kvm/kvm_iperf.py         |  105 +++++++++++++++++++++++++++++++++
 client/tests/kvm/kvm_tests.cfg.sample |    8 +++
 3 files changed, 114 insertions(+), 0 deletions(-)
 create mode 100644 client/tests/kvm/kvm_iperf.py

Comments

Lucas Meneghel Rodrigues June 16, 2009, 9:40 p.m. UTC | #1
On Tue, 2009-06-16 at 18:29 -0300, Lucas Meneghel Rodrigues wrote:
> Adding iperf network performance test. Basically it tests
> networking functionality, stability and performance of guest OSes.
> This test is cross-platform -- i.e. it works on both Linux and
> Windows VMs.

Ok, now that I had rebased the test, I have a few comments to say:

 * I don't like the idea of shipping binaries inside the test. Fair enough
that we don't support archs other than x86 and x86_64 (also there's the
problem that Windows usually doesn't ship a working toolchain), but I would
like to think a bit more about it.
 * Autotest already has an iperf test that could be used on Linux
guests. Sure, there's the problem of matching what's being executed on
Windows, but it's worth a look.
 * Autotest iperf test usually runs with 2 machines, one in 'server'
role and other in 'client' mode. I would like to pursue the same model,
2 vms, one running as a server and another as a client.

Alexey and Yaniv, I'd like to hear your opinions on this.

Thanks,

> Signed-off-by: Alexey Eromenko <aeromenk@redhat.com>
> ---
>  client/tests/kvm/kvm.py               |    1 +
>  client/tests/kvm/kvm_iperf.py         |  105 +++++++++++++++++++++++++++++++++
>  client/tests/kvm/kvm_tests.cfg.sample |    8 +++
>  3 files changed, 114 insertions(+), 0 deletions(-)
>  create mode 100644 client/tests/kvm/kvm_iperf.py
> 
> diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py
> index 9428162..e1a6e27 100644
> --- a/client/tests/kvm/kvm.py
> +++ b/client/tests/kvm/kvm.py
> @@ -53,6 +53,7 @@ class kvm(test.test):
>                  "autotest":     test_routine("kvm_tests", "run_autotest"),
>                  "kvm_install":  test_routine("kvm_install", "run_kvm_install"),
>                  "linux_s3":     test_routine("kvm_tests", "run_linux_s3"),
> +                "iperf":        test_routine("kvm_iperf", "run_iperf"),
>                  }
>  
>          # Make it possible to import modules from the test's bindir
> diff --git a/client/tests/kvm/kvm_iperf.py b/client/tests/kvm/kvm_iperf.py
> new file mode 100644
> index 0000000..927c9e5
> --- /dev/null
> +++ b/client/tests/kvm/kvm_iperf.py
> @@ -0,0 +1,105 @@
> +import time, os, logging
> +from autotest_lib.client.common_lib import utils, error
> +import kvm_utils
> +
> +def run_iperf(test, params, env):
> +    """
> +    Runs iperf on the guest system and brings back the result.
> +
> +    @see: http://sourceforge.net/projects/iperf
> +    @param test: kvm test object
> +    @param params: Dictionary with test parameters
> +    @param env: Test environment
> +    """
> +    vm = kvm_utils.env_get_vm(env,  params.get("main_vm"))
> +    if not vm:
> +        message = "VM object not found in environment"
> +        logging.error(message)
> +        raise error.TestError, message
> +    if not vm.is_alive():
> +        message = "VM seems to be dead; Test requires a living VM"
> +        logging.error(message)
> +        raise error.TestError(message)
> +
> +    logging.info("Waiting for guest to be up...")
> +
> +    session = kvm_utils.wait_for(vm.ssh_login, 240, 0, 2)
> +    if not session:
> +        message = "Could not log into guest"
> +        logging.error(message)
> +        raise error.TestFail, message
> +
> +    logging.info("Logged in")
> +
> +    # Checking for GuestOS-compatible iPerf binary existence on host.
> +    iperf_binary = params.get("iperf_binary", "misc/iperf")
> +    iperf_duration = params.get("iperf_duration", 5)
> +    iperf_parallel_threads = params.get("iperf_parallel_threads", 1)
> +    iperf_dest_ip = params.get("iperf_dest_ip", "10.0.2.2")
> +    iperf_binary = os.path.join(test.bindir, iperf_binary)
> +    if not os.path.exists(iperf_binary):
> +        message = "iPerf binary: %s was not found on host" % iperf_binary
> +        logging.error(message)
> +        raise error.TestError, message
> +    else:
> +        logging.info("iPerf binary: %s was found on host" % iperf_binary)
> +
> +    # Starting HostOS-compatible iPerf Server on host
> +    logging.info('VM is up ... \n starting iPerf Server on host')
> +    kvm_utils.run_bg("iperf -s", timeout=5)
> +
> +    # Detecting GuestOS
> +    if iperf_binary.__contains__("exe"):
> +        vm_type="win32"
> +    else:
> +        vm_type="linux32"
> +
> +    # Copying GuestOS-compatible iPerf binary to guest.
> +    # Starting iPerf Client on guest, plus connect to host.
> +    if vm_type == "win32":
> +        win_dir = "/cygdrive/c/"
> +        logging.info('starting copying %s to Windows VM to %s' % (iperf_binary,
> +                                                                  win_dir))
> +        if not vm.scp_to_remote(iperf_binary, win_dir):
> +            message = "Could not copy Win32 iPerf to guest"
> +            logging.error(message)
> +            raise error.TestError(message)
> +        logging.debug("Enabling file permissions of iPerf.exe on Windows VM...")
> +        session.sendline('cacls C:\iperf.exe /P Administrator:F')
> +        session.sendline('y')
> +        session.sendline('')
> +        time.sleep(2)
> +        session.sendline('')
> +        logging.info("starting iPerf client on Windows VM, connecting to host")
> +        session.sendline('C:\iperf -t %s -c %s -P %s' % (int(iperf_duration),
> +                                                         iperf_dest_ip,
> +                                                   int(iperf_parallel_threads)))
> +    else:
> +        logging.info('starting copying %s to Linux VM ' % iperf_binary)
> +        if not vm.scp_to_remote(iperf_binary, "/usr/local/bin"):
> +            message = "Could not copy Linux iPerf to guest"
> +            logging.error(message)
> +            raise error.TestError, message
> +        print "starting iPerf client on VM, connecting to host"
> +        session.sendline('iperf -t %s -c %s -P %s' % (int(iperf_duration),
> +                                                      iperf_dest_ip,
> +                                                   int(iperf_parallel_threads)))
> +
> +    # Analyzing results
> +    iperf_result_match, iperf_result = session.read_up_to_prompt()
> +    logging.debug("iperf_result =", iperf_result)
> +
> +    if iperf_result.__contains__(" 0.00 bits/sec"):
> +        msg = 'Guest returned 0.00 bits/sec during iperf test.'
> +        raise error.TestError(msg)
> +    elif iperf_result.__contains__("No route to host"):
> +        msg = 'SSH to guest returned: No route to host.'
> +        raise error.TestError(msg)
> +    elif iperf_result.__contains__("Access is denied"):
> +        msg = 'SSH to guest returned: Access is denied.'
> +        raise error.TestError(msg)
> +    elif not iperf_result.__contains__("bits/sec"):
> +        msg = 'SSH result unrecognizeable.'
> +        raise error.TestError(msg)
> +
> +    session.close()
> diff --git a/client/tests/kvm/kvm_tests.cfg.sample b/client/tests/kvm/kvm_tests.cfg.sample
> index 2c0b321..931f748 100644
> --- a/client/tests/kvm/kvm_tests.cfg.sample
> +++ b/client/tests/kvm/kvm_tests.cfg.sample
> @@ -82,6 +82,10 @@ variants:
>      - linux_s3:      install setup
>          type = linux_s3
>  
> +    - iperf:        install setup
> +        type = iperf
> +        extra_params += " -snapshot"
> +
>  # NICs
>  variants:
>      - @rtl8139:
> @@ -102,6 +106,8 @@ variants:
>          ssh_status_test_command = echo $?
>          username = root
>          password = 123456
> +        iperf:
> +          iperf_binary = misc/iperf
>  
>          variants:
>              - Fedora:
> @@ -292,6 +298,8 @@ variants:
>          password = 123456
>          migrate:
>              migration_test_command = ver && vol
> +        iperf:
> +            iperf_binary = misc/iperf.exe
>  
>          variants:
>              - Win2000:
Lucas Meneghel Rodrigues June 16, 2009, 11:43 p.m. UTC | #2
On Tue, 2009-06-16 at 18:40 -0300, Lucas Meneghel Rodrigues wrote:
>  * Autotest iperf test usually runs with 2 machines, one in 'server'
> role and other in 'client' mode. I would like to pursue the same model,
> 2 vms, one running as a server and another as a client.

Nevermind this comment, I've missed the part of the test that starts the
server on the host linux system.
sudhir kumar June 30, 2009, 8:52 a.m. UTC | #3
On Wed, Jun 17, 2009 at 2:59 AM, Lucas Meneghel Rodrigues<lmr@redhat.com> wrote:
> Adding iperf network performance test. Basically it tests
> networking functionality, stability and performance of guest OSes.
> This test is cross-platform -- i.e. it works on both Linux and
> Windows VMs.
>

I have a question here. Why are we adding iperf in a way different
from other tests? We have client/tests/<different_tests> directory
for each test which contains the python modules and the test tarball.
Then why in case of iperf we are putting it under client/tests/kvm and
modifying kvm.py instead of putting the test suite as part of
autotest (is run_autotest not enough?)? Even if we do not want to touch
the existing iperf test in autotest we can use a separate name like
kvm_iperf. Somehow I have a feeling that there was a discussion on the
list for keeping tests under a particular directory. But still I feel
that should be only for tests specific to KVM and not the guest. Is
there any disadvantage of using the current approach of executing
these test suites?


> Signed-off-by: Alexey Eromenko <aeromenk@redhat.com>
> ---
>  client/tests/kvm/kvm.py               |    1 +
>  client/tests/kvm/kvm_iperf.py         |  105 +++++++++++++++++++++++++++++++++
>  client/tests/kvm/kvm_tests.cfg.sample |    8 +++
>  3 files changed, 114 insertions(+), 0 deletions(-)
>  create mode 100644 client/tests/kvm/kvm_iperf.py
>
> diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py
> index 9428162..e1a6e27 100644
> --- a/client/tests/kvm/kvm.py
> +++ b/client/tests/kvm/kvm.py
> @@ -53,6 +53,7 @@ class kvm(test.test):
>                 "autotest":     test_routine("kvm_tests", "run_autotest"),
>                 "kvm_install":  test_routine("kvm_install", "run_kvm_install"),
>                 "linux_s3":     test_routine("kvm_tests", "run_linux_s3"),
> +                "iperf":        test_routine("kvm_iperf", "run_iperf"),
>                 }
>
>         # Make it possible to import modules from the test's bindir
> diff --git a/client/tests/kvm/kvm_iperf.py b/client/tests/kvm/kvm_iperf.py
> new file mode 100644
> index 0000000..927c9e5
> --- /dev/null
> +++ b/client/tests/kvm/kvm_iperf.py
> @@ -0,0 +1,105 @@
> +import time, os, logging
> +from autotest_lib.client.common_lib import utils, error
> +import kvm_utils
> +
> +def run_iperf(test, params, env):
> +    """
> +    Runs iperf on the guest system and brings back the result.
> +
> +    @see: http://sourceforge.net/projects/iperf
> +    @param test: kvm test object
> +    @param params: Dictionary with test parameters
> +    @param env: Test environment
> +    """
> +    vm = kvm_utils.env_get_vm(env,  params.get("main_vm"))
> +    if not vm:
> +        message = "VM object not found in environment"
> +        logging.error(message)
> +        raise error.TestError, message
> +    if not vm.is_alive():
> +        message = "VM seems to be dead; Test requires a living VM"
> +        logging.error(message)
> +        raise error.TestError(message)
> +
> +    logging.info("Waiting for guest to be up...")
> +
> +    session = kvm_utils.wait_for(vm.ssh_login, 240, 0, 2)
> +    if not session:
> +        message = "Could not log into guest"
> +        logging.error(message)
> +        raise error.TestFail, message
> +
> +    logging.info("Logged in")
> +
> +    # Checking for GuestOS-compatible iPerf binary existence on host.
> +    iperf_binary = params.get("iperf_binary", "misc/iperf")
> +    iperf_duration = params.get("iperf_duration", 5)
> +    iperf_parallel_threads = params.get("iperf_parallel_threads", 1)
> +    iperf_dest_ip = params.get("iperf_dest_ip", "10.0.2.2")
> +    iperf_binary = os.path.join(test.bindir, iperf_binary)
> +    if not os.path.exists(iperf_binary):
> +        message = "iPerf binary: %s was not found on host" % iperf_binary
> +        logging.error(message)
> +        raise error.TestError, message
> +    else:
> +        logging.info("iPerf binary: %s was found on host" % iperf_binary)
> +
> +    # Starting HostOS-compatible iPerf Server on host
> +    logging.info('VM is up ... \n starting iPerf Server on host')
> +    kvm_utils.run_bg("iperf -s", timeout=5)
> +
> +    # Detecting GuestOS
> +    if iperf_binary.__contains__("exe"):
> +        vm_type="win32"
> +    else:
> +        vm_type="linux32"
> +
> +    # Copying GuestOS-compatible iPerf binary to guest.
> +    # Starting iPerf Client on guest, plus connect to host.
> +    if vm_type == "win32":
> +        win_dir = "/cygdrive/c/"
> +        logging.info('starting copying %s to Windows VM to %s' % (iperf_binary,
> +                                                                  win_dir))
> +        if not vm.scp_to_remote(iperf_binary, win_dir):
> +            message = "Could not copy Win32 iPerf to guest"
> +            logging.error(message)
> +            raise error.TestError(message)
> +        logging.debug("Enabling file permissions of iPerf.exe on Windows VM...")
> +        session.sendline('cacls C:\iperf.exe /P Administrator:F')
> +        session.sendline('y')
> +        session.sendline('')
> +        time.sleep(2)
> +        session.sendline('')
> +        logging.info("starting iPerf client on Windows VM, connecting to host")
> +        session.sendline('C:\iperf -t %s -c %s -P %s' % (int(iperf_duration),
> +                                                         iperf_dest_ip,
> +                                                   int(iperf_parallel_threads)))
> +    else:
> +        logging.info('starting copying %s to Linux VM ' % iperf_binary)
> +        if not vm.scp_to_remote(iperf_binary, "/usr/local/bin"):
> +            message = "Could not copy Linux iPerf to guest"
> +            logging.error(message)
> +            raise error.TestError, message
> +        print "starting iPerf client on VM, connecting to host"
> +        session.sendline('iperf -t %s -c %s -P %s' % (int(iperf_duration),
> +                                                      iperf_dest_ip,
> +                                                   int(iperf_parallel_threads)))
> +
> +    # Analyzing results
> +    iperf_result_match, iperf_result = session.read_up_to_prompt()
> +    logging.debug("iperf_result =", iperf_result)
> +
> +    if iperf_result.__contains__(" 0.00 bits/sec"):
> +        msg = 'Guest returned 0.00 bits/sec during iperf test.'
> +        raise error.TestError(msg)
> +    elif iperf_result.__contains__("No route to host"):
> +        msg = 'SSH to guest returned: No route to host.'
> +        raise error.TestError(msg)
> +    elif iperf_result.__contains__("Access is denied"):
> +        msg = 'SSH to guest returned: Access is denied.'
> +        raise error.TestError(msg)
> +    elif not iperf_result.__contains__("bits/sec"):
> +        msg = 'SSH result unrecognizeable.'
> +        raise error.TestError(msg)
> +
> +    session.close()
> diff --git a/client/tests/kvm/kvm_tests.cfg.sample b/client/tests/kvm/kvm_tests.cfg.sample
> index 2c0b321..931f748 100644
> --- a/client/tests/kvm/kvm_tests.cfg.sample
> +++ b/client/tests/kvm/kvm_tests.cfg.sample
> @@ -82,6 +82,10 @@ variants:
>     - linux_s3:      install setup
>         type = linux_s3
>
> +    - iperf:        install setup
> +        type = iperf
> +        extra_params += " -snapshot"
> +
>  # NICs
>  variants:
>     - @rtl8139:
> @@ -102,6 +106,8 @@ variants:
>         ssh_status_test_command = echo $?
>         username = root
>         password = 123456
> +        iperf:
> +          iperf_binary = misc/iperf
>
>         variants:
>             - Fedora:
> @@ -292,6 +298,8 @@ variants:
>         password = 123456
>         migrate:
>             migration_test_command = ver && vol
> +        iperf:
> +            iperf_binary = misc/iperf.exe
>
>         variants:
>             - Win2000:
> --
> 1.6.2.2
>
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>
Lucas Meneghel Rodrigues June 30, 2009, 10:46 p.m. UTC | #4
On Tue, 2009-06-30 at 14:22 +0530, sudhir kumar wrote:
> On Wed, Jun 17, 2009 at 2:59 AM, Lucas Meneghel Rodrigues<lmr@redhat.com> wrote:
> > Adding iperf network performance test. Basically it tests
> > networking functionality, stability and performance of guest OSes.
> > This test is cross-platform -- i.e. it works on both Linux and
> > Windows VMs.
> >
> 
> I have a question here. Why are we adding iperf in a way different
> than other tests ? We have client/tests/<different_tests> directory
> for each test which contains the python modules and the test tarball.
> Then why in case of iperf we are putting it under client/tests/kvm and
> modifying kvm.py instead of putting the testsuit as part of
> autotest(run_autotest is not enough?)? Even if we do not want to touch
> the existing iperf test in autotest we can use a separate name like
> kvm_iperf. Somehow I have a feeling that there was a discussion on the
> list for keeping tests under a particular directory. But still I feel
> that should be only for tests specific to KVM and not the guest. Is
> there any disadvantage of using the current approach of executing
> these testsuits ?

Since the kvm subtests are contained under the kvm test dir, adding the
kvm_ file prefixes to the subtests is not necessary IMHO. 

Using the autotest iperf test is doable for linux guests, though it
doesn't work for windows guests, that's why Alexey decided to implement
it from scratch.

Lucas

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Alexey Eremenko July 1, 2009, 11:43 a.m. UTC | #5
On Tue, Jun 30, 2009 at 11:52 AM, sudhir kumar<smalikphy@gmail.com> wrote:
> On Wed, Jun 17, 2009 at 2:59 AM, Lucas Meneghel Rodrigues<lmr@redhat.com> wrote:
>> Adding iperf network performance test. Basically it tests
>> networking functionality, stability and performance of guest OSes.
>> This test is cross-platform -- i.e. it works on both Linux and
>> Windows VMs.
>>
>
> I have a question here. Why are we adding iperf in a way different
> than other tests ? We have client/tests/<different_tests> directory
> for each test which contains the python modules and the test tarball.
> Then why in case of iperf we are putting it under client/tests/kvm and
> modifying kvm.py instead of putting the testsuit as part of
> autotest(run_autotest is not enough?)? Even if we do not want to touch
> the existing iperf test in autotest we can use a separate name like
> kvm_iperf. Somehow I have a feeling that there was a discussion on the
> list for keeping tests under a particular directory. But still I feel
> that should be only for tests specific to KVM and not the guest. Is
> there any disadvantage of using the current approach of executing
> these testsuits ?

The reason for putting my test under the "kvm/" test is that it depends on
the KVM-Autotest framework, not just on the generic Autotest framework.

In addition, the test is cross-platform on the guest side, currently
supporting Windows and Linux guests, with possibility to support
Solaris and BSD in future.

LMR: me too, I hate putting binaries in the source tree, but the alternative
option is to provide a separate *.tar.bz2 for all the binary utils, and
I'm not sure which way is better.
Lucas Meneghel Rodrigues July 1, 2009, 3:57 p.m. UTC | #6
On Wed, 2009-07-01 at 14:43 +0300, Alexey Eremenko wrote:
> LMR: me too, hate putting binaries in source tree, but the alternative
> option is to provide separate *.tar.bz2 for all the binary utils, and
> I don't sure which way is better.

Yes, I don't have a clear idea as well. It's currently under
discussion...

Lucas

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Martin Bligh July 1, 2009, 4:18 p.m. UTC | #7
On Wed, Jul 1, 2009 at 8:57 AM, Lucas Meneghel Rodrigues<lmr@redhat.com> wrote:
> On Wed, 2009-07-01 at 14:43 +0300, Alexey Eremenko wrote:
>> LMR: me too, hate putting binaries in source tree, but the alternative
>> option is to provide separate *.tar.bz2 for all the binary utils, and
>> I don't sure which way is better.
>
> Yes, I don't have a clear idea as well. It's currently under
> discussion...

Is KVM x86_64 only?
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Avi Kivity July 1, 2009, 5:14 p.m. UTC | #8
On 07/01/2009 07:18 PM, Martin Bligh wrote:
> On Wed, Jul 1, 2009 at 8:57 AM, Lucas Meneghel Rodrigues<lmr@redhat.com>  wrote:
>    
>> On Wed, 2009-07-01 at 14:43 +0300, Alexey Eremenko wrote:
>>      
>>> LMR: me too, hate putting binaries in source tree, but the alternative
>>> option is to provide separate *.tar.bz2 for all the binary utils, and
>>> I don't sure which way is better.
>>>        
>> Yes, I don't have a clear idea as well. It's currently under
>> discussion...
>>      
>
> Is KVM x86_64 only?
>    

It's x86-64, i386, ia64, s390, and powerpc 44x/e500 only.
Martin Bligh July 1, 2009, 5:16 p.m. UTC | #9
>>>> LMR: me too, hate putting binaries in source tree, but the alternative
>>>> option is to provide separate *.tar.bz2 for all the binary utils, and
>>>> I don't sure which way is better.
>>>>
>>>
>>> Yes, I don't have a clear idea as well. It's currently under
>>> discussion...
>>>
>>
>> Is KVM x86_64 only?
>>
>
> It's x86-64, i386, ia64, s390, and powerpc 44x/e500 only.

OK, then it's difficult to see how using binaries would work. Can we not
compile these on the system at use time (see the client/deps
directory for other stuff we do this for)?

M.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Lucas Meneghel Rodrigues July 1, 2009, 8:29 p.m. UTC | #10
On Wed, 2009-07-01 at 10:16 -0700, Martin Bligh wrote:
> >>>> LMR: me too, hate putting binaries in source tree, but the alternative
> >>>> option is to provide separate *.tar.bz2 for all the binary utils, and
> >>>> I don't sure which way is better.
> >>>>
> >>>
> >>> Yes, I don't have a clear idea as well. It's currently under
> >>> discussion...
> >>>
> >>
> >> Is KVM x86_64 only?
> >>
> >
> > It's x86-64, i386, ia64, s390, and powerpc 44x/e500 only.
> 
> OK, then it's difficult to see using binaries? Can we not
> compile these on the system at use time (see the client/deps
> directory for other stuff we do this for)

Biggest trouble is compiling the test under windows hosts. We are
figuring out a way to work around this problem.


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Avi Kivity July 2, 2009, 8:26 a.m. UTC | #11
On 07/01/2009 08:16 PM, Martin Bligh wrote:
>>> Is KVM x86_64 only?
>>>
>>>        
>> It's x86-64, i386, ia64, s390, and powerpc 44x/e500 only.
>>      
>
> OK, then it's difficult to see using binaries? Can we not
> compile these on the system at use time (see the client/deps
> directory for other stuff we do this for)
>
>    

note kvm-autotest is x86 only at this time.
diff mbox

Patch

diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py
index 9428162..e1a6e27 100644
--- a/client/tests/kvm/kvm.py
+++ b/client/tests/kvm/kvm.py
@@ -53,6 +53,7 @@  class kvm(test.test):
                 "autotest":     test_routine("kvm_tests", "run_autotest"),
                 "kvm_install":  test_routine("kvm_install", "run_kvm_install"),
                 "linux_s3":     test_routine("kvm_tests", "run_linux_s3"),
+                "iperf":        test_routine("kvm_iperf", "run_iperf"),
                 }
 
         # Make it possible to import modules from the test's bindir
diff --git a/client/tests/kvm/kvm_iperf.py b/client/tests/kvm/kvm_iperf.py
new file mode 100644
index 0000000..927c9e5
--- /dev/null
+++ b/client/tests/kvm/kvm_iperf.py
@@ -0,0 +1,105 @@ 
+import time, os, logging
+from autotest_lib.client.common_lib import utils, error
+import kvm_utils
+
+def run_iperf(test, params, env):
+    """
+    Runs iperf on the guest system and brings back the result.
+
+    @see: http://sourceforge.net/projects/iperf
+    @param test: kvm test object
+    @param params: Dictionary with test parameters
+    @param env: Test environment
+    """
+    vm = kvm_utils.env_get_vm(env,  params.get("main_vm"))
+    if not vm:
+        message = "VM object not found in environment"
+        logging.error(message)
+        raise error.TestError, message
+    if not vm.is_alive():
+        message = "VM seems to be dead; Test requires a living VM"
+        logging.error(message)
+        raise error.TestError(message)
+
+    logging.info("Waiting for guest to be up...")
+
+    session = kvm_utils.wait_for(vm.ssh_login, 240, 0, 2)
+    if not session:
+        message = "Could not log into guest"
+        logging.error(message)
+        raise error.TestFail, message
+
+    logging.info("Logged in")
+
+    # Checking for GuestOS-compatible iPerf binary existence on host.
+    iperf_binary = params.get("iperf_binary", "misc/iperf")
+    iperf_duration = params.get("iperf_duration", 5)
+    iperf_parallel_threads = params.get("iperf_parallel_threads", 1)
+    iperf_dest_ip = params.get("iperf_dest_ip", "10.0.2.2")
+    iperf_binary = os.path.join(test.bindir, iperf_binary)
+    if not os.path.exists(iperf_binary):
+        message = "iPerf binary: %s was not found on host" % iperf_binary
+        logging.error(message)
+        raise error.TestError, message
+    else:
+        logging.info("iPerf binary: %s was found on host" % iperf_binary)
+
+    # Starting HostOS-compatible iPerf Server on host
+    logging.info('VM is up ... \n starting iPerf Server on host')
+    kvm_utils.run_bg("iperf -s", timeout=5)
+
+    # Detecting GuestOS
+    if iperf_binary.__contains__("exe"):
+        vm_type="win32"
+    else:
+        vm_type="linux32"
+
+    # Copying GuestOS-compatible iPerf binary to guest.
+    # Starting iPerf Client on guest, plus connect to host.
+    if vm_type == "win32":
+        win_dir = "/cygdrive/c/"
+        logging.info('starting copying %s to Windows VM to %s' % (iperf_binary,
+                                                                  win_dir))
+        if not vm.scp_to_remote(iperf_binary, win_dir):
+            message = "Could not copy Win32 iPerf to guest"
+            logging.error(message)
+            raise error.TestError(message)
+        logging.debug("Enabling file permissions of iPerf.exe on Windows VM...")
+        session.sendline('cacls C:\iperf.exe /P Administrator:F')
+        session.sendline('y')
+        session.sendline('')
+        time.sleep(2)
+        session.sendline('')
+        logging.info("starting iPerf client on Windows VM, connecting to host")
+        session.sendline('C:\iperf -t %s -c %s -P %s' % (int(iperf_duration),
+                                                         iperf_dest_ip,
+                                                   int(iperf_parallel_threads)))
+    else:
+        logging.info('starting copying %s to Linux VM ' % iperf_binary)
+        if not vm.scp_to_remote(iperf_binary, "/usr/local/bin"):
+            message = "Could not copy Linux iPerf to guest"
+            logging.error(message)
+            raise error.TestError, message
+        print "starting iPerf client on VM, connecting to host"
+        session.sendline('iperf -t %s -c %s -P %s' % (int(iperf_duration),
+                                                      iperf_dest_ip,
+                                                   int(iperf_parallel_threads)))
+
+    # Analyzing results
+    iperf_result_match, iperf_result = session.read_up_to_prompt()
+    logging.debug("iperf_result =", iperf_result)
+
+    if iperf_result.__contains__(" 0.00 bits/sec"):
+        msg = 'Guest returned 0.00 bits/sec during iperf test.'
+        raise error.TestError(msg)
+    elif iperf_result.__contains__("No route to host"):
+        msg = 'SSH to guest returned: No route to host.'
+        raise error.TestError(msg)
+    elif iperf_result.__contains__("Access is denied"):
+        msg = 'SSH to guest returned: Access is denied.'
+        raise error.TestError(msg)
+    elif not iperf_result.__contains__("bits/sec"):
+        msg = 'SSH result unrecognizeable.'
+        raise error.TestError(msg)
+
+    session.close()
diff --git a/client/tests/kvm/kvm_tests.cfg.sample b/client/tests/kvm/kvm_tests.cfg.sample
index 2c0b321..931f748 100644
--- a/client/tests/kvm/kvm_tests.cfg.sample
+++ b/client/tests/kvm/kvm_tests.cfg.sample
@@ -82,6 +82,10 @@  variants:
     - linux_s3:      install setup
         type = linux_s3
 
+    - iperf:        install setup
+        type = iperf
+        extra_params += " -snapshot"
+
 # NICs
 variants:
     - @rtl8139:
@@ -102,6 +106,8 @@  variants:
         ssh_status_test_command = echo $?
         username = root
         password = 123456
+        iperf:
+          iperf_binary = misc/iperf
 
         variants:
             - Fedora:
@@ -292,6 +298,8 @@  variants:
         password = 123456
         migrate:
             migration_test_command = ver && vol
+        iperf:
+            iperf_binary = misc/iperf.exe
 
         variants:
             - Win2000: