# Copy this file to guest-hw.cfg and edit it.
#
# NICs
variants:
    - rtl8139:
        no ppc64 ppc64le
        nic_model = rtl8139
    - e1000:
        no ppc64 ppc64le
        RHEL:
            only RHEL.6 RHEL.7
        nic_model = e1000
    - e1000-82540em:
        no ppc64 ppc64le
        nic_model = e1000-82540em
    - e1000-82544gc:
        no ppc64 ppc64le
        nic_model = e1000-82544gc
    - e1000-82545em:
        no ppc64 ppc64le
        nic_model = e1000-82545em
    - e1000e:
        only q35
        required_qemu = [2.6.0, )
        nic_model = e1000e
    - igb:
        only q35
        required_qemu = [8.0.0, )
        nic_model = igb
    - virtio_net:
        nic_model = virtio
        # Assign the queue number of the nic device
        #queues = 4
        # You can add advanced attributes on nic_extra_params, such as mrg_rxbuf
        #nic_extra_params =
        # You can add advanced attributes through netdev_extra_params,
        # such as sndbuf; as an example, you can uncomment the
        # following line to enable vhost support (only available
        # for tap)
        #netdev_extra_params = ",vhost=on"
        enable_msix_vectors = yes
        whql.submission.device.net:
            test_device = VirtIO Ethernet Adapter$
            # Device selection for the NDISTest client machine
            dp_regex_testdev = VirtIO Ethernet Adapter$
            dp_regex_clientmsgdev = VirtIO Ethernet Adapter #2$
            dp_regex_clientsupportdev = VirtIO Ethernet Adapter #3$
            # Device selection for the NDISTest server machine
            dp_regex_servermsgdev = VirtIO Ethernet Adapter$
            dp_regex_serversupportdev = VirtIO Ethernet Adapter #2$
        arm64-mmio, s390-virtio, riscv64-mmio:
            # Currently arm/s390x/riscv do not support MSI-X vectors
            enable_msix_vectors = no
    - xennet:
        # placeholder
    - spapr-vlan:
        nic_model = spapr-vlan
        only pseries
    - nic_custom:
        # If people want to specify something different.
variants:
    - up:
        no autotest.npb autotest.tsc
    - smp2:
        smp = 2
        used_cpus = 2
        stress_boot: used_cpus = 10
        timedrift.with_load: used_cpus = 100
variants:
    - ide:
        no q35
        drive_format=ide
        cd_format=ide
    - scsi:
        drive_format=scsi
    - sd:
        drive_format=sd
    - virtio_blk:
        drive_format=virtio
        i440fx:
            cd_format=ide
        q35:
            cd_format=ahci
        # Add -drive ...boot=yes unless qemu-kvm is 0.12.1.2 or newer,
        # in which case kvm_vm will ignore this option.
        image_boot~=yes
    - virtio_scsi:
        no WinXP
        # Supported formats are: scsi-hd, scsi-cd, scsi-disk, scsi-block,
        # scsi-generic
        # Use drive_format_$DISKNAME = "scsi-generic" to force the disk driver
        # for a single disk, as in the sketch below.
        # NOTE: scsi-block and scsi-generic are for pass-through physical devices only
        drive_format=scsi-hd
        cd_format=scsi-cd
        scsi_hba=virtio-scsi-pci
        libvirt_controller=virtio-scsi
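        # A hypothetical per-disk override (the disk name "stg" is an example,
        # not defined in this file): force that one disk through scsi-block
        # while the others keep scsi-hd.
        #drive_format_stg = scsi-block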
    - spapr_vscsi:
        drive_format=scsi-hd
        cd_format=scsi-cd
        scsi_hba=spapr-vscsi
        libvirt_controller=spapr-vscsi
        only pseries
    - lsi_scsi:
        no Host_RHEL
        drive_format=scsi-hd
        cd_format=scsi-cd
        scsi_hba=lsi53c895a
    - ahci:
        only q35
        drive_format=ahci
        cd_format=ahci
    - xenblk:
        # placeholder
variants:
    - luks:
        image_format = luks
        image_secret = redhat
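        # A minimal sketch of creating such an image manually with qemu-img
        # (file name and size are arbitrary; the secret matches image_secret
        # above):
        #   qemu-img create --object secret,id=sec0,data=redhat \
        #       -f luks -o key-secret=sec0 luks_test.img 10G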
    - qcow2v3:
        image_format = qcow2
        image_extra_params = "compat=1.1"
        check_image = yes
        qemu_io_blkdebug, rh_qemu_iotests, incremental_backup..kill_qemu:
            check_image = no
    - qcow2:
        image_format = qcow2
        check_image = yes
        qemu_io_blkdebug, rh_qemu_iotests, incremental_backup..kill_qemu:
            check_image = no
    - vmdk:
        no ioquit
        image_format = vmdk
    - raw:
        no ioquit
        image_format = raw
    - raw_dd:
        no ioquit
        image_format = raw
        create_with_dd = yes
        dd_create_cmd = "dd if=/dev/zero of=%s bs=1M count=%s"
    - qed:
        no ioquit
        image_format = qed
        check_image = yes
variants:
    - @no_pci_assignable:
        pci_assignable = no
    - pf_assignable:
        pci_assignable = pf
        host_setup_flag = 3
        # device_name is optional. If device_name is not set, the first
        # available pf will be used. If device_name is set but the device is
        # not available, the first available pf will also be used instead.
        device_name = eth1
        nettype = bridge
        pf_filter_re = "82576 Gigabit Network"
        mac_ip_filter = "inet (.\d+.\d+.\d+.\d+).*?ether (.\w+:\w+:\w+:\w+:\w+:\w+)"
        RHEL.6, RHEL.5:
            mac_ip_filter = "HWaddr (.\w+:\w+:\w+:\w+:\w+:\w+)\s+?inet addr:(.\d+\.\d+\.\d+\.\d+)"
        mac_changeable = "yes"
        unattended_install:
            mac_changeable = no
    - vf_assignable:
        pci_assignable = vf
        nettype = bridge
        host_setup_flag = 3
        # Driver (kernel module) that supports SR-IOV hardware.
        # As of today (30-11-2009), we have 2 drivers for this type of hardware:
        # Intel® 82576 Gigabit Ethernet Controller - igb
        # Neterion® X3100™ - vxge
        # On PowerPC [Power 8] Linux servers, supported SR-IOV hardware:
        # Mellanox CX4 MT27700 Family
        # For PowerPC, the driver (kernel module) that supports SR-IOV
        # hardware is mlx5_core
        driver = igb
        # vf_filter_re: Regex used to filter vfs from lspci.
        vf_filter_re = "Virtual Function"
        # pf_filter_re: Regex used to filter pfs from lspci. It will be used
        # when device_name cannot filter the pf.
        # For PowerPC: pf_filter_re = "MT27700 Family \[ConnectX-4\]"
        pf_filter_re = "82576 Gigabit Network"
        # Driver option to specify the maximum number of virtual functions
        # (on vxge, for example, the option is max_config_dev);
        # the default below is for the igb driver.
        # For PowerPC with mlx5_core: driver_option = "10", where 10 is the
        # number of virtual functions (VF). Maximum supported is 63 VF per port
        driver_option = "7"
        mac_ip_filter = "inet (.\d+.\d+.\d+.\d+).*?ether (.\w+:\w+:\w+:\w+:\w+:\w+)"
        RHEL.6, RHEL.5:
            mac_ip_filter = "HWaddr (.\w+:\w+:\w+:\w+:\w+:\w+)\s+?inet addr:(.\d+\.\d+\.\d+\.\d+)"
        mac_changeable = "yes"
        unattended_install:
            mac_changeable = no
        sr-iov.sr_iov_negative.negative_max_vfs:
            driver_option = "max_vfs=-1"
        sr-iov.sr_iov_negative.more_than_max_vfs:
            driver_option = "max_vfs=8"
variants:
    - @smallpages:
    - hugepages:
        hugepage = yes
        setup_hugepages = yes
        # In some special conditions, such as low mem PPC systems, you might
        # want to define directly how many large memory pages you want to set.
        # If you don't, avocado-vt will use a heuristic to calculate the number
        # (default). The heuristic can be found in virttest/test_setup.py
        #target_hugepages = 500
        # In some special conditions, such as low mem PPC systems, one might
        # want to set the overhead to 0, so leave this configurable. Keep in
        # mind that, if target_hugepages is set, this value will be ignored
        # since virt test won't try to calculate the required pages.
        hugepages_qemu_overhead = 128
        # Similarly, on low mem systems, if you deallocate hugepages and try
        # to allocate them again it won't be possible to find contiguous pages,
        # so make it possible to turn it off by setting this to 'no'.
        hugepages_deallocate = yes
        # Some other params that one might find useful to configure
        extra_params += " -mem-path /mnt/kvm_hugepage"
        domain_xml_snippet = "<memoryBacking><hugepages/></memoryBacking>"
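        # A back-of-the-envelope sizing sketch (assuming 2 MiB hugepages, the
        # common x86_64 default): a 1024 MiB guest plus the 128 MiB
        # hugepages_qemu_overhead above needs (1024 + 128) / 2 = 576 pages,
        # i.e. target_hugepages = 576.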
variants:
    - @no_9p_export:
    - 9p_export:
        # Supported 9p_fs_driver values: local, handle, proxy
        9p_fs_driver = local
        9p_export_dir = /mnt/
        # Supported 9p_security_model values: passthrough, mapped, none
        9p_security_model = passthrough
        9p_immediate_writeout = no
        9p_readonly = no
        # Only used in case of the proxy fs driver
        #9p_socket_name = /tmp/virtfs-9p-socket
        #9p_proxy_binary = virtfs-proxy-helper
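        # A minimal sketch of mounting the export inside the guest (the mount
        # tag "autotest_dir" is a placeholder; use whatever tag the device was
        # given on the qemu command line):
        #   mount -t 9p -o trans=virtio,version=9p2000.L autotest_dir /mnt/9p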
variants image_backend:
    - @filesystem:
    - gluster:
        storage_type = glusterfs
        enable_gluster = yes
        # Please go through http://avocado-vt.readthedocs.org/en/latest/extra/GlusterFs.html for more details.
        # Set the brick of the gluster server. If set, avocado-vt will try to
        # set up a gluster server on localhost.
        gluster_brick = "images/gl_test"
        # Set the volume name of gluster.
        gluster_volume_name = "vol_test"
        # Remote server address. Please set it if you want to use a remote
        # gluster server, and also unset gluster_brick.
        gluster_server = <YOUR GLUSTER SERVER>
    - nbd:
        storage_type = nbd
        enable_nbd = yes
        force_create_image = no
        # Hostname or IP address, or unix domain socket, to access the NBD
        # server; either nbd_server or nbd_unix_socket is required
        #nbd_server =
        #nbd_unix_socket =
        # TCP port to connect to (default 10809), optional
        #nbd_port =
        # NBD volume export name, optional
        #nbd_export_name =
        # TLS credentials path for the client, optional
        #nbd_client_tls_creds =
        # TLS credentials path for the server, optional
        #nbd_server_tls_creds =
        # Force the use of the block driver for the format, used for qemu-nbd, optional
        #nbd_export_format =
        # NBD volume export description, used for qemu-nbd, optional
        #nbd_export_description =
        # On an unexpected disconnect, the nbd client tries to connect again
        # until succeeding or encountering a serious error, optional
        #nbd_reconnect_delay =
        # The qcow2 persistent bitmap name; expose that bitmap via "qemu:dirty-bitmap:name",
        # used for qemu-nbd, optional
        #nbd_export_bitmap =
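        # A sketch of serving a local image over NBD with qemu-nbd (the image
        # path and export name are placeholders):
        #   qemu-nbd -t -x myexport -f qcow2 /path/to/image.qcow2
        # The test would then reach it with nbd_server = <host> and
        # nbd_export_name = myexport (URI form: nbd://<host>:10809/myexport).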
    - libcurl:
        storage_type = curl
        enable_curl = yes
        force_create_image = no
        remove_image = no
        image_readonly = yes
        skip_hash = yes
        # Address of the remote server, required
        #curl_server =
        # 'http', 'https', 'ftp', or 'ftps', required
        #curl_protocol =
        # Image path on the remote server, required
        #curl_path =
        # Whether to verify the remote server's certificate when connecting over SSL.
        # It can have the value 'on' (default) or 'off', https/ftps only, optional
        #curl_sslverify =
        # Cookie data provided in a secure way, http/https only, optional
        #curl_cookie_secret =
        # Username for authentication to the remote server, optional
        #curl_username =
        # Password for authentication to the remote server, optional
        #curl_password =
        # The amount of data to read ahead, which may optionally have the suffix
        # 'T', 'G', 'M', 'K', 'k' or 'b'. If it does not have a suffix, it is in bytes.
        # The value must be a multiple of 512 bytes. It defaults to 256k, optional
        #curl_readahead =
        # The timeout in seconds of the CURL connection, defaults to 5s, optional
        #curl_timeout =
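        # A filled-in sketch (host and path are placeholders): with
        #   curl_protocol = https
        #   curl_server = images.example.com
        #   curl_path = pub/guest.qcow2
        # the image is fetched from https://images.example.com/pub/guest.qcow2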
    - gluster_direct:
        # Access gluster storage directly by URI
        storage_type = glusterfs-direct
        enable_gluster = yes
        # Hostname or IP address of the gluster server, or
        # unix domain socket to access the gluster server;
        # either gluster_server or gluster_unix_socket is required
        #gluster_server =
        #gluster_unix_socket =
        # The name of the gluster volume where the VM image resides, required
        #gluster_volume_name =
        # A JSON doc string including an array of SocketAddress, optional
        #gluster_peers = '[{"type": "inet","host": "dell-per415-03.lab.eng.pek2.redhat.com","port":"12345"}, ...]'
        # tcp, unix or rdma, optional
        #gluster_transport =
        # port number, optional
        #gluster_port =
        # libgfapi log level (1-9), optional
        #gluster_debug =
        # libgfapi log file, optional
        #gluster_logfile =
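        # A sketch of the resulting QEMU URI (names are placeholders): with
        # gluster_server = gfs.example.com and gluster_volume_name = vol_test,
        # an image "guest.qcow2" is accessed as
        #   gluster://gfs.example.com/vol_test/guest.qcow2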
    - lvm_partition:
        # Use a real logical volume partition as the qemu block device drive
        only raw
        force_create_image = no
        image_raw_device = yes
        storage_type = lvm
        emulational_device = no
        # vg_name specifies an existing real logical volume group name.
        # Although we support setting up a real lvm environment automatically,
        # it is not recommended, since it may destroy data on your
        # hard disk. If needed, please configure your pv_name and
        # give a vg_name and lv_name.
        vg_name = autotest_vg
        #pv_name = /dev/sdb
        #lv_name = test_lv
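        # A sketch of preparing such a volume by hand (the device /dev/sdb is
        # a placeholder; the size is arbitrary):
        #   pvcreate /dev/sdb
        #   vgcreate autotest_vg /dev/sdb
        #   lvcreate -L 20G -n test_lv autotest_vg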
        # Command to reload the lvm monitor service
        #lvm_reload_cmd = "systemctl reload lvm2-lvmetad.service;"
        #lvm_reload_cmd += "systemctl reload lvm2-monitor.service"
        force_remove_image = no
    - emulated_lvm:
        only raw
        force_create_image = no
        image_raw_device = yes
        storage_type = lvm
        emulational_device = yes
        # Specify a file as the emulated volume; if not set, a file will be
        # created in the autotest data dir
        #emulated_image =
        force_remove_image = no
        remove_emulated_image = no
        #lvm_reload_cmd = "systemctl reload lvm2-lvmetad.service;"
        #lvm_reload_cmd += "systemctl reload lvm2-monitor.service"
    - ceph:
        storage_type = ceph
        # Some test cases may need images from different backends. Please set
        # enable_ceph_$image_name to no if that image does not use the ceph
        # server in your test
        enable_ceph = yes
        cmds_installed_host += " rbd"
        qcow2:
            # qcow2 is supported on ceph since QEMU 4.1.0
            required_qemu = [4.1.0,)
        # Please update the following parameters based on your test env
        #ceph_monitor = $ceph_monitor_ip
        #rbd_pool_name = $autotest_pool_name
        # rbd namespace name, optional:
        #rbd_namespace_name =
        # ceph key string in base64 format, optional:
        #ceph_key =
        # path to a customized ceph configuration file, optional:
        #ceph_conf =
        # path to a customized keyring conf file, optional:
        #image_ceph_keyring_conf =
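        # A quick host-side check (a sketch): once ceph_monitor and
        # rbd_pool_name are set, the pool should be listable with the rbd
        # client installed above, e.g.
        #   rbd ls -p $autotest_pool_name -m $ceph_monitor_ip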
    - emulational_iscsi:
        only raw
        storage_type = iscsi
        emulated_image = /tmp/iscsitest
        target = iqn.iscsi-emulation
        image_raw_device = yes
        force_create_image = no
    - iscsi:
        # Please update portal_ip, target and initiator params based on your test env
        only raw
        storage_type = iscsi
        #portal_ip = 192.168.12.10
        #target = iqn.2001-05.com.equallogic:0-8a0906-db31f7d03-470263b05654c204-kvm-autotest
        #initiator = iqn.2010-07.com.redhat:kvmautotest
        image_raw_device = yes
        force_create_image = no
    - iscsi_direct:
        storage_type = iscsi-direct
        enable_iscsi = yes
        image_raw_device = yes
    - remote_nfs:
        storage_type = nfs
        #nfs_mount_dir = /mnt/nfs_images
        #nfs_mount_options = rw
        # Please update nfs_mount_src based on your nfs server
        #nfs_mount_src = 192.168.12.10:/vol/s2imagetest
    - local_nfs:
        storage_type = nfs
        nfs_mount_dir = /mnt/nfs_images
        nfs_mount_options = rw
        export_dir = /mnt/nfs_export
        nfs_mount_src = localhost:/mnt/nfs_export
        setup_local_nfs = yes
        export_options = rw
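        # For reference, the setup above is roughly equivalent to this
        # /etc/exports entry plus a local mount (a sketch; avocado-vt does
        # the equivalent for you when setup_local_nfs = yes):
        #   /mnt/nfs_export *(rw)
        #   mount -t nfs -o rw localhost:/mnt/nfs_export /mnt/nfs_images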
    - nvme_direct:
        #nvme_pci_address = 0000:44:00.0 (NVMe PCI address, format: $domain:$bus:$slot.$function)
        #nvme_namespace = 1 (namespace number starting from 1 according to the NVMe spec)
        storage_type = nvme
        enable_nvme = yes
        remove_image = no
        image_raw_device = yes
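        # A sketch of finding the PCI address on the host (the output format
        # matches the $domain:$bus:$slot.$function example above):
        #   lspci -D | grep -i "Non-Volatile memory controller"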
    - libssh:
        # QEMU supports SSH (Secure Shell) access to remote disks.
        # This should only be used for read-only devices, but an image can
        # be created with qemu-img
        storage_type = ssh
        enable_ssh = yes
        force_create_image = no
        remove_image = no
        image_readonly = yes
variants:
    - @no_virtio_rng:
    - virtio_rng:
        no Host_RHEL.m5 Host_RHEL.m6.u0 Host_RHEL.m6.u1 Host_RHEL.m6.u2 Host_RHEL.m6.u3 Host_RHEL.m6.u4 Host_RHEL.m6.u5
        virtio_rngs += " rng0"
        #max-bytes_virtio-rng =
        #period_virtio-rng =
        variants:
            - rng_builtin:
                required_qemu ~= [4.2, )
                backend_rng0 = rng-builtin
                backend_type = builtin
            - rng_random:
                backend_rng0 = rng-random
                backend_type = passthrough
                filename_passthrough = /dev/urandom
            - rng_egd:
                backend_rng0 = rng-egd
                backend_type = chardev
                rng_chardev_backend = socket
                socket_type = tcp
                host_tcp = localhost
                port_tcp = 8000
                # If egd.pl is started on a remote host, please set setup_egd = no
                setup_egd = yes
variants:
    - @default_bios:
    - ovmf:
        no i386 ppc64 ppc64le s390x
        no Win2003 WinXP WinVista
        # Please update this based on your test env
        ovmf_path = /usr/share/OVMF
        ovmf_code_filename = OVMF_CODE.secboot.fd
        ovmf_vars_filename = OVMF_VARS.fd
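        # For reference, these params translate to roughly the following qemu
        # pflash drives (a sketch of the usual OVMF invocation):
        #   -drive if=pflash,format=raw,readonly=on,file=/usr/share/OVMF/OVMF_CODE.secboot.fd
        #   -drive if=pflash,format=raw,file=<per-VM copy of OVMF_VARS.fd>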
        unattended_install:
            restore_ovmf_vars = yes
            Windows:
                send_key_at_install = ret
# Nettype
variants:
    - @bridge:
        nettype = bridge
    - macvtap:
        nettype = macvtap
    - user:
        nettype = user
    - network:
        nettype = network
    - vdpa:
        nettype = vdpa