From aa543d0c97d19f8c42b291742df51424f5b5b2c4 Mon Sep 17 00:00:00 2001 From: DongHun Kwak Date: Fri, 14 Jan 2022 13:50:16 +0900 Subject: [PATCH] Imported Upstream version 0.6.4 --- ChangeLog | 6 - Makefile | 5 +- Makefile.inc | 9 +- README | 38 +- kpartx/Makefile | 10 +- kpartx/gpt.c | 1 - kpartx/kpartx.8 | 117 +- kpartx/kpartx.h | 2 + kpartx/kpartx.rules | 2 + kpartx/lopart.c | 4 +- libmpathcmd/mpath_cmd.c | 6 +- libmpathpersist/Makefile | 2 +- libmpathpersist/mpath_persist.c | 24 +- libmpathpersist/mpath_persistent_reserve_in.3 | 172 +- .../mpath_persistent_reserve_out.3 | 200 +- libmpathpersist/mpathpr.h | 2 +- libmultipath/Makefile | 4 +- libmultipath/alias.c | 2 +- libmultipath/blacklist.c | 12 +- libmultipath/checkers.h | 6 + libmultipath/checkers/rbd.c | 123 +- libmultipath/checkers/tur.c | 179 +- libmultipath/config.c | 12 +- libmultipath/config.h | 4 + libmultipath/configure.c | 13 +- libmultipath/debug.c | 4 +- libmultipath/defaults.h | 8 +- libmultipath/devmapper.c | 55 +- libmultipath/devmapper.h | 8 +- libmultipath/dict.c | 24 +- libmultipath/discovery.c | 15 +- libmultipath/discovery.h | 1 + libmultipath/dmparser.c | 8 + libmultipath/file.c | 2 +- libmultipath/hwtable.c | 364 +- libmultipath/log.c | 2 + libmultipath/log.h | 2 +- libmultipath/memory.c | 1 + libmultipath/memory.h | 1 - libmultipath/print.c | 161 +- libmultipath/prio.h | 3 + libmultipath/prioritizers/alua.c | 8 +- libmultipath/prioritizers/const.c | 2 +- libmultipath/prioritizers/datacore.c | 2 +- libmultipath/prioritizers/iet.c | 2 +- libmultipath/prioritizers/ontap.c | 1 - libmultipath/prioritizers/random.c | 2 +- libmultipath/prioritizers/weightedpath.c | 2 +- libmultipath/propsel.c | 24 +- libmultipath/propsel.h | 1 + libmultipath/structs.c | 11 + libmultipath/structs.h | 9 + libmultipath/structs_vec.c | 28 +- libmultipath/time-util.c | 42 + libmultipath/time-util.h | 13 + libmultipath/uevent.c | 39 +- libmultipath/uevent.h | 1 - libmultipath/util.c | 25 +- libmultipath/util.h | 1 + libmultipath/uxsock.c | 19 +- libmultipath/version.h | 4 +- libmultipath/wwids.c | 4 +- mpathpersist/Makefile | 10 +- mpathpersist/main.c | 7 +- mpathpersist/main.h | 48 +- mpathpersist/mpathpersist.8 | 264 +- multipath/Makefile | 10 +- multipath/main.c | 1 + multipath/multipath.8 | 159 +- multipath/multipath.conf.5 | 111 +- multipathd/Makefile | 17 +- multipathd/cli.c | 10 +- multipathd/cli_handlers.c | 15 +- multipathd/main.c | 279 +- multipathd/multipathd.8 | 271 +- multipathd/uxlsnr.c | 21 +- third-party/valgrind/drd.h | 571 ++ third-party/valgrind/valgrind.h | 7126 +++++++++++++++++ 78 files changed, 9690 insertions(+), 1084 deletions(-) delete mode 100644 ChangeLog create mode 100644 libmultipath/time-util.c create mode 100644 libmultipath/time-util.h create mode 100644 third-party/valgrind/drd.h create mode 100644 third-party/valgrind/valgrind.h diff --git a/ChangeLog b/ChangeLog deleted file mode 100644 index 4b547bb..0000000 --- a/ChangeLog +++ /dev/null @@ -1,6 +0,0 @@ -Change logs are at : - -- pre-0.4.5 - http://web.archive.org/web/20070309224034/http://christophe.varoqui.free.fr/wiki/wakka.php?wiki=ChangeLog -- post-0.4.5 - http://git.opensvc.com/?p=multipath-tools/.git;a=log diff --git a/Makefile b/Makefile index 95c7dfb..228d9ac 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,8 @@ ifeq ($(KRNLSRC),) KRNLSRC = $(KRNLLIB)/build KRNLOBJ = $(KRNLLIB)/build endif + export KRNLSRC + export KRNLOBJ endif ifeq ($(MULTIPATH_VERSION),) @@ -22,9 +24,6 @@ else VERSION = $(MULTIPATH_VERSION) endif -export KRNLSRC -export 
KRNLOBJ - BUILDDIRS = \ libmpathcmd \ libmultipath \ diff --git a/Makefile.inc b/Makefile.inc index 0808e64..1cc8f44 100644 --- a/Makefile.inc +++ b/Makefile.inc @@ -45,7 +45,7 @@ bindir = $(exec_prefix)/sbin libudevdir = $(prefix)/$(SYSTEMDPATH)/udev udevrulesdir = $(libudevdir)/rules.d multipathdir = $(TOPDIR)/libmultipath -mandir = $(prefix)/usr/share/man/man8 +man8dir = $(prefix)/usr/share/man/man8 man5dir = $(prefix)/usr/share/man/man5 man3dir = $(prefix)/usr/share/man/man3 syslibdir = $(prefix)/$(LIB) @@ -54,14 +54,17 @@ libdir = $(prefix)/$(LIB)/multipath unitdir = $(prefix)/$(SYSTEMDPATH)/systemd/system mpathpersistdir = $(TOPDIR)/libmpathpersist mpathcmddir = $(TOPDIR)/libmpathcmd +thirdpartydir = $(TOPDIR)/third-party GZIP = gzip -9 -c RM = rm -f LN = ln -sf INSTALL_PROGRAM = install -OPTFLAGS = -Wunused -Wstrict-prototypes -O2 -g -pipe -Wformat-security -Wall \ - -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 +OPTFLAGS = -O2 -g -pipe -Wall -Wextra -Wformat=2 \ + -Wno-sign-compare -Wno-unused-parameter -Wno-clobbered \ + -Wp,-D_FORTIFY_SOURCE=2 -fstack-protector \ + --param=ssp-buffer-size=4 CFLAGS = $(OPTFLAGS) -fPIC -DLIB_STRING=\"${LIB}\" -DRUN_DIR=\"${RUN}\" SHARED_FLAGS = -shared diff --git a/README b/README index 77b79b8..a18b325 100644 --- a/README +++ b/README @@ -1,8 +1,8 @@ -multipath-tools for Linux + multipath-tools for Linux -This package provides the following binaries to drive the -Device Mapper multipathing driver: +This package provides the following binaries to drive the Device Mapper +multipathing driver: multipath - Device mapper target autoconfig. multipathd - Multipath daemon. @@ -10,20 +10,38 @@ mpathpersist - Manages SCSI persistent reservations on dm multipath devices. kpartx - Create device maps from partition tables. -tarballs are not generated anymore, to get a specific release do: -git clone http://git.opensvc.com/multipath-tools/.git +Releases +======== +Tarballs are not generated anymore, to get a specific release do: +git clone https://git.opensvc.com/multipath-tools/.git cd multipath-tools git tag git archive --format=tar.gz --prefix=multipath-tools-X.Y.Z/ X.Y.Z > ../multipath-tools-X.Y.Z.tar.gz Alternatively it may be obtained from gitweb, go to: -http://git.opensvc.com/?p=multipath-tools/.git;a=tags +https://git.opensvc.com/?p=multipath-tools/.git;a=tags select a release-tag and then click on "snapshot". -To get latest devel code: git clone http://git.opensvc.com/multipath-tools/.git +Source code +=========== +To get latest devel code: git clone https://git.opensvc.com/multipath-tools/.git +Gitweb: https://git.opensvc.com/?p=multipath-tools/.git -Mailing list: http://www.redhat.com/mailman/listinfo/dm-devel -Gitweb: http://git.opensvc.com/?p=multipath-tools/.git -Current maintainer is Christophe Varoqui +Mailing list (subscribers-only) +============ +To subscribe and archives: https://www.redhat.com/mailman/listinfo/dm-devel +Searchable: https://marc.info/?l=dm-devel + + +Changelog +========= +pre-0.4.5: https://web.archive.org/web/20070309224034/http://christophe.varoqui.free.fr/wiki/wakka.php?wiki=ChangeLog +post-0.4.5: https://git.opensvc.com/?p=multipath-tools/.git;a=log + + +Maintainer +========== +Christophe Varoqui +Device-mapper development mailing list diff --git a/kpartx/Makefile b/kpartx/Makefile index 75b7606..e8a59f2 100644 --- a/kpartx/Makefile +++ b/kpartx/Makefile @@ -5,7 +5,7 @@ include ../Makefile.inc CFLAGS += -I. 
-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -LDFLAGS = -ldevmapper +LIBDEPS += -ldevmapper LIBDM_API_COOKIE = $(shell grep -Ecs '^[a-z]*[[:space:]]+dm_task_set_cookie' /usr/include/libdevmapper.h) @@ -21,7 +21,7 @@ EXEC = kpartx all: $(EXEC) $(EXEC): $(OBJS) - $(CC) $(OBJS) -o $(EXEC) $(LDFLAGS) + $(CC) $(CFLAGS) $(OBJS) -o $(EXEC) $(LDFLAGS) $(LIBDEPS) $(GZIP) $(EXEC).8 > $(EXEC).8.gz install: $(EXEC) $(EXEC).8 @@ -31,12 +31,12 @@ install: $(EXEC) $(EXEC).8 $(INSTALL_PROGRAM) -m 755 kpartx_id $(DESTDIR)$(libudevdir) $(INSTALL_PROGRAM) -d $(DESTDIR)$(libudevdir)/rules.d $(INSTALL_PROGRAM) -m 644 kpartx.rules $(DESTDIR)$(libudevdir)/rules.d/66-kpartx.rules - $(INSTALL_PROGRAM) -d $(DESTDIR)$(mandir) - $(INSTALL_PROGRAM) -m 644 $(EXEC).8.gz $(DESTDIR)$(mandir) + $(INSTALL_PROGRAM) -d $(DESTDIR)$(man8dir) + $(INSTALL_PROGRAM) -m 644 $(EXEC).8.gz $(DESTDIR)$(man8dir) uninstall: $(RM) $(DESTDIR)$(bindir)/$(EXEC) - $(RM) $(DESTDIR)$(mandir)/$(EXEC).8.gz + $(RM) $(DESTDIR)$(man8dir)/$(EXEC).8.gz $(RM) $(DESTDIR)$(libudevdir)/kpartx_id $(RM) $(DESTDIR)$(libudevdir)/rules.d/66-kpartx.rules $(RM) $(DESTDIR)$(libudevdir)/rules.d/67-kpartx-compat.rules diff --git a/kpartx/gpt.c b/kpartx/gpt.c index 3511886..6ef20f9 100644 --- a/kpartx/gpt.c +++ b/kpartx/gpt.c @@ -504,7 +504,6 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, uint64_t lastlba) static int find_valid_gpt(int fd, gpt_header ** gpt, gpt_entry ** ptes) { - extern int force_gpt; int good_pgpt = 0, good_agpt = 0, good_pmbr = 0; gpt_header *pgpt = NULL, *agpt = NULL; gpt_entry *pptes = NULL, *aptes = NULL; diff --git a/kpartx/kpartx.8 b/kpartx/kpartx.8 index 4c55831..ba58acb 100644 --- a/kpartx/kpartx.8 +++ b/kpartx/kpartx.8 @@ -1,49 +1,93 @@ -.TH KPARTX 8 "July 2006" "" "Linux Administrator's Manual" +.\" ---------------------------------------------------------------------------- +.\" Update the date below if you make any significant change. +.\" Make sure there are no errors with: +.\" groff -z -wall -b -e -t kpartx/kpartx.8 +.\" +.\" ---------------------------------------------------------------------------- +. +.TH KPARTX 8 2016-10-28 "Linux" +. +. +.\" ---------------------------------------------------------------------------- .SH NAME -kpartx \- Create device maps from partition tables +.\" ---------------------------------------------------------------------------- +. +kpartx \- Create device maps from partition tables. +. +. +.\" ---------------------------------------------------------------------------- .SH SYNOPSIS +.\" ---------------------------------------------------------------------------- +. .B kpartx -.RB [\| \-a\ \c -.BR |\ -d\ |\ -l \|] +.RB [\| \-a | \-d | \-u | \-l \|] +.RB [\| \-r \|] +.RB [\| \-p \|] +.RB [\| \-f \|] +.RB [\| \-g \|] +.RB [\| \-s \|] .RB [\| \-v \|] -.RB wholedisk +.B wholedisk +. +. +.\" ---------------------------------------------------------------------------- .SH DESCRIPTION -This tool, derived from util-linux' partx, reads partition -tables on specified device and create device maps over partitions -segments detected. It is called from hotplug upon device maps -creation and deletion. +.\" ---------------------------------------------------------------------------- +. +This tool, derived from util-linux' partx, reads partition tables on specified +device and create device maps over partitions segments detected. It is called +from hotplug upon device maps creation and deletion. +. +. 
+.\" ---------------------------------------------------------------------------- .SH OPTIONS +.\" ---------------------------------------------------------------------------- +. .TP .B \-a -Add partition mappings -.TP -.B \-r -Read-only partition mappings +Add partition mappings. +. .TP .B \-d -Delete partition mappings +Delete partition mappings. +. .TP .B \-u -Update partition mappings +Update partition mappings. +. .TP .B \-l -List partition mappings that would be added \-a +List partition mappings that would be added \-a. +. +.TP +.B \-r +Read-only partition mappings. +. .TP .B \-p -set device name-partition number delimiter +Set device name-partition number delimiter. +. .TP .B \-f -force creation of mappings; overrides 'no_partitions' feature +Force creation of mappings; overrides 'no_partitions' feature. +. .TP .B \-g -force GUID partition table (GPT) -.TP -.B \-v -Operate verbosely +Force GUID partition table (GPT). +. .TP .B \-s -Sync mode. Don't return until the partitions are created +Sync mode. Don't return until the partitions are created. +. +.TP +.B \-v +Operate verbosely. +. +. +.\" ---------------------------------------------------------------------------- .SH EXAMPLE +.\" ---------------------------------------------------------------------------- +. To mount all the partitions in a raw disk image: .IP kpartx \-av disk.img @@ -52,22 +96,31 @@ This will output lines such as: .IP add map loop1p1 (254:4): 0 409597 linear 7:1 3 .PP -The -.I loop1p1 -is the name of a device file under -.I /dev/mapper -which you can use to access the partition, for example to fsck it: +The \fIloop1p1\fR is the name of a device file under \fI/dev/mapper\fR which you +can use to access the partition, for example to fsck it: .IP fsck /dev/mapper/loop1p1 .PP When you're done, you need to remove the devices: .IP kpartx \-d disk.img +. +. +.\" ---------------------------------------------------------------------------- .SH "SEE ALSO" +.\" ---------------------------------------------------------------------------- +. .BR multipath (8) .BR multipathd (8) .BR hotplug (8) -.SH "AUTHORS" -This man page was assembled By Patrick Caulfield -for the Debian project. From documentation provided -by the multipath author Christophe Varoqui, and others. +. +. +.\" ---------------------------------------------------------------------------- +.SH AUTHORS +.\" ---------------------------------------------------------------------------- +. +This man page was assembled By Patrick Caulfield for the Debian project. +.PP +\fImultipath-tools\fR was developed by Christophe Varoqui +and others. 
+.\" EOF diff --git a/kpartx/kpartx.h b/kpartx/kpartx.h index a55c211..52920e4 100644 --- a/kpartx/kpartx.h +++ b/kpartx/kpartx.h @@ -39,6 +39,8 @@ struct slice { typedef int (ptreader)(int fd, struct slice all, struct slice *sp, int ns); +extern int force_gpt; + extern ptreader read_dos_pt; extern ptreader read_bsd_pt; extern ptreader read_solaris_pt; diff --git a/kpartx/kpartx.rules b/kpartx/kpartx.rules index 1713f3c..48a4d6c 100644 --- a/kpartx/kpartx.rules +++ b/kpartx/kpartx.rules @@ -37,6 +37,8 @@ ENV{ID_FS_USAGE}=="filesystem|other", ENV{ID_FS_LABEL_ENC}=="?*", \ # Create dm tables for partitions ENV{DM_ACTION}=="PATH_FAILED|PATH_REINSTATED", GOTO="kpartx_end" ENV{DM_NR_VALID_PATHS}=="0", GOTO="kpartx_end" +ENV{ENV{DM_UDEV_PRIMARY_SOURCE_FLAG}!="1", IMPORT{db}="DM_SUBSYSTEM_UDEV_FLAG1" +ENV{DM_SUBSYSTEM_UDEV_FLAG1}=="1", GOTO="kpartx_end" ENV{DM_STATE}!="SUSPENDED", ENV{DM_UUID}=="mpath-*", \ RUN+="/sbin/kpartx -u -p -part /dev/$name" diff --git a/kpartx/lopart.c b/kpartx/lopart.c index 0ab1688..14af34f 100644 --- a/kpartx/lopart.c +++ b/kpartx/lopart.c @@ -2,7 +2,7 @@ /* Added vfs mount options - aeb - 960223 */ /* Removed lomount - aeb - 960224 */ -/* 1999-02-22 Arkadiusz Mi¶kiewicz +/* 1999-02-22 Arkadiusz Miśkiewicz * - added Native Language Support * Sun Mar 21 1999 - Arnaldo Carvalho de Melo * - fixed strerr(errno) in gettext calls @@ -228,7 +228,7 @@ find_unused_loop_device (void) " maybe /dev/loop# has the wrong major number?"); } else fprintf(stderr, "mount: could not find any free loop device"); - return 0; + return NULL; } extern int diff --git a/libmpathcmd/mpath_cmd.c b/libmpathcmd/mpath_cmd.c index c058479..d9c5790 100644 --- a/libmpathcmd/mpath_cmd.c +++ b/libmpathcmd/mpath_cmd.c @@ -31,9 +31,9 @@ static ssize_t read_all(int fd, void *buf, size_t len, unsigned int timeout) if (errno == EINTR) continue; return -1; - } else if (!pfd.revents & POLLIN) + } else if (!(pfd.revents & POLLIN)) continue; - n = read(fd, buf, len); + n = recv(fd, buf, len, 0); if (n < 0) { if ((errno == EINTR) || (errno == EAGAIN)) continue; @@ -56,7 +56,7 @@ static size_t write_all(int fd, const void *buf, size_t len) size_t total = 0; while (len) { - ssize_t n = write(fd, buf, len); + ssize_t n = send(fd, buf, len, MSG_NOSIGNAL); if (n < 0) { if ((errno == EINTR) || (errno == EAGAIN)) continue; diff --git a/libmpathpersist/Makefile b/libmpathpersist/Makefile index 1c4b2e1..6e43427 100644 --- a/libmpathpersist/Makefile +++ b/libmpathpersist/Makefile @@ -15,7 +15,7 @@ all: $(LIBS) $(LIBS): $(CC) -c $(CFLAGS) *.c - $(CC) $(SHARED_FLAGS) $(LIBDEPS) -Wl,-soname=$@ $(CFLAGS) -o $@ $(OBJS) + $(CC) $(LDFLAGS) $(SHARED_FLAGS) $(LIBDEPS) -Wl,-soname=$@ $(CFLAGS) -o $@ $(OBJS) $(LN) $(LIBS) $(DEVLIB) $(GZIP) mpath_persistent_reserve_in.3 > mpath_persistent_reserve_in.3.gz $(GZIP) mpath_persistent_reserve_out.3 > mpath_persistent_reserve_out.3.gz diff --git a/libmpathpersist/mpath_persist.c b/libmpathpersist/mpath_persist.c index 7501651..faea2b7 100644 --- a/libmpathpersist/mpath_persist.c +++ b/libmpathpersist/mpath_persist.c @@ -35,6 +35,7 @@ #define __STDC_FORMAT_MACROS 1 struct udev *udev; +struct config *conf; struct config * mpath_lib_init (struct udev *udev) @@ -414,17 +415,6 @@ get_mpvec (vector curmp, vector pathvec, char * refwwid) return MPATH_PR_SUCCESS ; } -void * mpath_prin_pthread_fn (void *p) -{ - int ret; - struct prin_param * pparam = (struct prin_param *)p; - - ret = prin_do_scsi_ioctl(pparam->dev, pparam->rq_servact, - pparam->resp, pparam->noisy); - pparam->status = ret; - 
pthread_exit(NULL); -} - int mpath_send_prin_activepath (char * dev, int rq_servact, struct prin_resp * resp, int noisy) { @@ -833,26 +823,22 @@ void * mpath_alloc_prin_response(int prin_sa) { case MPATH_PRIN_RKEY_SA: size = sizeof(struct prin_readdescr); - ptr = malloc(size); - memset(ptr, 0, size); break; case MPATH_PRIN_RRES_SA: size = sizeof(struct prin_resvdescr); - ptr = malloc(size); - memset(ptr, 0, size); break; case MPATH_PRIN_RCAP_SA: size=sizeof(struct prin_capdescr); - ptr = malloc(size); - memset(ptr, 0, size); break; case MPATH_PRIN_RFSTAT_SA: size = sizeof(struct print_fulldescr_list) + sizeof(struct prin_fulldescr *)*MPATH_MX_TIDS; - ptr = malloc(size); - memset(ptr, 0, size); break; } + if (size > 0) + { + ptr = calloc(size, 1); + } return ptr; } diff --git a/libmpathpersist/mpath_persistent_reserve_in.3 b/libmpathpersist/mpath_persistent_reserve_in.3 index 8601645..5ed0779 100644 --- a/libmpathpersist/mpath_persistent_reserve_in.3 +++ b/libmpathpersist/mpath_persistent_reserve_in.3 @@ -1,80 +1,116 @@ +.\" ---------------------------------------------------------------------------- +.\" Update the date below if you make any significant change. +.\" Make sure there are no errors with: +.\" groff -z -wall -b -e -t libmpathpersist/mpath_persistent_reserve_in.3 .\" -.TH MPATH_PERSISTENT_RESERVE_IN 3 2011-04-08 "Linux Manpage" +.\" ---------------------------------------------------------------------------- +. +.TH MPATH_PERSISTENT_RESERVE_IN 3 2016-11-01 "Linux" +. +. +.\" ---------------------------------------------------------------------------- .SH NAME +.\" ---------------------------------------------------------------------------- +. mpath_persistent_reserve_in +. +. +.\" ---------------------------------------------------------------------------- .SH SYNOPSIS +.\" ---------------------------------------------------------------------------- +. .B #include -.sp -.BI "int mpath_persistent_reserve_in (int fd, int rq_servact, struct prin_resp *resp, int noisy, int verbose)" -.sp +.P +.BI "int mpath_persistent_reserve_in" "(int fd, int rq_servact, struct prin_resp *resp, int noisy, int verbose)" +.P +. +. +.\" ---------------------------------------------------------------------------- .SH DESCRIPTION -The function in the -.BR mpath_persistent_reserve_in () -sends PRIN command to the DM device and gets the response. -.br -.BI Parameters: -.br +.\" ---------------------------------------------------------------------------- +. +The function in the \fBmpath_persistent_reserve_in ()\fR sends PRIN command to +the DM device and gets the response. +.TP +.B Parameters: +.RS +.TP 12 .I fd -.B The file descriptor of a multipath device. Input argument. -.br +The file descriptor of a multipath device. Input argument. +.TP .I rq_servact -.B PRIN command service action. Input argument -.br +PRIN command service action. Input argument. +.TP .I resp -.B The response from PRIN service action. The caller should manage the memory allocation of this structure -.br +The response from PRIN service action. The caller should manage the memory allocation of this structure. +.TP .I noisy -.B Turn on debugging trace: Input argument. 0->Disable, 1->Enable -.br +Turn on debugging trace: Input argument. 0->Disable, 1->Enable. +.TP .I verbose -.B Set verbosity level. Input argument. value:[0-3]. 
0->Crits and Errors, 1->Warnings, 2->Info, 3->Debug -.br - -.SH "RETURNS" -.I MPATH_PR_SUCCESS -.B if PR command successful -.br -.I MPATH_PR_SYNTAX_ERROR -.B if syntax error or invalid parameter -.br -.I MPATH_PR_SENSE_NOT_READY -.B if command fails with [sk,asc,ascq: 0x2,*,*] -.br -.I MPATH_PR_SENSE_MEDIUM_ERROR -.B if command fails with [sk,asc,ascq: 0x3,*,*] -.br -.I MPATH_PR_SENSE_HARDWARE_ERROR -.B if command fails with [sk,asc,ascq: 0x4,*,*] -.br -.I MPATH_PR_SENSE_INVALID_OP -.B if command fails with [sk,asc,ascq: 0x5,0x20,0x0] -.br -.I MPATH_PR_ILLEGAL_REQ -.B if command fails with [sk,asc,ascq: 0x5,*,*] -.br -.I MPATH_PR_SENSE_UNIT_ATTENTION -.B if command fails with [sk,asc,ascq: 0x6,*,*] -.br -.I MPATH_PR_SENSE_ABORTED_COMMAND -.B if command fails with [sk,asc,ascq: 0xb,*,*] -.br -.I MPATH_PR_NO_SENSE -.B if command fails with [sk,asc,ascq: 0x0,*,*] -.br -.I MPATH_PR_SENSE_MALFORMED -.B if command fails with SCSI command malformed -.br -.I MPATH_PR_FILE_ERROR -.B if command fails while accessing file (device node) problems(e.g. not found) -.br -.I MPATH_PR_DMMP_ERROR -.B if Device Mapper related error.(e.g Error in getting dm info) -.br -.I MPATH_PR_OTHER -.B if other error/warning has occurred(e.g transport or driver error) -.br - - +Set verbosity level. Input argument. value:[0-3]. 0->Crits and Errors, 1->Warnings, 2->Info, 3->Debug. +.RE +. +. +.\" ---------------------------------------------------------------------------- +.SH RETURNS +.\" ---------------------------------------------------------------------------- +. +.TP 12 +.B MPATH_PR_SUCCESS +If PR command successful. +.TP +.B MPATH_PR_SYNTAX_ERROR +If syntax error or invalid parameter. +.TP +.B MPATH_PR_SENSE_NOT_READY +If command fails with [sk,asc,ascq: 0x2,*,*]. +.TP +.B MPATH_PR_SENSE_MEDIUM_ERROR +If command fails with [sk,asc,ascq: 0x3,*,*]. +.TP +.B MPATH_PR_SENSE_HARDWARE_ERROR +If command fails with [sk,asc,ascq: 0x4,*,*]. +.TP +.B MPATH_PR_SENSE_INVALID_OP +If command fails with [sk,asc,ascq: 0x5,0x20,0x0]. +.TP +.B MPATH_PR_ILLEGAL_REQ +If command fails with [sk,asc,ascq: 0x5,*,*]. +.TP +.B MPATH_PR_SENSE_UNIT_ATTENTION +If command fails with [sk,asc,ascq: 0x6,*,*]. +.TP +.B MPATH_PR_SENSE_ABORTED_COMMAND +If command fails with [sk,asc,ascq: 0xb,*,*]. +.TP +.B MPATH_PR_NO_SENSE +If command fails with [sk,asc,ascq: 0x0,*,*]. +.TP +.B MPATH_PR_SENSE_MALFORMED +If command fails with SCSI command malformed. +.TP +.B MPATH_PR_FILE_ERROR +If command fails while accessing file (device node) problems(e.g. not found). +.TP +.B MPATH_PR_DMMP_ERROR +If Device Mapper related error.(e.g Error in getting dm info). +.TP +.B MPATH_PR_OTHER +If other error/warning has occurred(e.g transport or driver error). +. +. +.\" ---------------------------------------------------------------------------- .SH "SEE ALSO" -.I mpath_persistent_reserve_out mpathpersist /usr/share/doc/mpathpersist/README -.br +.\" ---------------------------------------------------------------------------- +. +.BR mpathpersist (8). +. +. +.\" ---------------------------------------------------------------------------- +.SH AUTHORS +.\" ---------------------------------------------------------------------------- +. +\fImultipath-tools\fR was developed by Christophe Varoqui +and others. 
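As a rough illustration of the calling convention documented above, here is a minimal, hypothetical C sketch of a PRIN "read keys" request against a dm-multipath device node. It is not taken from the library sources: the header name <mpath_persist.h> is an assumption (the #include line in the SYNOPSIS above lost its file name), the device path is an example, and it presumes the library has already been initialised the way mpathpersist(8) does (see mpath_lib_init() in libmpathpersist). Only names that appear elsewhere in this patch (mpath_persistent_reserve_in, mpath_alloc_prin_response, MPATH_PRIN_RKEY_SA, MPATH_PR_* codes) are used.

/* Hypothetical usage sketch: read registered keys from a multipath device.
 * Assumes <mpath_persist.h> as the header name and that library/udev
 * initialisation (e.g. via mpath_lib_init()) has already been performed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <mpath_persist.h>

static int read_keys(const char *mapdev)
{
	struct prin_resp *resp;
	int fd, ret;

	fd = open(mapdev, O_RDONLY);         /* e.g. "/dev/mapper/mpatha" */
	if (fd < 0)
		return MPATH_PR_FILE_ERROR;

	/* The caller manages the response buffer, as noted above. */
	resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
	if (!resp) {
		close(fd);
		return MPATH_PR_OTHER;
	}

	ret = mpath_persistent_reserve_in(fd, MPATH_PRIN_RKEY_SA, resp,
					  0 /* noisy */, 2 /* verbose */);
	if (ret != MPATH_PR_SUCCESS)
		fprintf(stderr, "PRIN read keys failed: %d\n", ret);

	free(resp);
	close(fd);
	return ret;
}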
+.\" EOF diff --git a/libmpathpersist/mpath_persistent_reserve_out.3 b/libmpathpersist/mpath_persistent_reserve_out.3 index 8a3b52c..e11eb57 100644 --- a/libmpathpersist/mpath_persistent_reserve_out.3 +++ b/libmpathpersist/mpath_persistent_reserve_out.3 @@ -1,92 +1,136 @@ +.\" ---------------------------------------------------------------------------- +.\" Update the date below if you make any significant change. +.\" Make sure there are no errors with: +.\" groff -z -wall -b -e -t libmpathpersist/mpath_persistent_reserve_out.3 .\" -.TH MPATH_PERSISTENT_RESERVE_OUT 3 2011-04-08 "Linux Manpage" +.\" ---------------------------------------------------------------------------- +. +.TH MPATH_PERSISTENT_RESERVE_OUT 3 2016-11-01 "Linux" +. +. +.\" ---------------------------------------------------------------------------- .SH NAME +.\" ---------------------------------------------------------------------------- +. mpath_persistent_reserve_out +. +. +.\" ---------------------------------------------------------------------------- .SH SYNOPSIS +.\" ---------------------------------------------------------------------------- +. .B #include -.sp -.BI "int mpath_persistent_reserve_out (int fd, int rq_servact, struct prin_resp *resp, int noisy, int verbose)" -.sp +.P +.BI "int mpath_persistent_reserve_out" "(int fd, int rq_servact, struct prin_resp *resp, int noisy, int verbose)" +.P +. +. +.\" ---------------------------------------------------------------------------- .SH DESCRIPTION -The function in the -.BR mpath_persistent_reserve_out () -sends PR OUT command to the DM device and gets the response. -.br -.BI Parameters: -.br +.\" ---------------------------------------------------------------------------- +. +The function in the \fBmpath_persistent_reserve_out ()\fR sends PROUT command to +the DM device and gets the response. +.TP +.B Parameters: +.RS +.TP 12 .I fd -.B The file descriptor of a multipath device. Input argument. -.br +The file descriptor of a multipath device. Input argument. +.TP .I rq_servact -.B PROUT command service action. Input argument -.br +PROUT command service action. Input argument. +.TP .I rq_scope -.B Persistent reservation scope. The value should be always LU_SCOPE (0h). -.br +Persistent reservation scope. The value should be always LU_SCOPE (0h). +.TP .I rq_type -.B Persistent reservation type. The valid values of persistent reservation types are - 5h (Write exclusive - registrants only) - 6h (Exclusive access - registrants only) - 7h (Write exclusive - All registrants) - 8h (Exclusive access - All registrants). -.br +Persistent reservation type. The valid values of persistent reservation types are: +.RS +.IP +5h (Write exclusive - registrants only). +.IP +6h (Exclusive access - registrants only). +.IP +7h (Write exclusive - All registrants). +.IP +8h (Exclusive access - All registrants). +.RE +.TP .I paramp -.B PROUT command parameter data. The paramp is a struct which describes PROUT parameter list. Caller should manage the memory allocation of this structure. -.br +PROUT command parameter data. The paramp is a struct which describes PROUT +parameter list. Caller should manage the memory allocation of this structure. +.TP .I noisy -.B Turn on debugging trace: Input argument. 0->Disable, 1->Enable. -.br +Turn on debugging trace: Input argument. 0->Disable, 1->Enable. +.TP .I verbose -.B Set verbosity level. Input argument. value: 0 to 3. 
0->Crits and Errors, 1->Warnings, 2->Info, 3->Debug - -.SH "RETURNS" -.I MPATH_PR_SUCCESS -.B if PR command successful else returns any one of the status mentioned below -.br -.I MPATH_PR_SYNTAX_ERROR -.B if syntax error or invalid parameter -.br -.I MPATH_PR_SENSE_NOT_READY -.B if command fails with [sk,asc,ascq: 0x2,*,*] -.br -.I MPATH_PR_SENSE_MEDIUM_ERROR -.B if command fails with [sk,asc,ascq: 0x3,*,*] -.br -.I MPATH_PR_SENSE_HARDWARE_ERROR -.B if command fails with [sk,asc,ascq: 0x4,*,*] -.br -.I MPATH_PR_SENSE_INVALID_OP -.B if command fails with [sk,asc,ascq: 0x5,0x20,0x0] -.br -.I MPATH_PR_ILLEGAL_REQ -.B if command fails with [sk,asc,ascq: 0x5,*,*] -.br -.I MPATH_PR_SENSE_UNIT_ATTENTION -.B if command fails with [sk,asc,ascq: 0x6,*,*] -.br -.I MPATH_PR_SENSE_ABORTED_COMMAND -.B if command fails with [sk,asc,ascq: 0xb,*,*] -.br -.I MPATH_PR_NO_SENSE -.B if command fails with [sk,asc,ascq: 0x0,*,*] -.br -.I MPATH_PR_SENSE_MALFORMED -.B if command fails with SCSI command malformed -.br -.I MPATH_PR_RESERV_CONFLICT -.B if command fails with reservation conflict -.br -.I MPATH_PR_FILE_ERROR -.B if command fails while accessing file (device node) problems(e.g. not found) -.br -.I MPATH_PR_DMMP_ERROR -.B if Device Mapper related error.(e.g Error in getting dm info) -.br -.I MPATH_PR_OTHER -.B if other error/warning has occurred(e.g transport or driver error) -.br - - +Set verbosity level. Input argument. value: 0 to 3. 0->Crits and Errors, 1->Warnings, 2->Info, 3->Debug. +.RE +. +. +.\" ---------------------------------------------------------------------------- +.SH RETURNS +.\" ---------------------------------------------------------------------------- +. +.TP 12 +.B MPATH_PR_SUCCESS +If PR command successful else returns any one of the status mentioned below. +.TP +.B MPATH_PR_SYNTAX_ERROR +If syntax error or invalid parameter. +.TP +.B MPATH_PR_SENSE_NOT_READY +If command fails with [sk,asc,ascq: 0x2,*,*]. +.TP +.B MPATH_PR_SENSE_MEDIUM_ERROR +If command fails with [sk,asc,ascq: 0x3,*,*]. +.TP +.B MPATH_PR_SENSE_HARDWARE_ERROR +If command fails with [sk,asc,ascq: 0x4,*,*]. +.TP +.B MPATH_PR_SENSE_INVALID_OP +If command fails with [sk,asc,ascq: 0x5,0x20,0x0]. +.TP +.B MPATH_PR_ILLEGAL_REQ +If command fails with [sk,asc,ascq: 0x5,*,*]. +.TP +.B MPATH_PR_SENSE_UNIT_ATTENTION +If command fails with [sk,asc,ascq: 0x6,*,*]. +.TP +.B MPATH_PR_SENSE_ABORTED_COMMAND +If command fails with [sk,asc,ascq: 0xb,*,*]. +.TP +.B MPATH_PR_NO_SENSE +If command fails with [sk,asc,ascq: 0x0,*,*]. +.TP +.B MPATH_PR_SENSE_MALFORMED +If command fails with SCSI command malformed. +.TP +.B MPATH_PR_FILE_ERROR +If command fails while accessing file (device node) problems(e.g. not found). +.TP +.B MPATH_PR_DMMP_ERROR +If Device Mapper related error.(e.g Error in getting dm info). +.TP +.B MPATH_PR_OTHER +If other error/warning has occurred(e.g transport or driver error). +.TP +.B MPATH_PR_RESERV_CONFLICT +If command fails with reservation conflict. +. +. +.\" ---------------------------------------------------------------------------- .SH "SEE ALSO" -.I mpath_persistent_reserve_in mpathpersist /usr/share/doc/mpathpersist/README -.br +.\" ---------------------------------------------------------------------------- +. +.BR mpathpersist (8). +. +. +.\" ---------------------------------------------------------------------------- +.SH AUTHORS +.\" ---------------------------------------------------------------------------- +. +\fImultipath-tools\fR was developed by Christophe Varoqui +and others. 
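Both persistent-reservation entry points report the same MPATH_PR_* status codes, so a caller usually wants to turn them into readable diagnostics. The following is a small caller-side sketch, not a function provided by libmpathpersist, that maps the codes listed under RETURNS above to short messages; the header name <mpath_persist.h> is assumed, and only codes named in these pages are used.

/* Caller-side helper (illustrative only, not part of libmpathpersist):
 * map the MPATH_PR_* status codes documented above to short strings.
 */
#include <mpath_persist.h>   /* assumed header name, see SYNOPSIS */

static const char *mpath_pr_strerror(int rc)
{
	switch (rc) {
	case MPATH_PR_SUCCESS:               return "success";
	case MPATH_PR_SYNTAX_ERROR:          return "syntax error or invalid parameter";
	case MPATH_PR_RESERV_CONFLICT:       return "reservation conflict";
	case MPATH_PR_SENSE_NOT_READY:       return "device not ready";
	case MPATH_PR_SENSE_MEDIUM_ERROR:    return "medium error";
	case MPATH_PR_SENSE_HARDWARE_ERROR:  return "hardware error";
	case MPATH_PR_SENSE_INVALID_OP:      return "invalid opcode";
	case MPATH_PR_ILLEGAL_REQ:           return "illegal request";
	case MPATH_PR_SENSE_UNIT_ATTENTION:  return "unit attention";
	case MPATH_PR_SENSE_ABORTED_COMMAND: return "command aborted";
	case MPATH_PR_NO_SENSE:              return "no sense data";
	case MPATH_PR_SENSE_MALFORMED:       return "malformed SCSI command";
	case MPATH_PR_FILE_ERROR:            return "could not access device node";
	case MPATH_PR_DMMP_ERROR:            return "device-mapper error";
	case MPATH_PR_OTHER:                 return "transport or driver error";
	default:                             return "unknown status";
	}
}

A caller would typically log mpath_pr_strerror(ret) together with the raw value whenever either function returns something other than MPATH_PR_SUCCESS.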
+.\" EOF diff --git a/libmpathpersist/mpathpr.h b/libmpathpersist/mpathpr.h index cd58201..056c547 100644 --- a/libmpathpersist/mpathpr.h +++ b/libmpathpersist/mpathpr.h @@ -26,7 +26,7 @@ struct threadinfo { }; -struct config * conf; +extern struct config *conf; int prin_do_scsi_ioctl(char * dev, int rq_servact, struct prin_resp * resp, int noisy); diff --git a/libmultipath/Makefile b/libmultipath/Makefile index 3a20f8e..495cebe 100644 --- a/libmultipath/Makefile +++ b/libmultipath/Makefile @@ -9,7 +9,7 @@ LIBS = $(DEVLIB).$(SONAME) CFLAGS += -I$(mpathcmddir) -LIBDEPS = -lpthread -ldl -ldevmapper -ludev -L$(mpathcmddir) -lmpathcmd +LIBDEPS += -lpthread -ldl -ldevmapper -ludev -L$(mpathcmddir) -lmpathcmd ifdef SYSTEMD CFLAGS += -DUSE_SYSTEMD=$(SYSTEMD) @@ -47,7 +47,7 @@ endif OBJS = memory.o parser.o vector.o devmapper.o callout.o \ hwtable.o blacklist.o util.o dmparser.o config.o \ structs.o discovery.o propsel.o dict.o \ - pgpolicies.o debug.o defaults.o uevent.o \ + pgpolicies.o debug.o defaults.o uevent.o time-util.o \ switchgroup.o uxsock.o print.o alias.o log_pthread.o \ log.o configure.o structs_vec.o sysfs.o prio.o checkers.o \ lock.o waiter.o file.o wwids.o prioritizers/alua_rtpg.o diff --git a/libmultipath/alias.c b/libmultipath/alias.c index b86843a..12afef8 100644 --- a/libmultipath/alias.c +++ b/libmultipath/alias.c @@ -219,7 +219,7 @@ allocate_binding(int fd, char *wwid, int id, char *prefix) strerror(errno)); return NULL; } - if (write_all(fd, buf, strlen(buf)) != strlen(buf)){ + if (write(fd, buf, strlen(buf)) != strlen(buf)){ condlog(0, "Cannot write binding to bindings file : %s", strerror(errno)); /* clear partial write */ diff --git a/libmultipath/blacklist.c b/libmultipath/blacklist.c index 9687399..f6c4506 100644 --- a/libmultipath/blacklist.c +++ b/libmultipath/blacklist.c @@ -172,7 +172,7 @@ setup_default_blist (struct config * conf) char * str; int i; - str = STRDUP("^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"); + str = STRDUP("^(ram|raw|loop|fd|md|dm-|sr|scd|st|dcssblk)[0-9]"); if (!str) return 1; if (store_ble(conf->blist_devnode, str, ORIGIN_DEFAULT)) @@ -184,19 +184,13 @@ setup_default_blist (struct config * conf) if (store_ble(conf->blist_devnode, str, ORIGIN_DEFAULT)) return 1; - str = STRDUP("^dcssblk[0-9]*"); + str = STRDUP("^nvme"); if (!str) return 1; if (store_ble(conf->blist_devnode, str, ORIGIN_DEFAULT)) return 1; - str = STRDUP("^nvme.*"); - if (!str) - return 1; - if (store_ble(conf->blist_devnode, str, ORIGIN_DEFAULT)) - return 1; - - str = STRDUP("(SCSI_IDENT_.*|ID_WWN)"); + str = STRDUP("(SCSI_IDENT_|ID_WWN)"); if (!str) return 1; if (store_ble(conf->elist_property, str, ORIGIN_DEFAULT)) diff --git a/libmultipath/checkers.h b/libmultipath/checkers.h index 4fb97c9..fedc330 100644 --- a/libmultipath/checkers.h +++ b/libmultipath/checkers.h @@ -142,4 +142,10 @@ char * checker_message (struct checker *); void checker_clear_message (struct checker *c); void checker_get (char *, struct checker *, char *); +/* Functions exported by path checker dynamic libraries (.so) */ +int libcheck_check(struct checker *); +int libcheck_init(struct checker *); +void libcheck_free(struct checker *); +void libcheck_repair(struct checker *); + #endif /* _CHECKERS_H */ diff --git a/libmultipath/checkers/rbd.c b/libmultipath/checkers/rbd.c index 6f1b53a..481d860 100644 --- a/libmultipath/checkers/rbd.c +++ b/libmultipath/checkers/rbd.c @@ -26,13 +26,16 @@ #include "checkers.h" #include "../libmultipath/debug.h" -#include "../libmultipath/uevent.h" +#include 
"../libmultipath/util.h" +#include "../libmultipath/time-util.h" struct rbd_checker_context; typedef int (thread_fn)(struct rbd_checker_context *ct, char *msg); #define RBD_MSG(msg, fmt, args...) snprintf(msg, CHECKER_MSG_LEN, fmt, ##args); +#define RBD_FEATURE_EXCLUSIVE_LOCK (1 << 2) + struct rbd_checker_context { int rbd_bus_id; char *client_addr; @@ -43,6 +46,7 @@ struct rbd_checker_context { char *username; int remapped; int blacklisted; + unsigned lock_on_read:1; rados_t cluster; @@ -65,8 +69,9 @@ int libcheck_init(struct checker * c) struct udev_device *bus_dev; struct udev *udev; struct stat sb; - const char *block_name, *addr, *config_info; + const char *block_name, *addr, *config_info, *features_str; const char *image, *pool, *snap, *username; + uint64_t features = 0; char sysfs_path[PATH_SIZE]; int ret; @@ -75,7 +80,7 @@ int libcheck_init(struct checker * c) return 1; memset(ct, 0, sizeof(struct rbd_checker_context)); ct->holders = 1; - pthread_cond_init(&ct->active, NULL); + pthread_cond_init_mono(&ct->active); pthread_mutex_init(&ct->lock, NULL); pthread_spin_init(&ct->hldr_lock, PTHREAD_PROCESS_PRIVATE); c->context = ct; @@ -110,8 +115,8 @@ int libcheck_init(struct checker * c) addr = udev_device_get_sysattr_value(bus_dev, "client_addr"); if (!addr) { - condlog(0, "Could not find client_addr in rbd sysfs. Try " - "updating kernel"); + condlog(0, "rbd%d: Could not find client_addr in rbd sysfs. " + "Try updating kernel", ct->rbd_bus_id); goto free_dev; } @@ -119,10 +124,28 @@ int libcheck_init(struct checker * c) if (!ct->client_addr) goto free_dev; + features_str = udev_device_get_sysattr_value(bus_dev, "features"); + if (!features_str) + goto free_addr; + features = strtoll(features_str, NULL, 16); + if (!(features & RBD_FEATURE_EXCLUSIVE_LOCK)) { + condlog(3, "rbd%d: Exclusive lock not set.", ct->rbd_bus_id); + goto free_addr; + } + config_info = udev_device_get_sysattr_value(bus_dev, "config_info"); if (!config_info) goto free_addr; + if (!strstr(config_info, "noshare")) { + condlog(3, "rbd%d: Only nonshared clients supported.", + ct->rbd_bus_id); + goto free_addr; + } + + if (strstr(config_info, "lock_on_read")) + ct->lock_on_read = 1; + ct->config_info = strdup(config_info); if (!ct->config_info) goto free_addr; @@ -172,18 +195,20 @@ int libcheck_init(struct checker * c) } if (rados_create(&ct->cluster, NULL) < 0) { - condlog(0, "Could not create rados cluster"); + condlog(0, "rbd%d: Could not create rados cluster", + ct->rbd_bus_id); goto free_snap; } if (rados_conf_read_file(ct->cluster, NULL) < 0) { - condlog(0, "Could not read rados conf"); + condlog(0, "rbd%d: Could not read rados conf", ct->rbd_bus_id); goto shutdown_rados; } ret = rados_connect(ct->cluster); if (ret < 0) { - condlog(0, "Could not connect to rados cluster"); + condlog(0, "rbd%d: Could not connect to rados cluster", + ct->rbd_bus_id); goto shutdown_rados; } @@ -220,7 +245,7 @@ free_ct: return 1; } -void cleanup_context(struct rbd_checker_context *ct) +static void cleanup_context(struct rbd_checker_context *ct) { pthread_mutex_destroy(&ct->lock); pthread_cond_destroy(&ct->active); @@ -274,8 +299,7 @@ static int rbd_is_blacklisted(struct rbd_checker_context *ct, char *msg) ret = rados_mon_command(ct->cluster, (const char **)cmd, 1, "", 0, &blklist, &blklist_len, &stat, &stat_len); if (ret < 0) { - RBD_MSG(msg, "rbd checker failed: mon command failed %d", - ret); + RBD_MSG(msg, "checker failed: mon command failed %d", ret); return ret; } @@ -296,16 +320,15 @@ static int rbd_is_blacklisted(struct 
rbd_checker_context *ct, char *msg) end = strchr(addr_tok, ' '); if (!end) { - RBD_MSG(msg, "rbd%d checker failed: invalid blacklist %s", - ct->rbd_bus_id, addr_tok); + RBD_MSG(msg, "checker failed: invalid blacklist %s", + addr_tok); break; } *end = '\0'; if (!strcmp(addr_tok, ct->client_addr)) { ct->blacklisted = 1; - RBD_MSG(msg, "rbd%d checker: %s is blacklisted", - ct->rbd_bus_id, ct->client_addr); + RBD_MSG(msg, "%s is blacklisted", ct->client_addr); ret = 1; break; } @@ -317,12 +340,12 @@ free_bufs: return ret; } -int rbd_check(struct rbd_checker_context *ct, char *msg) +static int rbd_check(struct rbd_checker_context *ct, char *msg) { if (ct->blacklisted || rbd_is_blacklisted(ct, msg) == 1) return PATH_DOWN; - RBD_MSG(msg, "rbd checker reports path is up"); + RBD_MSG(msg, "checker reports path is up"); /* * Path may have issues, but the ceph cluster is at least * accepting IO, so we can attempt to do IO. @@ -333,7 +356,7 @@ int rbd_check(struct rbd_checker_context *ct, char *msg) return PATH_UP; } -int safe_write(int fd, const void *buf, size_t count) +static int safe_write(int fd, const void *buf, size_t count) { while (count > 0) { ssize_t r = write(fd, buf, count); @@ -355,7 +378,7 @@ static int sysfs_write_rbd_bus(const char *which, const char *buf, int fd; int r; - /* we require newer kernels so single_major should alwayws be there */ + /* we require newer kernels so single_major should always be there */ snprintf(sysfs_path, sizeof(sysfs_path), "/sys/bus/rbd/%s_single_major", which); fd = open(sysfs_path, O_WRONLY); @@ -379,7 +402,10 @@ static int rbd_remap(struct rbd_checker_context *ct) case 0: argv[i++] = "rbd"; argv[i++] = "map"; - argv[i++] = "-o noshare"; + if (ct->lock_on_read) + argv[i++] = "-o noshare,lock_on_read"; + else + argv[i++] = "-o noshare"; if (ct->username) { argv[i++] = "--id"; argv[i++] = ct->username; @@ -394,10 +420,12 @@ static int rbd_remap(struct rbd_checker_context *ct) argv[i] = NULL; ret = execvp(argv[0], argv); - condlog(0, "Error executing rbd: %s", strerror(errno)); + condlog(0, "rbd%d: Error executing rbd: %s", ct->rbd_bus_id, + strerror(errno)); exit(-1); case -1: - condlog(0, "fork failed: %s", strerror(errno)); + condlog(0, "rbd%d: fork failed: %s", ct->rbd_bus_id, + strerror(errno)); return -1; default: ret = -1; @@ -407,7 +435,8 @@ static int rbd_remap(struct rbd_checker_context *ct) if (status == 0) ret = 0; else - condlog(0, "rbd failed with %d", status); + condlog(0, "rbd%d: failed with %d", + ct->rbd_bus_id, status); } } @@ -435,14 +464,14 @@ static int rbd_rm_blacklist(struct rbd_checker_context *ct) cmd[1] = NULL; ret = rados_mon_command(ct->cluster, (const char **)cmd, 1, "", 0, - NULL, 0, &stat, &stat_len); + NULL, NULL, &stat, &stat_len); if (ret < 0) { - condlog(1, "rbd%d repair failed to remove blacklist for %s %d", + condlog(1, "rbd%d: repair failed to remove blacklist for %s %d", ct->rbd_bus_id, ct->client_addr, ret); goto free_cmd; } - condlog(1, "rbd%d repair rm blacklist for %s", + condlog(1, "rbd%d: repair rm blacklist for %s", ct->rbd_bus_id, ct->client_addr); free(stat); free_cmd: @@ -461,8 +490,7 @@ static int rbd_repair(struct rbd_checker_context *ct, char *msg) if (!ct->remapped) { ret = rbd_remap(ct); if (ret) { - RBD_MSG(msg, "rbd%d repair failed to remap. Err %d", - ct->rbd_bus_id, ret); + RBD_MSG(msg, "repair failed to remap. 
Err %d", ret); return PATH_DOWN; } } @@ -471,29 +499,28 @@ static int rbd_repair(struct rbd_checker_context *ct, char *msg) snprintf(del, sizeof(del), "%d force", ct->rbd_bus_id); ret = sysfs_write_rbd_remove(del, strlen(del) + 1); if (ret) { - RBD_MSG(msg, "rbd%d repair failed to clean up. Err %d", - ct->rbd_bus_id, ret); + RBD_MSG(msg, "repair failed to clean up. Err %d", ret); return PATH_DOWN; } ret = rbd_rm_blacklist(ct); if (ret) { - RBD_MSG(msg, "rbd%d repair could not remove blacklist entry. Err %d", - ct->rbd_bus_id, ret); + RBD_MSG(msg, "repair could not remove blacklist entry. Err %d", + ret); return PATH_DOWN; } ct->remapped = 0; ct->blacklisted = 0; - RBD_MSG(msg, "rbd%d has been repaired", ct->rbd_bus_id); + RBD_MSG(msg, "has been repaired"); return PATH_UP; } #define rbd_thread_cleanup_push(ct) pthread_cleanup_push(cleanup_func, ct) #define rbd_thread_cleanup_pop(ct) pthread_cleanup_pop(1) -void cleanup_func(void *data) +static void cleanup_func(void *data) { int holders; struct rbd_checker_context *ct = data; @@ -506,12 +533,12 @@ void cleanup_func(void *data) cleanup_context(ct); } -void *rbd_thread(void *ctx) +static void *rbd_thread(void *ctx) { struct rbd_checker_context *ct = ctx; int state; - condlog(3, "rbd%d thread starting up", ct->rbd_bus_id); + condlog(3, "rbd%d: thread starting up", ct->rbd_bus_id); ct->message[0] = '\0'; /* This thread can be canceled, so setup clean up */ @@ -527,10 +554,10 @@ void *rbd_thread(void *ctx) /* checker done */ pthread_mutex_lock(&ct->lock); ct->state = state; - pthread_mutex_unlock(&ct->lock); pthread_cond_signal(&ct->active); + pthread_mutex_unlock(&ct->lock); - condlog(3, "rbd%d thead finished, state %s", ct->rbd_bus_id, + condlog(3, "rbd%d: thead finished, state %s", ct->rbd_bus_id, checker_state_name(state)); rbd_thread_cleanup_pop(ct); return ((void *)0); @@ -538,12 +565,9 @@ void *rbd_thread(void *ctx) static void rbd_timeout(struct timespec *tsp) { - struct timeval now; - - gettimeofday(&now, NULL); - tsp->tv_sec = now.tv_sec; - tsp->tv_nsec = now.tv_usec * 1000; - tsp->tv_nsec += 1000000; /* 1 millisecond */ + clock_gettime(CLOCK_MONOTONIC, tsp); + tsp->tv_nsec += 1000 * 1000; /* 1 millisecond */ + normalize_timespec(tsp); } static int rbd_exec_fn(struct checker *c, thread_fn *fn) @@ -554,22 +578,23 @@ static int rbd_exec_fn(struct checker *c, thread_fn *fn) int rbd_status, r; if (c->sync) - return rbd_check(ct, c->message); + return fn(ct, c->message); /* * Async mode */ r = pthread_mutex_lock(&ct->lock); if (r != 0) { - condlog(2, "rbd%d mutex lock failed with %d", ct->rbd_bus_id, + condlog(2, "rbd%d: mutex lock failed with %d", ct->rbd_bus_id, r); - MSG(c, "rbd%d thread failed to initialize", ct->rbd_bus_id); + MSG(c, "rbd%d: thread failed to initialize", ct->rbd_bus_id); return PATH_WILD; } if (ct->running) { /* Check if checker is still running */ if (ct->thread) { - condlog(3, "rbd%d thread not finished", ct->rbd_bus_id); + condlog(3, "rbd%d: thread not finished", + ct->rbd_bus_id); rbd_status = PATH_PENDING; } else { /* checker done */ @@ -606,7 +631,7 @@ static int rbd_exec_fn(struct checker *c, thread_fn *fn) if (ct->thread && (rbd_status == PATH_PENDING || rbd_status == PATH_UNCHECKED)) { - condlog(3, "rbd%d thread still running", + condlog(3, "rbd%d: thread still running", ct->rbd_bus_id); ct->running = 1; rbd_status = PATH_PENDING; diff --git a/libmultipath/checkers/tur.c b/libmultipath/checkers/tur.c index c2ff5e9..92200aa 100644 --- a/libmultipath/checkers/tur.c +++ b/libmultipath/checkers/tur.c @@ -19,7 
+19,9 @@ #include "../libmultipath/debug.h" #include "../libmultipath/sg_include.h" -#include "../libmultipath/uevent.h" +#include "../libmultipath/util.h" +#include "../libmultipath/time-util.h" +#include "../libmultipath/util.h" #define TUR_CMD_LEN 6 #define HEAVY_CHECK_COUNT 10 @@ -46,11 +48,23 @@ struct tur_checker_context { char message[CHECKER_MSG_LEN]; }; -#define TUR_DEVT(c) major((c)->devt), minor((c)->devt) +static const char *tur_devt(char *devt_buf, int size, + struct tur_checker_context *ct) +{ + dev_t devt; + + pthread_mutex_lock(&ct->lock); + devt = ct->devt; + pthread_mutex_unlock(&ct->lock); + + snprintf(devt_buf, size, "%d:%d", major(devt), minor(devt)); + return devt_buf; +} int libcheck_init (struct checker * c) { struct tur_checker_context *ct; + pthread_mutexattr_t attr; ct = malloc(sizeof(struct tur_checker_context)); if (!ct) @@ -60,15 +74,18 @@ int libcheck_init (struct checker * c) ct->state = PATH_UNCHECKED; ct->fd = -1; ct->holders = 1; - pthread_cond_init(&ct->active, NULL); - pthread_mutex_init(&ct->lock, NULL); + pthread_cond_init_mono(&ct->active); + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&ct->lock, &attr); + pthread_mutexattr_destroy(&attr); pthread_spin_init(&ct->hldr_lock, PTHREAD_PROCESS_PRIVATE); c->context = ct; return 0; } -void cleanup_context(struct tur_checker_context *ct) +static void cleanup_context(struct tur_checker_context *ct) { pthread_mutex_destroy(&ct->lock); pthread_cond_destroy(&ct->active); @@ -102,10 +119,17 @@ void libcheck_repair (struct checker * c) return; } -#define TUR_MSG(msg, fmt, args...) snprintf(msg, CHECKER_MSG_LEN, fmt, ##args); - -int -tur_check(int fd, unsigned int timeout, char *msg) +#define TUR_MSG(fmt, args...) 
\ + do { \ + char msg[CHECKER_MSG_LEN]; \ + \ + snprintf(msg, sizeof(msg), fmt, ##args); \ + copy_message(cb_arg, msg); \ + } while (0) + +static int +tur_check(int fd, unsigned int timeout, + void (*copy_message)(void *, const char *), void *cb_arg) { struct sg_io_hdr io_hdr; unsigned char turCmdBlk[TUR_CMD_LEN] = { 0x00, 0, 0, 0, 0, 0 }; @@ -124,7 +148,7 @@ retry: io_hdr.timeout = timeout * 1000; io_hdr.pack_id = 0; if (ioctl(fd, SG_IO, &io_hdr) < 0) { - TUR_MSG(msg, MSG_TUR_DOWN); + TUR_MSG(MSG_TUR_DOWN); return PATH_DOWN; } if ((io_hdr.status & 0x7e) == 0x18) { @@ -132,7 +156,7 @@ retry: * SCSI-3 arrays might return * reservation conflict on TUR */ - TUR_MSG(msg, MSG_TUR_UP); + TUR_MSG(MSG_TUR_UP); return PATH_UP; } if (io_hdr.info & SG_INFO_OK_MASK) { @@ -177,21 +201,21 @@ retry: * LOGICAL UNIT NOT ACCESSIBLE, * TARGET PORT IN STANDBY STATE */ - TUR_MSG(msg, MSG_TUR_GHOST); + TUR_MSG(MSG_TUR_GHOST); return PATH_GHOST; } } - TUR_MSG(msg, MSG_TUR_DOWN); + TUR_MSG(MSG_TUR_DOWN); return PATH_DOWN; } - TUR_MSG(msg, MSG_TUR_UP); + TUR_MSG(MSG_TUR_UP); return PATH_UP; } #define tur_thread_cleanup_push(ct) pthread_cleanup_push(cleanup_func, ct) #define tur_thread_cleanup_pop(ct) pthread_cleanup_pop(1) -void cleanup_func(void *data) +static void cleanup_func(void *data) { int holders; struct tur_checker_context *ct = data; @@ -204,65 +228,92 @@ void cleanup_func(void *data) cleanup_context(ct); } -void *tur_thread(void *ctx) +static int tur_running(struct tur_checker_context *ct) +{ + pthread_t thread; + + pthread_spin_lock(&ct->hldr_lock); + thread = ct->thread; + pthread_spin_unlock(&ct->hldr_lock); + + return thread != 0; +} + +static void copy_msg_to_tcc(void *ct_p, const char *msg) +{ + struct tur_checker_context *ct = ct_p; + + pthread_mutex_lock(&ct->lock); + strlcpy(ct->message, msg, sizeof(ct->message)); + pthread_mutex_unlock(&ct->lock); +} + +static void *tur_thread(void *ctx) { struct tur_checker_context *ct = ctx; int state; + char devt[32]; - condlog(3, "%d:%d: tur checker starting up", TUR_DEVT(ct)); + condlog(3, "%s: tur checker starting up", + tur_devt(devt, sizeof(devt), ct)); - ct->message[0] = '\0'; /* This thread can be canceled, so setup clean up */ - tur_thread_cleanup_push(ct) + tur_thread_cleanup_push(ct); /* TUR checker start up */ pthread_mutex_lock(&ct->lock); ct->state = PATH_PENDING; + ct->message[0] = '\0'; pthread_mutex_unlock(&ct->lock); - state = tur_check(ct->fd, ct->timeout, ct->message); + state = tur_check(ct->fd, ct->timeout, copy_msg_to_tcc, ct->message); /* TUR checker done */ pthread_mutex_lock(&ct->lock); ct->state = state; - pthread_mutex_unlock(&ct->lock); pthread_cond_signal(&ct->active); + pthread_mutex_unlock(&ct->lock); - condlog(3, "%d:%d: tur checker finished, state %s", - TUR_DEVT(ct), checker_state_name(state)); + condlog(3, "%s: tur checker finished, state %s", + tur_devt(devt, sizeof(devt), ct), checker_state_name(state)); tur_thread_cleanup_pop(ct); + return ((void *)0); } -void tur_timeout(struct timespec *tsp) +static void tur_timeout(struct timespec *tsp) { - struct timeval now; - - gettimeofday(&now, NULL); - tsp->tv_sec = now.tv_sec; - tsp->tv_nsec = now.tv_usec * 1000; - tsp->tv_nsec += 1000000; /* 1 millisecond */ + clock_gettime(CLOCK_MONOTONIC, tsp); + tsp->tv_nsec += 1000 * 1000; /* 1 millisecond */ + normalize_timespec(tsp); } -void tur_set_async_timeout(struct checker *c) +static void tur_set_async_timeout(struct checker *c) { struct tur_checker_context *ct = c->context; - struct timeval now; + struct timespec now; - 
gettimeofday(&now, NULL); + clock_gettime(CLOCK_MONOTONIC, &now); ct->time = now.tv_sec + c->timeout; } -int tur_check_async_timeout(struct checker *c) +static int tur_check_async_timeout(struct checker *c) { struct tur_checker_context *ct = c->context; - struct timeval now; + struct timespec now; - gettimeofday(&now, NULL); + clock_gettime(CLOCK_MONOTONIC, &now); return (now.tv_sec > ct->time); } +static void copy_msg_to_checker(void *c_p, const char *msg) +{ + struct checker *c = c_p; + + strlcpy(c->message, msg, sizeof(c->message)); +} + extern int libcheck_check (struct checker * c) { @@ -271,41 +322,51 @@ libcheck_check (struct checker * c) struct stat sb; pthread_attr_t attr; int tur_status, r; + char devt[32]; if (!ct) return PATH_UNCHECKED; - if (fstat(c->fd, &sb) == 0) + if (fstat(c->fd, &sb) == 0) { + pthread_mutex_lock(&ct->lock); ct->devt = sb.st_rdev; + pthread_mutex_unlock(&ct->lock); + } if (c->sync) - return tur_check(c->fd, c->timeout, c->message); + return tur_check(c->fd, c->timeout, copy_msg_to_checker, c); /* * Async mode */ r = pthread_mutex_lock(&ct->lock); if (r != 0) { - condlog(2, "%d:%d: tur mutex lock failed with %d", - TUR_DEVT(ct), r); + condlog(2, "%s: tur mutex lock failed with %d", + tur_devt(devt, sizeof(devt), ct), r); MSG(c, MSG_TUR_FAILED); return PATH_WILD; } if (ct->running) { - /* Check if TUR checker is still running */ + /* + * Check if TUR checker is still running. Hold hldr_lock + * around the pthread_cancel() call to avoid that + * pthread_cancel() gets called after the (detached) TUR + * thread has exited. + */ + pthread_spin_lock(&ct->hldr_lock); if (ct->thread) { if (tur_check_async_timeout(c)) { - condlog(3, "%d:%d: tur checker timeout", - TUR_DEVT(ct)); + condlog(3, "%s: tur checker timeout", + tur_devt(devt, sizeof(devt), ct)); pthread_cancel(ct->thread); ct->running = 0; MSG(c, MSG_TUR_TIMEOUT); tur_status = PATH_TIMEOUT; } else { - condlog(3, "%d:%d: tur checker not finished", - TUR_DEVT(ct)); + condlog(3, "%s: tur checker not finished", + tur_devt(devt, sizeof(devt), ct)); ct->running++; tur_status = PATH_PENDING; } @@ -313,16 +374,16 @@ libcheck_check (struct checker * c) /* TUR checker done */ ct->running = 0; tur_status = ct->state; - strncpy(c->message, ct->message, CHECKER_MSG_LEN); - c->message[CHECKER_MSG_LEN - 1] = '\0'; + strlcpy(c->message, ct->message, sizeof(c->message)); } + pthread_spin_unlock(&ct->hldr_lock); pthread_mutex_unlock(&ct->lock); } else { - if (ct->thread) { + if (tur_running(ct)) { /* pthread cancel failed. 
continue in sync mode */ pthread_mutex_unlock(&ct->lock); - condlog(3, "%d:%d: tur thread not responding", - TUR_DEVT(ct)); + condlog(3, "%s: tur thread not responding", + tur_devt(devt, sizeof(devt), ct)); return PATH_TIMEOUT; } /* Start new TUR checker */ @@ -335,27 +396,27 @@ libcheck_check (struct checker * c) tur_set_async_timeout(c); setup_thread_attr(&attr, 32 * 1024, 1); r = pthread_create(&ct->thread, &attr, tur_thread, ct); + pthread_attr_destroy(&attr); if (r) { pthread_spin_lock(&ct->hldr_lock); ct->holders--; pthread_spin_unlock(&ct->hldr_lock); pthread_mutex_unlock(&ct->lock); ct->thread = 0; - condlog(3, "%d:%d: failed to start tur thread, using" - " sync mode", TUR_DEVT(ct)); - return tur_check(c->fd, c->timeout, c->message); + condlog(3, "%s: failed to start tur thread, using" + " sync mode", tur_devt(devt, sizeof(devt), ct)); + return tur_check(c->fd, c->timeout, + copy_msg_to_checker, c); } - pthread_attr_destroy(&attr); tur_timeout(&tsp); r = pthread_cond_timedwait(&ct->active, &ct->lock, &tsp); tur_status = ct->state; - strncpy(c->message, ct->message,CHECKER_MSG_LEN); - c->message[CHECKER_MSG_LEN - 1] = '\0'; + strlcpy(c->message, ct->message, sizeof(c->message)); pthread_mutex_unlock(&ct->lock); - if (ct->thread && + if (tur_running(ct) && (tur_status == PATH_PENDING || tur_status == PATH_UNCHECKED)) { - condlog(3, "%d:%d: tur checker still running", - TUR_DEVT(ct)); + condlog(3, "%s: tur checker still running", + tur_devt(devt, sizeof(devt), ct)); ct->running = 1; tur_status = PATH_PENDING; } diff --git a/libmultipath/config.c b/libmultipath/config.c index a48b8af..2d629ef 100644 --- a/libmultipath/config.c +++ b/libmultipath/config.c @@ -80,7 +80,8 @@ hwe_regmatch (struct hwentry *hwe1, struct hwentry *hwe2) regcomp(&rre, hwe1->revision, REG_EXTENDED|REG_NOSUB)) goto out_pre; - if ((!hwe1->vendor || !hwe2->vendor || + if ((hwe2->vendor || hwe2->product || hwe2->revision) && + (!hwe1->vendor || !hwe2->vendor || !regexec(&vre, hwe2->vendor, 0, NULL, 0)) && (!hwe1->product || !hwe2->product || !regexec(&pre, hwe2->product, 0, NULL, 0)) && @@ -347,6 +348,7 @@ merge_hwe (struct hwentry * dst, struct hwentry * src) merge_num(deferred_remove); merge_num(delay_watch_checks); merge_num(delay_wait_checks); + merge_num(skip_kpartx); /* * Make sure features is consistent with @@ -597,7 +599,7 @@ load_config (char * file) conf->wwids_file = set_default(DEFAULT_WWIDS_FILE); conf->multipath_dir = set_default(DEFAULT_MULTIPATHDIR); conf->features = set_default(DEFAULT_FEATURES); - conf->flush_on_last_del = 0; + conf->flush_on_last_del = DEFAULT_FLUSH; conf->attribute_flags = 0; conf->reassign_maps = DEFAULT_REASSIGN_MAPS; conf->checkint = DEFAULT_CHECKINT; @@ -606,8 +608,8 @@ load_config (char * file) conf->fast_io_fail = DEFAULT_FAST_IO_FAIL; conf->retain_hwhandler = DEFAULT_RETAIN_HWHANDLER; conf->detect_prio = DEFAULT_DETECT_PRIO; - conf->force_sync = 0; - conf->partition_delim = NULL; + conf->force_sync = DEFAULT_FORCE_SYNC; + conf->partition_delim = DEFAULT_PARTITION_DELIM; conf->processed_main_config = 0; conf->find_multipaths = DEFAULT_FIND_MULTIPATHS; conf->uxsock_timeout = DEFAULT_REPLY_TIMEOUT; @@ -616,6 +618,8 @@ load_config (char * file) conf->retrigger_delay = DEFAULT_RETRIGGER_DELAY; conf->uev_wait_timeout = DEFAULT_UEV_WAIT_TIMEOUT; conf->deferred_remove = DEFAULT_DEFERRED_REMOVE; + conf->skip_kpartx = DEFAULT_SKIP_KPARTX; + conf->disable_changed_wwids = DEFAULT_DISABLE_CHANGED_WWIDS; /* * preload default hwtable diff --git a/libmultipath/config.h 
b/libmultipath/config.h index a41207a..dbdaa44 100644 --- a/libmultipath/config.h +++ b/libmultipath/config.h @@ -65,6 +65,7 @@ struct hwentry { int deferred_remove; int delay_watch_checks; int delay_wait_checks; + int skip_kpartx; char * bl_product; }; @@ -91,6 +92,7 @@ struct mpentry { int deferred_remove; int delay_watch_checks; int delay_wait_checks; + int skip_kpartx; uid_t uid; gid_t gid; mode_t mode; @@ -141,6 +143,8 @@ struct config { int ignore_new_devs; int delayed_reconfig; int uev_wait_timeout; + int skip_kpartx; + int disable_changed_wwids; unsigned int version[3]; char * multipath_dir; diff --git a/libmultipath/configure.c b/libmultipath/configure.c index 707e6be..d428099 100644 --- a/libmultipath/configure.c +++ b/libmultipath/configure.c @@ -295,6 +295,7 @@ setup_map (struct multipath * mpp, char * params, int params_size) select_deferred_remove(conf, mpp); select_delay_watch_checks(conf, mpp); select_delay_wait_checks(conf, mpp); + select_skip_kpartx(conf, mpp); sysfs_set_scsi_tmo(mpp, conf->checkint); put_multipath_config(conf); @@ -641,14 +642,14 @@ domap (struct multipath * mpp, char * params, int is_daemon) case ACT_RENAME: conf = get_multipath_config(); r = dm_rename(mpp->alias_old, mpp->alias, - conf->partition_delim); + conf->partition_delim, mpp->skip_kpartx); put_multipath_config(conf); break; case ACT_FORCERENAME: conf = get_multipath_config(); r = dm_rename(mpp->alias_old, mpp->alias, - conf->partition_delim); + conf->partition_delim, mpp->skip_kpartx); put_multipath_config(conf); if (r) r = dm_addmap_reload(mpp, params, 0); @@ -808,8 +809,10 @@ coalesce_paths (struct vectors * vecs, vector newmp, char * refwwid, int force_r * at this point, we know we really got a new mp */ mpp = add_map_with_path(vecs, pp1, 0); - if (!mpp) - return 1; + if (!mpp) { + orphan_path(pp1, "failed to create multipath device"); + continue; + } if (pp1->priority == PRIO_UNDEF) mpp->action = ACT_REJECT; @@ -861,7 +864,7 @@ coalesce_paths (struct vectors * vecs, vector newmp, char * refwwid, int force_r condlog(3, "%s: domap (%u) failure " "for create/reload map", mpp->alias, r); - if (r == DOMAP_FAIL) { + if (r == DOMAP_FAIL || is_daemon) { condlog(2, "%s: %s map", mpp->alias, (mpp->action == ACT_CREATE)? "ignoring" : "removing"); diff --git a/libmultipath/debug.c b/libmultipath/debug.c index b2e344d..fbe171a 100644 --- a/libmultipath/debug.c +++ b/libmultipath/debug.c @@ -8,7 +8,7 @@ #include "log_pthread.h" #include #include - +#include "../third-party/valgrind/drd.h" #include "vector.h" #include "config.h" @@ -20,7 +20,9 @@ void dlog (int sink, int prio, const char * fmt, ...) va_start(ap, fmt); conf = get_multipath_config(); + ANNOTATE_IGNORE_READS_BEGIN(); thres = (conf) ? 
conf->verbosity : 0; + ANNOTATE_IGNORE_READS_END(); put_multipath_config(conf); if (prio <= thres) { diff --git a/libmultipath/defaults.h b/libmultipath/defaults.h index 9bf27d6..a72078f 100644 --- a/libmultipath/defaults.h +++ b/libmultipath/defaults.h @@ -28,9 +28,15 @@ #define DEFAULT_RETRIGGER_DELAY 10 #define DEFAULT_RETRIGGER_TRIES 3 #define DEFAULT_UEV_WAIT_TIMEOUT 30 -#define DEFAULT_PRIO "const" +#define DEFAULT_PRIO PRIO_CONST #define DEFAULT_PRIO_ARGS "" #define DEFAULT_CHECKER TUR +#define DEFAULT_FLUSH FLUSH_DISABLED +#define DEFAULT_USER_FRIENDLY_NAMES USER_FRIENDLY_NAMES_OFF +#define DEFAULT_FORCE_SYNC 0 +#define DEFAULT_PARTITION_DELIM NULL +#define DEFAULT_SKIP_KPARTX SKIP_KPARTX_OFF +#define DEFAULT_DISABLE_CHANGED_WWIDS 0 #define DEFAULT_CHECKINT 5 #define MAX_CHECKINT(a) (a << 2) diff --git a/libmultipath/devmapper.c b/libmultipath/devmapper.c index 5eb1713..5aea5b6 100644 --- a/libmultipath/devmapper.c +++ b/libmultipath/devmapper.c @@ -213,8 +213,9 @@ dm_prereq (void) static int dm_simplecmd (int task, const char *name, int no_flush, int need_sync, uint16_t udev_flags, int deferred_remove) { int r = 0; - int udev_wait_flag = (need_sync && (task == DM_DEVICE_RESUME || - task == DM_DEVICE_REMOVE)); + int udev_wait_flag = ((need_sync || udev_flags) && + (task == DM_DEVICE_RESUME || + task == DM_DEVICE_REMOVE)); uint32_t cookie = 0; struct dm_task *dmt; @@ -266,11 +267,12 @@ dm_device_remove (const char *name, int needsync, int deferred_remove) { static int dm_addmap (int task, const char *target, struct multipath *mpp, - char * params, int ro) { + char * params, int ro, int skip_kpartx) { int r = 0; struct dm_task *dmt; char *prefixed_uuid = NULL; uint32_t cookie = 0; + uint16_t udev_flags = DM_UDEV_DISABLE_LIBRARY_FALLBACK | ((skip_kpartx == SKIP_KPARTX_ON)? MPATH_UDEV_NO_KPARTX_FLAG : 0); if (!(dmt = dm_task_create (task))) return 0; @@ -319,8 +321,7 @@ dm_addmap (int task, const char *target, struct multipath *mpp, dm_task_no_open_count(dmt); if (task == DM_DEVICE_CREATE && - !dm_task_set_cookie(dmt, &cookie, - DM_UDEV_DISABLE_LIBRARY_FALLBACK)) + !dm_task_set_cookie(dmt, &cookie, udev_flags)) goto freeout; r = dm_task_run (dmt); @@ -344,7 +345,8 @@ dm_addmap_create (struct multipath *mpp, char * params) { for (ro = 0; ro <= 1; ro++) { int err; - if (dm_addmap(DM_DEVICE_CREATE, TGT_MPATH, mpp, params, ro)) + if (dm_addmap(DM_DEVICE_CREATE, TGT_MPATH, mpp, params, ro, + mpp->skip_kpartx)) return 1; /* * DM_DEVICE_CREATE is actually DM_DEV_CREATE + DM_TABLE_LOAD. @@ -371,7 +373,9 @@ extern int dm_addmap_reload (struct multipath *mpp, char *params, int flush) { int r; - uint16_t udev_flags = flush ? 0 : MPATH_UDEV_RELOAD_FLAG; + uint16_t udev_flags = (flush ? 0 : MPATH_UDEV_RELOAD_FLAG) | + ((mpp->skip_kpartx == SKIP_KPARTX_ON)? + MPATH_UDEV_NO_KPARTX_FLAG : 0); /* * DM_DEVICE_RELOAD cannot wait on a cookie, as @@ -379,12 +383,13 @@ dm_addmap_reload (struct multipath *mpp, char *params, int flush) * DM_DEVICE_RESUME. So call DM_DEVICE_RESUME * after each successful call to DM_DEVICE_RELOAD. 
*/ - r = dm_addmap(DM_DEVICE_RELOAD, TGT_MPATH, mpp, params, ADDMAP_RW); + r = dm_addmap(DM_DEVICE_RELOAD, TGT_MPATH, mpp, params, ADDMAP_RW, + SKIP_KPARTX_OFF); if (!r) { if (errno != EROFS) return 0; r = dm_addmap(DM_DEVICE_RELOAD, TGT_MPATH, mpp, - params, ADDMAP_RO); + params, ADDMAP_RO, SKIP_KPARTX_OFF); } if (r) r = dm_simplecmd(DM_DEVICE_RESUME, mpp->alias, flush, @@ -534,8 +539,8 @@ dm_get_status(char * name, char * outstatus) int r = 1; struct dm_task *dmt; uint64_t start, length; - char *target_type; - char *status; + char *target_type = NULL; + char *status = NULL; if (!(dmt = dm_task_create(DM_DEVICE_STATUS))) return 1; @@ -551,6 +556,10 @@ dm_get_status(char * name, char * outstatus) /* Fetch 1st target */ dm_get_next_target(dmt, NULL, &start, &length, &target_type, &status); + if (!status) { + condlog(2, "get null status."); + goto out; + } if (snprintf(outstatus, PARAMS_SIZE, "%s", status) <= PARAMS_SIZE) r = 0; @@ -756,6 +765,12 @@ out: return r; } +static int +has_partmap(const char *name, void *data) +{ + return 1; +} + static int partmap_in_use(const char *name, void *data) { @@ -835,10 +850,16 @@ dm_suspend_and_flush_map (const char * mapname) int s = 0, queue_if_no_path = 0; unsigned long long mapsize; char params[PARAMS_SIZE] = {0}; + int udev_flags = 0; if (!dm_is_mpath(mapname)) return 0; /* nothing to do */ + /* if the device currently has no partitions, do not + run kpartx on it if you fail to delete it */ + if (do_foreach_partmaps(mapname, has_partmap, NULL) == 0) + udev_flags |= MPATH_UDEV_NO_KPARTX_FLAG; + if (!dm_get_map(mapname, &mapsize, params)) { if (strstr(params, "queue_if_no_path")) queue_if_no_path = 1; @@ -857,7 +878,7 @@ dm_suspend_and_flush_map (const char * mapname) return 0; } condlog(2, "failed to remove multipath map %s", mapname); - dm_simplecmd_noflush(DM_DEVICE_RESUME, mapname, 0); + dm_simplecmd_noflush(DM_DEVICE_RESUME, mapname, udev_flags); if (queue_if_no_path) s = dm_queue_if_no_path((char *)mapname, 1); return 1; @@ -1376,7 +1397,7 @@ rename_partmap (const char *name, void *data) for (offset = strlen(rd->old); name[offset] && !(isdigit(name[offset])); offset++); /* do nothing */ snprintf(buff, PARAMS_SIZE, "%s%s%s", rd->new, rd->delim, name + offset); - dm_rename(name, buff, rd->delim); + dm_rename(name, buff, rd->delim, SKIP_KPARTX_OFF); condlog(4, "partition map %s renamed", name); return 0; } @@ -1399,11 +1420,12 @@ dm_rename_partmaps (const char * old, char * new, char *delim) } int -dm_rename (const char * old, char * new, char *delim) +dm_rename (const char * old, char * new, char *delim, int skip_kpartx) { int r = 0; struct dm_task *dmt; - uint32_t cookie; + uint32_t cookie = 0; + uint16_t udev_flags = DM_UDEV_DISABLE_LIBRARY_FALLBACK | ((skip_kpartx == SKIP_KPARTX_ON)? 
MPATH_UDEV_NO_KPARTX_FLAG : 0); if (dm_rename_partmaps(old, new, delim)) return r; @@ -1419,8 +1441,7 @@ dm_rename (const char * old, char * new, char *delim) dm_task_no_open_count(dmt); - if (!dm_task_set_cookie(dmt, &cookie, - DM_UDEV_DISABLE_LIBRARY_FALLBACK)) + if (!dm_task_set_cookie(dmt, &cookie, udev_flags)) goto out; r = dm_task_run(dmt); diff --git a/libmultipath/devmapper.h b/libmultipath/devmapper.h index 442d42e..e6d1090 100644 --- a/libmultipath/devmapper.h +++ b/libmultipath/devmapper.h @@ -12,6 +12,12 @@ #define MPATH_UDEV_RELOAD_FLAG 0 #endif +#ifdef DM_SUBSYSTEM_UDEV_FLAG1 +#define MPATH_UDEV_NO_KPARTX_FLAG DM_SUBSYSTEM_UDEV_FLAG1 +#else +#define MPATH_UDEV_NO_KPARTX_FLAG 0 +#endif + void dm_init(int verbosity); int dm_prereq (void); int dm_drv_version (unsigned int * version, char * str); @@ -46,7 +52,7 @@ int dm_remove_partmaps (const char * mapname, int need_sync, int deferred_remove); int dm_get_uuid(char *name, char *uuid); int dm_get_info (char * mapname, struct dm_info ** dmi); -int dm_rename (const char * old, char * new, char * delim); +int dm_rename (const char * old, char * new, char * delim, int skip_kpartx); int dm_reassign(const char * mapname); int dm_reassign_table(const char *name, char *old, char *new); int dm_setgeometry(struct multipath *mpp); diff --git a/libmultipath/dict.c b/libmultipath/dict.c index e8c6804..61b6910 100644 --- a/libmultipath/dict.c +++ b/libmultipath/dict.c @@ -403,6 +403,18 @@ declare_def_snprint(uev_wait_timeout, print_int) declare_def_handler(strict_timing, set_yes_no) declare_def_snprint(strict_timing, print_yes_no) +declare_def_handler(skip_kpartx, set_yes_no_undef) +declare_def_snprint_defint(skip_kpartx, print_yes_no_undef, YNU_NO) +declare_ovr_handler(skip_kpartx, set_yes_no_undef) +declare_ovr_snprint(skip_kpartx, print_yes_no_undef) +declare_hw_handler(skip_kpartx, set_yes_no_undef) +declare_hw_snprint(skip_kpartx, print_yes_no_undef) +declare_mp_handler(skip_kpartx, set_yes_no_undef) +declare_mp_snprint(skip_kpartx, print_yes_no_undef) + +declare_def_handler(disable_changed_wwids, set_yes_no) +declare_def_snprint(disable_changed_wwids, print_yes_no) + static int def_config_dir_handler(struct config *conf, vector strvec) { @@ -1330,7 +1342,7 @@ snprint_deprecated (struct config *conf, char * buff, int len, void * data) #define __deprecated /* - * If you add or remove a keywork also update multipath/multipath.conf.5 + * If you add or remove a keyword also update multipath/multipath.conf.5 */ void init_keywords(vector keywords) @@ -1385,6 +1397,8 @@ init_keywords(vector keywords) install_keyword("retrigger_tries", &def_retrigger_tries_handler, &snprint_def_retrigger_tries); install_keyword("retrigger_delay", &def_retrigger_delay_handler, &snprint_def_retrigger_delay); install_keyword("missing_uev_wait_timeout", &def_uev_wait_timeout_handler, &snprint_def_uev_wait_timeout); + install_keyword("skip_kpartx", &def_skip_kpartx_handler, &snprint_def_skip_kpartx); + install_keyword("disable_changed_wwids", &def_disable_changed_wwids_handler, &snprint_def_disable_changed_wwids); __deprecated install_keyword("default_selector", &def_selector_handler, NULL); __deprecated install_keyword("default_path_grouping_policy", &def_pgpolicy_handler, NULL); __deprecated install_keyword("default_uid_attribute", &def_uid_attribute_handler, NULL); @@ -1421,7 +1435,10 @@ init_keywords(vector keywords) __deprecated install_keyword("product", &ble_product_handler, &snprint_bled_product); __deprecated install_sublevel_end(); #endif - +/* + * If you 
add or remove a "device subsection" keyword also update + * multipath/multipath.conf.5 and the TEMPLATE in libmultipath/hwtable.c + */ install_keyword_root("devices", &devices_handler); install_keyword_multi("device", &device_handler, NULL); install_sublevel(); @@ -1455,6 +1472,7 @@ init_keywords(vector keywords) install_keyword("deferred_remove", &hw_deferred_remove_handler, &snprint_hw_deferred_remove); install_keyword("delay_watch_checks", &hw_delay_watch_checks_handler, &snprint_hw_delay_watch_checks); install_keyword("delay_wait_checks", &hw_delay_wait_checks_handler, &snprint_hw_delay_wait_checks); + install_keyword("skip_kpartx", &hw_skip_kpartx_handler, &snprint_hw_skip_kpartx); install_sublevel_end(); install_keyword_root("overrides", &overrides_handler); @@ -1482,6 +1500,7 @@ init_keywords(vector keywords) install_keyword("deferred_remove", &ovr_deferred_remove_handler, &snprint_ovr_deferred_remove); install_keyword("delay_watch_checks", &ovr_delay_watch_checks_handler, &snprint_ovr_delay_watch_checks); install_keyword("delay_wait_checks", &ovr_delay_wait_checks_handler, &snprint_ovr_delay_wait_checks); + install_keyword("skip_kpartx", &ovr_skip_kpartx_handler, &snprint_ovr_skip_kpartx); install_keyword_root("multipaths", &multipaths_handler); install_keyword_multi("multipath", &multipath_handler, NULL); @@ -1508,5 +1527,6 @@ init_keywords(vector keywords) install_keyword("deferred_remove", &mp_deferred_remove_handler, &snprint_mp_deferred_remove); install_keyword("delay_watch_checks", &mp_delay_watch_checks_handler, &snprint_mp_delay_watch_checks); install_keyword("delay_wait_checks", &mp_delay_wait_checks_handler, &snprint_mp_delay_wait_checks); + install_keyword("skip_kpartx", &mp_skip_kpartx_handler, &snprint_mp_skip_kpartx); install_sublevel_end(); } diff --git a/libmultipath/discovery.c b/libmultipath/discovery.c index bb3116d..756344f 100644 --- a/libmultipath/discovery.c +++ b/libmultipath/discovery.c @@ -1538,13 +1538,12 @@ get_prio (struct path * pp) } static int -get_udev_uid(struct path * pp, char *uid_attribute) +get_udev_uid(struct path * pp, char *uid_attribute, struct udev_device *udev) { ssize_t len; const char *value; - value = udev_device_get_property_value(pp->udev, - uid_attribute); + value = udev_device_get_property_value(udev, uid_attribute); if (!value || strlen(value) == 0) value = getenv(uid_attribute); if (value && strlen(value)) { @@ -1625,8 +1624,8 @@ get_vpd_uid(struct path * pp) return get_vpd_sysfs(parent, 0x83, pp->wwid, WWID_SIZE); } -static int -get_uid (struct path * pp, int path_state) +int +get_uid (struct path * pp, int path_state, struct udev_device *udev) { char *c; const char *origin = "unknown"; @@ -1639,7 +1638,7 @@ get_uid (struct path * pp, int path_state) put_multipath_config(conf); } - if (!pp->udev) { + if (!udev) { condlog(1, "%s: no udev information", pp->dev); return 1; } @@ -1669,7 +1668,7 @@ get_uid (struct path * pp, int path_state) int retrigger; if (pp->uid_attribute) { - len = get_udev_uid(pp, pp->uid_attribute); + len = get_udev_uid(pp, pp->uid_attribute, udev); origin = "udev"; if (len <= 0) condlog(1, @@ -1798,7 +1797,7 @@ pathinfo (struct path *pp, struct config *conf, int mask) } if ((mask & DI_WWID) && !strlen(pp->wwid)) { - get_uid(pp, path_state); + get_uid(pp, path_state, pp->udev); if (!strlen(pp->wwid)) { pp->initialized = INIT_MISSING_UDEV; pp->tick = conf->retrigger_delay; diff --git a/libmultipath/discovery.h b/libmultipath/discovery.h index 0f5b1e6..176eac1 100644 --- a/libmultipath/discovery.h +++ 
b/libmultipath/discovery.h @@ -49,6 +49,7 @@ ssize_t sysfs_get_vpd (struct udev_device * udev, int pg, unsigned char * buff, size_t len); int sysfs_get_asymmetric_access_state(struct path *pp, char *buff, int buflen); +int get_uid(struct path * pp, int path_state, struct udev_device *udev); /* * discovery bitmask diff --git a/libmultipath/dmparser.c b/libmultipath/dmparser.c index 9e79ecd..87e8398 100644 --- a/libmultipath/dmparser.c +++ b/libmultipath/dmparser.c @@ -380,6 +380,14 @@ disassemble_map (vector pathvec, char * params, struct multipath * mpp, strncpy(pp->wwid, mpp->wwid, WWID_SIZE - 1); + /* + * Do not allow in-use patch to change wwid + */ + else if (strcmp(pp->wwid, mpp->wwid) != 0) { + condlog(0, "%s: path wwid appears to have changed. Using map wwid.\n", pp->dev_t); + strncpy(pp->wwid, mpp->wwid, WWID_SIZE); + } + pgp->id ^= (long)pp; pp->pgindex = i + 1; diff --git a/libmultipath/file.c b/libmultipath/file.c index 74cde64..e4951c9 100644 --- a/libmultipath/file.c +++ b/libmultipath/file.c @@ -158,7 +158,7 @@ open_file(char *file, int *can_write, char *header) goto fail; /* If file is empty, write the header */ size_t len = strlen(header); - if (write_all(fd, header, len) != len) { + if (write(fd, header, len) != len) { condlog(0, "Cannot write header to file %s : %s", file, strerror(errno)); diff --git a/libmultipath/hwtable.c b/libmultipath/hwtable.c index e518d6a..340035e 100644 --- a/libmultipath/hwtable.c +++ b/libmultipath/hwtable.c @@ -22,6 +22,9 @@ * * Devices with a proprietary handler must also be included in * the kernel side. Currently at drivers/scsi/scsi_dh.c + * + * Moreover, if a device needs a special treatment by the SCSI + * subsystem it should be included in drivers/scsi/scsi_devinfo.c */ static struct hwentry default_hw[] = { /* @@ -34,7 +37,6 @@ static struct hwentry default_hw[] = { .vendor = "APPLE", .product = "Xserve RAID", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, /* * HPE @@ -52,10 +54,9 @@ static struct hwentry default_hw[] = { /* RA8000 / ESA12000 */ .vendor = "DEC", .product = "HSG80", - .features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, .hwhandler = "1 hp_sw", .pgpolicy = GROUP_BY_PRIO, - .pgfailback = FAILBACK_UNDEF, .checker_name = HP_SW, .prio_name = PRIO_HP_SW, }, @@ -64,19 +65,15 @@ static struct hwentry default_hw[] = { .vendor = "HP", .product = "A6189A", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, .no_path_retry = 12, }, { /* MSA 1000/1500 and EVA 3000/5000, with old firmware */ .vendor = "(COMPAQ|HP)", .product = "(MSA|HSV)1[01]0", - .features = "1 queue_if_no_path", .hwhandler = "1 hp_sw", .pgpolicy = GROUP_BY_PRIO, - .pgfailback = FAILBACK_UNDEF, .no_path_retry = 12, - .minio = 100, .checker_name = HP_SW, .prio_name = PRIO_HP_SW, }, @@ -87,7 +84,6 @@ static struct hwentry default_hw[] = { .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 12, - .minio = 100, .prio_name = PRIO_ALUA, }, { @@ -97,7 +93,6 @@ static struct hwentry default_hw[] = { .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 12, - .minio = 100, .prio_name = PRIO_ALUA, }, { @@ -105,9 +100,7 @@ static struct hwentry default_hw[] = { .vendor = "HP", .product = "(MSA2[02]12fc|MSA2012i)", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, .no_path_retry = 18, - .minio = 100, }, { /* MSA2000 family with new firmware */ @@ -116,7 +109,6 @@ static struct hwentry default_hw[] = { .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 18, - 
.minio = 100, .prio_name = PRIO_ALUA, }, { @@ -126,7 +118,6 @@ static struct hwentry default_hw[] = { .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 18, - .minio = 100, .prio_name = PRIO_ALUA, }, { @@ -137,7 +128,6 @@ static struct hwentry default_hw[] = { .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 12, - .minio = 100, .prio_name = PRIO_ALUA, }, { @@ -145,7 +135,6 @@ static struct hwentry default_hw[] = { .vendor = "HP", .product = "LOGICAL VOLUME", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, .no_path_retry = 12, }, { @@ -155,7 +144,15 @@ static struct hwentry default_hw[] = { .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 18, - .minio = 100, + .prio_name = PRIO_ALUA, + }, + { + /* StoreVirtual 4000 family */ + .vendor = "LEFTHAND", + .product = "^(P4000|iSCSIDisk)", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .no_path_retry = 18, .prio_name = PRIO_ALUA, }, /* @@ -165,25 +162,36 @@ static struct hwentry default_hw[] = { .vendor = "DDN", .product = "SAN DataDirector", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, + }, + { + .vendor = "DDN", + .product = "^EF3010", + .pgpolicy = MULTIBUS, + .no_path_retry = 30, + }, + { + .vendor = "DDN", + .product = "^(EF3015|S2A|SFA)", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = 30, }, /* - * EMC + * Dell EMC */ { /* Symmetrix / DMX / VMAX */ .vendor = "EMC", .product = "SYMMETRIX", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, .no_path_retry = 6, }, { - /* DGC CLARiiON CX/AX and EMC VNX */ + /* DGC CLARiiON CX/AX / EMC VNX and Unity */ .vendor = "^DGC", .product = "^(RAID|DISK|VRAID)", .bl_product = "LUNZ", - .features = "1 queue_if_no_path", .hwhandler = "1 emc", .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, @@ -197,26 +205,26 @@ static struct hwentry default_hw[] = { .product = "Invista", .bl_product = "LUNZ", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, .no_path_retry = 5, }, { .vendor = "XtremIO", .product = "XtremApp", - .selector = "queue-length 0", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, - /* - * DELL - */ { + /* + * Dell SC Series, formerly Compellent + * + * Maintainer : Sean McGinnis + * Mail : sean_mcginnis@dell.com + */ .vendor = "COMPELNT", .product = "Compellent Vol", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, .no_path_retry = NO_PATH_RETRY_QUEUE, }, + /* MD Series */ { .vendor = "DELL", .product = "MD3000", @@ -260,12 +268,10 @@ static struct hwentry default_hw[] = { .vendor = "FSC", .product = "CentricStor", .pgpolicy = GROUP_BY_SERIAL, - .pgfailback = FAILBACK_UNDEF, }, { .vendor = "FUJITSU", .product = "ETERNUS_DX(H|L|M|400|8000)", - .features = "1 queue_if_no_path", .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 10, @@ -276,7 +282,14 @@ static struct hwentry default_hw[] = { .vendor = "(EUROLOGC|EuroLogc)", .product = "FC2502", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, + }, + { + .vendor = "FUJITSU", + .product = "E[248]000", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .no_path_retry = 10, + .prio_name = PRIO_ALUA, }, /* * Hitachi @@ -289,13 +302,12 @@ static struct hwentry default_hw[] = { .vendor = "(HITACHI|HP)", .product = "^OPEN-", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, { /* AMS 2000 and HUS 100 families */ - .vendor = "HITACHI", + .vendor = "(HITACHI|HP)", .product = "^DF", - 
.features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .prio_name = PRIO_HDS, @@ -310,7 +322,6 @@ static struct hwentry default_hw[] = { .vendor = "IBM", .product = "ProFibre 4000R", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, { /* DS4300 / FAStT600 */ @@ -339,7 +350,7 @@ static struct hwentry default_hw[] = { .no_path_retry = 30, }, { - /* DS3200 / DS3300 / DS3400 / Boot DS */ + /* DS3000 / DS3200 / DS3300 / DS3400 / Boot DS */ .vendor = "IBM", .product = "^1726", .bl_product = "Universal Xport", @@ -443,42 +454,46 @@ static struct hwentry default_hw[] = { .no_path_retry = 30, }, { - /* DS4200 / FAStT200 */ + /* FAStT200 and FAStT500 */ .vendor = "IBM", - .product = "^3542", - .pgpolicy = GROUP_BY_SERIAL, - .pgfailback = FAILBACK_UNDEF, + .product = "^(3542|3552)", + .bl_product = "Universal Xport", + .pgpolicy = GROUP_BY_PRIO, + .checker_name = RDAC, + .features = "2 pg_init_retries 50", + .hwhandler = "1 rdac", + .prio_name = PRIO_RDAC, + .pgfailback = -FAILBACK_IMMEDIATE, + .no_path_retry = 30, }, { /* Enterprise Storage Server / Shark family */ .vendor = "IBM", .product = "^2105", - .features = "1 queue_if_no_path", - .pgpolicy = GROUP_BY_SERIAL, - .pgfailback = FAILBACK_UNDEF, + .no_path_retry = NO_PATH_RETRY_QUEUE, + .pgpolicy = MULTIBUS, }, { - /* DS6000 */ + /* DS6000 / DS6800 */ .vendor = "IBM", .product = "^1750500", - .features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .prio_name = PRIO_ALUA, }, { - /* DS8000 */ + /* DS8000 family */ .vendor = "IBM", .product = "^2107900", - .features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, { - /* Storwize family / SAN Volume Controller / Flex System V7000 */ + /* Storwize family / SAN Volume Controller / Flex System V7000 / FlashSystem V840/V9000 */ .vendor = "IBM", .product = "^2145", - .features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .prio_name = PRIO_ALUA, @@ -488,24 +503,22 @@ static struct hwentry default_hw[] = { .product = "S/390 DASD ECKD", .bl_product = "S/390", .uid_attribute = "ID_UID", - .features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, { .vendor = "IBM", .product = "S/390 DASD FBA", .bl_product = "S/390", .uid_attribute = "ID_UID", - .features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, { /* Power RAID */ .vendor = "IBM", .product = "^IPR", - .features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, .hwhandler = "1 alua", .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, @@ -518,17 +531,30 @@ static struct hwentry default_hw[] = { .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = NO_PATH_RETRY_QUEUE, - .minio = 100, .prio_name = PRIO_ALUA, }, { - /* XIV Storage System */ + /* XIV Storage System / FlashSystem A9000/A9000R */ .vendor = "IBM", .product = "2810XIV", - .features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, + .pgpolicy = MULTIBUS, + }, + { + /* FlashSystem 710/720/810/820/840/900 */ + .vendor = "IBM", + .product = "FlashSystem", + .no_path_retry = NO_PATH_RETRY_FAIL, .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, - .minio = 15, + }, + { + /* 
DDN */ + .vendor = "IBM", + .product = "^(DCS9900|2851)", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = 30, }, /* * IBM Power Virtual SCSI Devices @@ -540,14 +566,12 @@ static struct hwentry default_hw[] = { .vendor = "AIX", .product = "VDASD", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, .no_path_retry = (300 / DEFAULT_CHECKINT), }, { /* 3303 NVDISK */ .vendor = "IBM", .product = "3303[ ]+NVDISK", - .pgfailback = FAILBACK_UNDEF, .no_path_retry = (300 / DEFAULT_CHECKINT), }, { @@ -571,11 +595,11 @@ static struct hwentry default_hw[] = { */ .vendor = "NETAPP", .product = "LUN", - .features = "3 queue_if_no_path pg_init_retries 50", + .features = "2 pg_init_retries 50", + .no_path_retry = NO_PATH_RETRY_QUEUE, .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .flush_on_last_del = FLUSH_ENABLED, - .minio = 128, .dev_loss = MAX_DEV_LOSS_TMO, .prio_name = PRIO_ONTAP, }, @@ -597,6 +621,18 @@ static struct hwentry default_hw[] = { .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 30, }, + { + /* + * SolidFir family + * + * Maintainer : PJ Waskiewicz + * Mail : pj.waskiewicz@netapp.com + */ + .vendor = "SolidFir", + .product = "SSD SAN", + .pgpolicy = MULTIBUS, + .no_path_retry = 24, + }, /* * Nexenta * @@ -606,11 +642,8 @@ static struct hwentry default_hw[] = { { .vendor = "NEXENTA", .product = "COMSTAR", - .features = "1 queue_if_no_path", .pgpolicy = GROUP_BY_SERIAL, - .pgfailback = FAILBACK_UNDEF, .no_path_retry = 30, - .minio = 128, }, /* * SGI @@ -619,11 +652,11 @@ static struct hwentry default_hw[] = { .vendor = "SGI", .product = "TP9100", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, { + /* Total Performance family */ .vendor = "SGI", - .product = "TP9[345]00", + .product = "TP9[3457]00", .bl_product = "Universal Xport", .pgpolicy = GROUP_BY_PRIO, .checker_name = RDAC, @@ -634,7 +667,7 @@ static struct hwentry default_hw[] = { .no_path_retry = 30, }, { - /* InfiniteStorage ??? 
*/ + /* InfiniteStorage family */ .vendor = "SGI", .product = "IS", .bl_product = "Universal Xport", @@ -646,6 +679,15 @@ static struct hwentry default_hw[] = { .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 30, }, + { + /* DDN */ + .vendor = "SGI", + .product = "^DD[46]A-", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = 30, + }, /* * NEC */ @@ -683,8 +725,9 @@ static struct hwentry default_hw[] = { }, /* Sun - StorageTek */ { + /* B210, B220, B240 and B280 */ .vendor = "STK", - .product = "OPENstorage D280", + .product = "BladeCtlr", .bl_product = "Universal Xport", .pgpolicy = GROUP_BY_PRIO, .checker_name = RDAC, @@ -695,6 +738,20 @@ static struct hwentry default_hw[] = { .no_path_retry = 30, }, { + /* 9176, D173, D178, D210, D220, D240 and D280 */ + .vendor = "STK", + .product = "OPENstorage", + .bl_product = "Universal Xport", + .pgpolicy = GROUP_BY_PRIO, + .checker_name = RDAC, + .features = "2 pg_init_retries 50", + .hwhandler = "1 rdac", + .prio_name = PRIO_RDAC, + .pgfailback = -FAILBACK_IMMEDIATE, + .no_path_retry = 30, + }, + { + /* 6540 */ .vendor = "STK", .product = "FLEXLINE 380", .bl_product = "Universal Xport", @@ -707,10 +764,10 @@ static struct hwentry default_hw[] = { .no_path_retry = 30, }, { + /* 3510 / 6020 and 6120 */ .vendor = "SUN", .product = "(StorEdge 3510|T4)", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, { .vendor = "SUN", @@ -725,9 +782,9 @@ static struct hwentry default_hw[] = { .no_path_retry = 30, }, { - /* 6140 */ + /* 6130 / 6140 */ .vendor = "SUN", - .product = "CSM200_R", + .product = "CSM[12]00_R", .bl_product = "Universal Xport", .pgpolicy = GROUP_BY_PRIO, .checker_name = RDAC, @@ -738,7 +795,7 @@ static struct hwentry default_hw[] = { .no_path_retry = 30, }, { - /* 2510 / 2530 / 2540 */ + /* 2500 / 2510 / 2530 / 2540 */ .vendor = "SUN", .product = "LCSM100_[IEFS]", .bl_product = "Universal Xport", @@ -762,6 +819,15 @@ static struct hwentry default_hw[] = { .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 30, }, + { + /* ZFS Storage Appliances */ + .vendor = "SUN", + .product = "(Sun Storage|ZFS Storage|COMSTAR)", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = 30, + }, /* * Pivot3 * @@ -771,10 +837,16 @@ static struct hwentry default_hw[] = { { .vendor = "PIVOT3", .product = "RAIGE VOLUME", - .features = "1 queue_if_no_path", + .no_path_retry = NO_PATH_RETRY_QUEUE, .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, - .minio = 100, + }, + { + .vendor = "(NexGen|Pivot3)", + .product = "(TierStore|vSTAC)", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = NO_PATH_RETRY_QUEUE, }, /* * Intel @@ -782,6 +854,7 @@ static struct hwentry default_hw[] = { { .vendor = "(Intel|INTEL)", .product = "Multi-Flex", + .bl_product = "VTrak V-LUN", .hwhandler = "1 alua", .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, @@ -798,7 +871,6 @@ static struct hwentry default_hw[] = { .pgpolicy = GROUP_BY_PRIO, .pgfailback = -FAILBACK_IMMEDIATE, .no_path_retry = 12, - .minio = 100, .prio_name = PRIO_ALUA, }, /* @@ -826,9 +898,7 @@ static struct hwentry default_hw[] = { { .vendor = "PURE", .product = "FlashArray", - .selector = "queue-length 0", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, .fast_io_fail = 10, .dev_loss = 60, }, @@ -840,7 +910,6 @@ static struct hwentry default_hw[] = { .vendor = "(HUAWEI|HUASY)", .product = "XSG1", .pgpolicy = 
MULTIBUS, - .pgfailback = FAILBACK_UNDEF, }, /* * Red Hat @@ -862,16 +931,135 @@ static struct hwentry default_hw[] = { .vendor = "KOVE", .product = "XPD", .pgpolicy = MULTIBUS, - .pgfailback = FAILBACK_UNDEF, + }, + /* + * Infinidat + */ + { + .vendor = "NFINIDAT", + .product = "InfiniBox", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + }, + /* + * Nimble Storage + */ + { + .vendor = "Nimble", + .product = "Server", + .hwhandler = "1 alua", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = NO_PATH_RETRY_QUEUE, + }, + /* + * Kaminario + */ + { + .vendor = "KMNRIO", + .product = "K2", + .pgpolicy = MULTIBUS, + }, + /* + * Tegile Systems + */ + { + .vendor = "TEGILE", + .product = "(ZEBI-(FC|ISCSI)|INTELLIFLASH)", + .hwhandler = "1 alua", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = 10, + }, + /* + * Imation/Nexsan + */ + { + /* E-Series */ + .vendor = "NEXSAN", + .product = "NXS-B0", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = 15, + }, + { + /* SATABeast / SATABoy */ + .vendor = "NEXSAN", + .product = "SATAB", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = 15, + }, + /* + * Xiotech + */ + { + /* Intelligent Storage Elements family */ + .vendor = "(XIOTECH|XIOtech)", + .product = "ISE", + .pgpolicy = MULTIBUS, + .no_path_retry = 12, + }, + /* + * Violin Memory + */ + { + /* 3000 / 6000 Series */ + .vendor = "VIOLIN", + .product = "SAN ARRAY$", + .pgpolicy = GROUP_BY_SERIAL, + .no_path_retry = 30, + }, + { + .vendor = "VIOLIN", + .product = "SAN ARRAY ALUA", + .hwhandler = "1 alua", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = 30, + }, + { + /* FSP 7000 family */ + .vendor = "VIOLIN", + .product = "CONCERTO ARRAY", + .pgpolicy = MULTIBUS, + .no_path_retry = 30, + }, + /* + * Promise Technology + */ + { + .vendor = "Promise", + .product = "VTrak", + .bl_product = "VTrak V-LUN", + .pgpolicy = MULTIBUS, + .no_path_retry = 30, + }, + /* + * Infortrend Technology + */ + { + /* EonStor / ESVA */ + .vendor = "^IFT", + .product = ".*", + .pgpolicy = GROUP_BY_PRIO, + .pgfailback = -FAILBACK_IMMEDIATE, + .prio_name = PRIO_ALUA, + .no_path_retry = 30, }, #if 0 /* * Copy this TEMPLATE to add new hardware. * - * Keep only mandatory and modified attributes. + * Keep only mandatory(.vendor and .product) and modified attributes. * Attributes with default values must be removed. - * Only .vendor and .product are mandatory, all other are optional. - * .vendor, .product, .revision and .bl_product are POSIX Extended regex + * .vendor, .product, .revision and .bl_product are POSIX Extended regex. 
* * COMPANY_NAME * @@ -888,9 +1076,10 @@ static struct hwentry default_hw[] = { .uid_attribute = "ID_SERIAL", .selector = "service-time 0", .checker_name = TUR, + .alias_prefix = "mpath", .features = "0", .hwhandler = "0", - .prio_name = "const", + .prio_name = PRIO_CONST, .prio_args = "", .pgfailback = -FAILBACK_MANUAL, .rr_weight = RR_WEIGHT_NONE, @@ -898,6 +1087,7 @@ static struct hwentry default_hw[] = { .minio = 1000, .minio_rq = 1, .flush_on_last_del = FLUSH_DISABLED, + .user_friendly_names = USER_FRIENDLY_NAMES_OFF, .fast_io_fail = 5, .dev_loss = 600, .retain_hwhandler = RETAIN_HWHANDLER_ON, diff --git a/libmultipath/log.c b/libmultipath/log.c index ab92e2a..debd36d 100644 --- a/libmultipath/log.c +++ b/libmultipath/log.c @@ -15,6 +15,8 @@ #define ALIGN(len, s) (((len)+(s)-1)/(s)*(s)) +struct logarea* la; + #if LOGDBG static void dump_logarea (void) { diff --git a/libmultipath/log.h b/libmultipath/log.h index 984f047..6551b5c 100644 --- a/libmultipath/log.h +++ b/libmultipath/log.h @@ -29,7 +29,7 @@ struct logarea { char * buff; }; -struct logarea * la; +extern struct logarea* la; int log_init (char * progname, int size); void log_close (void); diff --git a/libmultipath/memory.c b/libmultipath/memory.c index 5441e6a..293a688 100644 --- a/libmultipath/memory.c +++ b/libmultipath/memory.c @@ -20,6 +20,7 @@ * Copyright (C) 2001-2005 Alexandre Cassen, */ +#include #include "memory.h" /* diff --git a/libmultipath/memory.h b/libmultipath/memory.h index 29a75ed..a3c478e 100644 --- a/libmultipath/memory.h +++ b/libmultipath/memory.h @@ -27,7 +27,6 @@ #include #include #include -#include /* Local defines */ #ifdef _DEBUG_ diff --git a/libmultipath/print.c b/libmultipath/print.c index 94d6384..865562b 100644 --- a/libmultipath/print.c +++ b/libmultipath/print.c @@ -73,7 +73,6 @@ static int snprint_size (char * buff, size_t len, unsigned long long size) { float s = (float)(size >> 1); /* start with KB */ - char fmt[6] = {}; char units[] = {'K','M','G','T','P'}; char *u = units; @@ -81,12 +80,8 @@ snprint_size (char * buff, size_t len, unsigned long long size) s = s / 1024; u++; } - if (s < 10) - snprintf(fmt, 6, "%%.1f%c", *u); - else - snprintf(fmt, 6, "%%.0f%c", *u); - return snprintf(buff, len, fmt, s); + return snprintf(buff, len, "%.*f%c", s < 10, s, *u); } /* @@ -249,6 +244,12 @@ snprint_q_timeouts (char * buff, size_t len, struct multipath * mpp) return snprint_uint(buff, len, mpp->stat_queueing_timeouts); } +static int +snprint_map_failures (char * buff, size_t len, struct multipath * mpp) +{ + return snprint_uint(buff, len, mpp->stat_map_failures); +} + static int snprint_multipath_uuid (char * buff, size_t len, struct multipath * mpp) { @@ -624,6 +625,7 @@ struct multipath_data mpd[] = { {'t', "dm-st", 0, snprint_dm_map_state}, {'S', "size", 0, snprint_multipath_size}, {'f', "features", 0, snprint_features}, + {'x', "failures", 0, snprint_map_failures}, {'h', "hwhandler", 0, snprint_hwhandler}, {'A', "action", 0, snprint_action}, {'0', "path_faults", 0, snprint_path_faults}, @@ -783,7 +785,7 @@ snprint_multipath_header (char * line, int len, char * format) struct multipath_data * data; do { - if (!TAIL) + if (TAIL <= 0) break; if (*f != '%') { @@ -816,7 +818,7 @@ snprint_multipath (char * line, int len, char * format, char buff[MAX_FIELD_LEN] = {}; do { - if (!TAIL) + if (TAIL <= 0) break; if (*f != '%') { @@ -850,7 +852,7 @@ snprint_path_header (char * line, int len, char * format) struct path_data * data; do { - if (!TAIL) + if (TAIL <= 0) break; if (*f != '%') { @@ -883,7 +885,7 
@@ snprint_path (char * line, int len, char * format, char buff[MAX_FIELD_LEN]; do { - if (!TAIL) + if (TAIL <= 0) break; if (*f != '%') { @@ -918,7 +920,7 @@ snprint_pathgroup (char * line, int len, char * format, char buff[MAX_FIELD_LEN]; do { - if (!TAIL) + if (TAIL <= 0) break; if (*f != '%') { @@ -1009,11 +1011,11 @@ snprint_multipath_topology (char * buff, int len, struct multipath * mpp, c += sprintf(c, "%c[%dm", 0x1B, 0); /* bold off */ fwd += snprint_multipath(buff + fwd, len - fwd, style, mpp, 1); - if (fwd > len) + if (fwd >= len) return len; fwd += snprint_multipath(buff + fwd, len - fwd, PRINT_MAP_PROPS, mpp, 1); - if (fwd > len) + if (fwd >= len) return len; if (!mpp->pg) @@ -1027,7 +1029,7 @@ snprint_multipath_topology (char * buff, int len, struct multipath * mpp, } else strcpy(f, "`-+- " PRINT_PG_INDENT); fwd += snprint_pathgroup(buff + fwd, len - fwd, fmt, pgp); - if (fwd > len) + if (fwd >= len) return len; vector_foreach_slot (pgp->paths, pp, i) { @@ -1040,13 +1042,14 @@ snprint_multipath_topology (char * buff, int len, struct multipath * mpp, else strcpy(f, " `- " PRINT_PATH_INDENT); fwd += snprint_path(buff + fwd, len - fwd, fmt, pp, 1); - if (fwd > len) + if (fwd >= len) return len; } } return fwd; } + static int snprint_json (char * buff, int len, int indent, char *json_str) { @@ -1054,7 +1057,7 @@ snprint_json (char * buff, int len, int indent, char *json_str) for (i = 0; i < indent; i++) { fwd += snprintf(buff + fwd, len - fwd, PRINT_JSON_INDENT); - if (fwd > len) + if (fwd >= len) return fwd; } @@ -1068,7 +1071,7 @@ snprint_json_header (char * buff, int len) int fwd = 0; fwd += snprint_json(buff, len, 0, PRINT_JSON_START_ELEM); - if (fwd > len) + if (fwd >= len) return fwd; fwd += snprintf(buff + fwd, len - fwd, PRINT_JSON_START_VERSION, @@ -1083,7 +1086,7 @@ snprint_json_elem_footer (char * buff, int len, int indent, int last) for (i = 0; i < indent; i++) { fwd += snprintf(buff + fwd, len - fwd, PRINT_JSON_INDENT); - if (fwd > len) + if (fwd >= len) return fwd; } @@ -1103,50 +1106,50 @@ snprint_multipath_fields_json (char * buff, int len, struct pathgroup *pgp; fwd += snprint_multipath(buff, len, PRINT_JSON_MAP, mpp, 0); - if (fwd > len) + if (fwd >= len) return fwd; fwd += snprint_json(buff + fwd, len - fwd, 2, PRINT_JSON_START_GROUPS); - if (fwd > len) + if (fwd >= len) return fwd; vector_foreach_slot (mpp->pg, pgp, i) { pgp->selector = mpp->selector; fwd += snprint_pathgroup(buff + fwd, len - fwd, PRINT_JSON_GROUP, pgp); - if (fwd > len) + if (fwd >= len) return fwd; fwd += snprintf(buff + fwd, len - fwd, PRINT_JSON_GROUP_NUM, i + 1); - if (fwd > len) + if (fwd >= len) return fwd; fwd += snprint_json(buff + fwd, len - fwd, 3, PRINT_JSON_START_PATHS); - if (fwd > len) + if (fwd >= len) return fwd; vector_foreach_slot (pgp->paths, pp, j) { fwd += snprint_path(buff + fwd, len - fwd, PRINT_JSON_PATH, pp, 0); - if (fwd > len) + if (fwd >= len) return fwd; fwd += snprint_json_elem_footer(buff + fwd, len - fwd, 3, j + 1 == VECTOR_SIZE(pgp->paths)); - if (fwd > len) + if (fwd >= len) return fwd; } fwd += snprint_json(buff + fwd, len - fwd, 0, PRINT_JSON_END_ARRAY); - if (fwd > len) + if (fwd >= len) return fwd; fwd += snprint_json_elem_footer(buff + fwd, len - fwd, 2, i + 1 == VECTOR_SIZE(mpp->pg)); - if (fwd > len) + if (fwd >= len) return fwd; } fwd += snprint_json(buff + fwd, len - fwd, 0, PRINT_JSON_END_ARRAY); - if (fwd > len) + if (fwd >= len) return fwd; fwd += snprint_json_elem_footer(buff + fwd, len - fwd, 1, last); @@ -1159,23 +1162,23 @@ 
snprint_multipath_map_json (char * buff, int len, int fwd = 0; fwd += snprint_json_header(buff, len); - if (fwd > len) + if (fwd >= len) return len; fwd += snprint_json(buff + fwd, len - fwd, 0, PRINT_JSON_START_MAP); - if (fwd > len) + if (fwd >= len) return len; fwd += snprint_multipath_fields_json(buff + fwd, len - fwd, mpp, 1); - if (fwd > len) + if (fwd >= len) return len; fwd += snprint_json(buff + fwd, len - fwd, 0, "\n"); - if (fwd > len) + if (fwd >= len) return len; fwd += snprint_json(buff + fwd, len - fwd, 0, PRINT_JSON_END_LAST); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1187,26 +1190,26 @@ snprint_multipath_topology_json (char * buff, int len, struct vectors * vecs) struct multipath * mpp; fwd += snprint_json_header(buff, len); - if (fwd > len) + if (fwd >= len) return len; fwd += snprint_json(buff + fwd, len - fwd, 1, PRINT_JSON_START_MAPS); - if (fwd > len) + if (fwd >= len) return len; vector_foreach_slot(vecs->mpvec, mpp, i) { fwd += snprint_multipath_fields_json(buff + fwd, len - fwd, mpp, i + 1 == VECTOR_SIZE(vecs->mpvec)); - if (fwd > len) + if (fwd >= len) return len; } fwd += snprint_json(buff + fwd, len - fwd, 0, PRINT_JSON_END_ARRAY); - if (fwd > len) + if (fwd >= len) return len; fwd += snprint_json(buff + fwd, len - fwd, 0, PRINT_JSON_END_LAST); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1230,16 +1233,16 @@ snprint_hwentry (struct config *conf, char * buff, int len, struct hwentry * hwe return 0; fwd += snprintf(buff + fwd, len - fwd, "\tdevice {\n"); - if (fwd > len) + if (fwd >= len) return len; iterate_sub_keywords(rootkw, kw, i) { fwd += snprint_keyword(buff + fwd, len - fwd, "\t\t%k %v\n", kw, hwe); - if (fwd > len) + if (fwd >= len) return len; } fwd += snprintf(buff + fwd, len - fwd, "\t}\n"); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1257,15 +1260,15 @@ snprint_hwtable (struct config *conf, char * buff, int len, vector hwtable) return 0; fwd += snprintf(buff + fwd, len - fwd, "devices {\n"); - if (fwd > len) + if (fwd >= len) return len; vector_foreach_slot (hwtable, hwe, i) { fwd += snprint_hwentry(conf, buff + fwd, len - fwd, hwe); - if (fwd > len) + if (fwd >= len) return len; } fwd += snprintf(buff + fwd, len - fwd, "}\n"); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1283,16 +1286,16 @@ snprint_mpentry (struct config *conf, char * buff, int len, struct mpentry * mpe return 0; fwd += snprintf(buff + fwd, len - fwd, "\tmultipath {\n"); - if (fwd > len) + if (fwd >= len) return len; iterate_sub_keywords(rootkw, kw, i) { fwd += snprint_keyword(buff + fwd, len - fwd, "\t\t%k %v\n", kw, mpe); - if (fwd > len) + if (fwd >= len) return len; } fwd += snprintf(buff + fwd, len - fwd, "\t}\n"); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1310,15 +1313,15 @@ snprint_mptable (struct config *conf, char * buff, int len, vector mptable) return 0; fwd += snprintf(buff + fwd, len - fwd, "multipaths {\n"); - if (fwd > len) + if (fwd >= len) return len; vector_foreach_slot (mptable, mpe, i) { fwd += snprint_mpentry(conf, buff + fwd, len - fwd, mpe); - if (fwd > len) + if (fwd >= len) return len; } fwd += snprintf(buff + fwd, len - fwd, "}\n"); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1336,19 +1339,19 @@ snprint_overrides (struct config *conf, char * buff, int len, struct hwentry *ov return 0; fwd += snprintf(buff + fwd, len - fwd, "overrides {\n"); - if (fwd > len) + if (fwd >= len) return len; if (!overrides) goto out; 
iterate_sub_keywords(rootkw, kw, i) { fwd += snprint_keyword(buff + fwd, len - fwd, "\t%k %v\n", kw, NULL); - if (fwd > len) + if (fwd >= len) return len; } out: fwd += snprintf(buff + fwd, len - fwd, "}\n"); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1366,17 +1369,17 @@ snprint_defaults (struct config *conf, char * buff, int len) return 0; fwd += snprintf(buff + fwd, len - fwd, "defaults {\n"); - if (fwd > len) + if (fwd >= len) return len; iterate_sub_keywords(rootkw, kw, i) { fwd += snprint_keyword(buff + fwd, len - fwd, "\t%k %v\n", kw, NULL); - if (fwd > len) + if (fwd >= len) return len; } fwd += snprintf(buff + fwd, len - fwd, "}\n"); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1513,7 +1516,7 @@ snprint_blacklist (struct config *conf, char * buff, int len) return 0; fwd += snprintf(buff + fwd, len - fwd, "blacklist {\n"); - if (fwd > len) + if (fwd >= len) return len; vector_foreach_slot (conf->blist_devnode, ble, i) { @@ -1522,7 +1525,7 @@ snprint_blacklist (struct config *conf, char * buff, int len) return 0; fwd += snprint_keyword(buff + fwd, len - fwd, "\t%k %v\n", kw, ble); - if (fwd > len) + if (fwd >= len) return len; } vector_foreach_slot (conf->blist_wwid, ble, i) { @@ -1531,7 +1534,7 @@ snprint_blacklist (struct config *conf, char * buff, int len) return 0; fwd += snprint_keyword(buff + fwd, len - fwd, "\t%k %v\n", kw, ble); - if (fwd > len) + if (fwd >= len) return len; } vector_foreach_slot (conf->blist_property, ble, i) { @@ -1540,7 +1543,7 @@ snprint_blacklist (struct config *conf, char * buff, int len) return 0; fwd += snprint_keyword(buff + fwd, len - fwd, "\t%k %v\n", kw, ble); - if (fwd > len) + if (fwd >= len) return len; } rootkw = find_keyword(conf->keywords, rootkw->sub, "device"); @@ -1549,28 +1552,28 @@ snprint_blacklist (struct config *conf, char * buff, int len) vector_foreach_slot (conf->blist_device, bled, i) { fwd += snprintf(buff + fwd, len - fwd, "\tdevice {\n"); - if (fwd > len) + if (fwd >= len) return len; kw = find_keyword(conf->keywords, rootkw->sub, "vendor"); if (!kw) return 0; fwd += snprint_keyword(buff + fwd, len - fwd, "\t\t%k %v\n", kw, bled); - if (fwd > len) + if (fwd >= len) return len; kw = find_keyword(conf->keywords, rootkw->sub, "product"); if (!kw) return 0; fwd += snprint_keyword(buff + fwd, len - fwd, "\t\t%k %v\n", kw, bled); - if (fwd > len) + if (fwd >= len) return len; fwd += snprintf(buff + fwd, len - fwd, "\t}\n"); - if (fwd > len) + if (fwd >= len) return len; } fwd += snprintf(buff + fwd, len - fwd, "}\n"); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1590,7 +1593,7 @@ snprint_blacklist_except (struct config *conf, char * buff, int len) return 0; fwd += snprintf(buff + fwd, len - fwd, "blacklist_exceptions {\n"); - if (fwd > len) + if (fwd >= len) return len; vector_foreach_slot (conf->elist_devnode, ele, i) { @@ -1599,7 +1602,7 @@ snprint_blacklist_except (struct config *conf, char * buff, int len) return 0; fwd += snprint_keyword(buff + fwd, len - fwd, "\t%k %v\n", kw, ele); - if (fwd > len) + if (fwd >= len) return len; } vector_foreach_slot (conf->elist_wwid, ele, i) { @@ -1608,7 +1611,7 @@ snprint_blacklist_except (struct config *conf, char * buff, int len) return 0; fwd += snprint_keyword(buff + fwd, len - fwd, "\t%k %v\n", kw, ele); - if (fwd > len) + if (fwd >= len) return len; } vector_foreach_slot (conf->elist_property, ele, i) { @@ -1617,7 +1620,7 @@ snprint_blacklist_except (struct config *conf, char * buff, int len) return 0; fwd += 
snprint_keyword(buff + fwd, len - fwd, "\t%k %v\n", kw, ele); - if (fwd > len) + if (fwd >= len) return len; } rootkw = find_keyword(conf->keywords, rootkw->sub, "device"); @@ -1626,28 +1629,28 @@ snprint_blacklist_except (struct config *conf, char * buff, int len) vector_foreach_slot (conf->elist_device, eled, i) { fwd += snprintf(buff + fwd, len - fwd, "\tdevice {\n"); - if (fwd > len) + if (fwd >= len) return len; kw = find_keyword(conf->keywords, rootkw->sub, "vendor"); if (!kw) return 0; fwd += snprint_keyword(buff + fwd, len - fwd, "\t\t%k %v\n", kw, eled); - if (fwd > len) + if (fwd >= len) return len; kw = find_keyword(conf->keywords, rootkw->sub, "product"); if (!kw) return 0; fwd += snprint_keyword(buff + fwd, len - fwd, "\t\t%k %v\n", kw, eled); - if (fwd > len) + if (fwd >= len) return len; fwd += snprintf(buff + fwd, len - fwd, "\t}\n"); - if (fwd > len) + if (fwd >= len) return len; } fwd += snprintf(buff + fwd, len - fwd, "}\n"); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1679,7 +1682,7 @@ snprint_status (char * buff, int len, struct vectors *vecs) fwd += snprintf(buff + fwd, len - fwd, "\npaths: %d\nbusy: %s\n", monitored_count, is_uevent_busy()? "True" : "False"); - if (fwd > len) + if (fwd >= len) return len; return fwd; } @@ -1745,17 +1748,11 @@ snprint_devices (struct config *conf, char * buff, int len, struct vectors *vecs } closedir(blkdir); - if (fwd > len) + if (fwd >= len) return len; return fwd; } -extern int -snprint_config (char * buff, int len) -{ - return 0; -} - /* * stdout printing helpers */ diff --git a/libmultipath/prio.h b/libmultipath/prio.h index 261105b..0193c52 100644 --- a/libmultipath/prio.h +++ b/libmultipath/prio.h @@ -64,4 +64,7 @@ char * prio_name (struct prio *); char * prio_args (struct prio *); int prio_set_args (struct prio *, char *); +/* The only function exported by prioritizer dynamic libraries (.so) */ +int getprio(struct path *, char *, unsigned int); + #endif /* _PRIO_H */ diff --git a/libmultipath/prioritizers/alua.c b/libmultipath/prioritizers/alua.c index 5636974..03d0a0e 100644 --- a/libmultipath/prioritizers/alua.c +++ b/libmultipath/prioritizers/alua.c @@ -31,8 +31,8 @@ static const char * aas_string[] = { [AAS_NON_OPTIMIZED] = "active/non-optimized", [AAS_STANDBY] = "standby", [AAS_UNAVAILABLE] = "unavailable", - [AAS_LBA_DEPENDENT] = "lba dependent", - [AAS_RESERVED] = "invalid/reserved", + [AAS_LBA_DEPENDENT] = "logical block dependent", + [AAS_RESERVED] = "ARRAY BUG: invalid TPGs state!", [AAS_OFFLINE] = "offline", [AAS_TRANSITIONING] = "transitioning between states", }; @@ -65,12 +65,12 @@ get_alua_info(struct path * pp, unsigned int timeout) return -ALUA_PRIO_NOT_SUPPORTED; return -ALUA_PRIO_RTPG_FAILED; } - condlog(3, "reported target port group is %i", tpg); + condlog(3, "%s: reported target port group is %i", pp->dev, tpg); rc = get_asymmetric_access_state(pp->fd, tpg, timeout); if (rc < 0) return -ALUA_PRIO_GETAAS_FAILED; - condlog(3, "aas = %02x [%s]%s", rc, aas_print_string(rc), + condlog(3, "%s: aas = %02x [%s]%s", pp->dev, rc, aas_print_string(rc), (rc & 0x80) ? 
" [preferred]" : ""); return rc; } diff --git a/libmultipath/prioritizers/const.c b/libmultipath/prioritizers/const.c index 9d9d003..aad6927 100644 --- a/libmultipath/prioritizers/const.c +++ b/libmultipath/prioritizers/const.c @@ -2,7 +2,7 @@ #include "prio.h" -int getprio (struct path * pp, char * args) +int getprio(struct path * pp, char * args, unsigned int timeout) { return 1; } diff --git a/libmultipath/prioritizers/datacore.c b/libmultipath/prioritizers/datacore.c index 050a94c..36465ac 100644 --- a/libmultipath/prioritizers/datacore.c +++ b/libmultipath/prioritizers/datacore.c @@ -106,7 +106,7 @@ int datacore_prio (const char *dev, int sg_fd, char * args) return 0; } -int getprio (struct path * pp, char * args) +int getprio(struct path * pp, char * args, unsigned int timeout) { return datacore_prio(pp->dev, pp->fd, args); } diff --git a/libmultipath/prioritizers/iet.c b/libmultipath/prioritizers/iet.c index aa852a0..a4ea61e 100644 --- a/libmultipath/prioritizers/iet.c +++ b/libmultipath/prioritizers/iet.c @@ -138,7 +138,7 @@ int iet_prio(const char *dev, char * args) return 10; } -int getprio(struct path * pp, char * args) +int getprio(struct path * pp, char * args, unsigned int timeout) { return iet_prio(pp->dev, args); } diff --git a/libmultipath/prioritizers/ontap.c b/libmultipath/prioritizers/ontap.c index 4084c65..38495cd 100644 --- a/libmultipath/prioritizers/ontap.c +++ b/libmultipath/prioritizers/ontap.c @@ -17,7 +17,6 @@ #include #include #include -#include #include "sg_include.h" #include "debug.h" diff --git a/libmultipath/prioritizers/random.c b/libmultipath/prioritizers/random.c index c3ea3ac..4a27123 100644 --- a/libmultipath/prioritizers/random.c +++ b/libmultipath/prioritizers/random.c @@ -5,7 +5,7 @@ #include "prio.h" -int getprio (struct path * pp, char * args) +int getprio(struct path * pp, char * args, unsigned int timeout) { struct timeval tv; diff --git a/libmultipath/prioritizers/weightedpath.c b/libmultipath/prioritizers/weightedpath.c index a62b86e..34a43a8 100644 --- a/libmultipath/prioritizers/weightedpath.c +++ b/libmultipath/prioritizers/weightedpath.c @@ -151,7 +151,7 @@ int prio_path_weight(struct path *pp, char *prio_args) return priority; } -int getprio(struct path *pp, char *args) +int getprio(struct path *pp, char *args, unsigned int timeout) { return prio_path_weight(pp, args); } diff --git a/libmultipath/propsel.c b/libmultipath/propsel.c index 1625990..ec1fd92 100644 --- a/libmultipath/propsel.c +++ b/libmultipath/propsel.c @@ -137,7 +137,7 @@ select_rr_weight (struct config *conf, struct multipath * mp) mp_set_ovr(rr_weight); mp_set_hwe(rr_weight); mp_set_conf(rr_weight); - mp_set_default(rr_weight, RR_WEIGHT_NONE); + mp_set_default(rr_weight, DEFAULT_RR_WEIGHT); out: print_rr_weight(buff, 13, &mp->rr_weight); condlog(3, "%s: rr_weight = %s %s", mp->alias, buff, origin); @@ -228,7 +228,7 @@ want_user_friendly_names(struct config *conf, struct multipath * mp) "(controller setting)"); do_set(user_friendly_names, conf, user_friendly_names, "(config file setting)"); - do_default(user_friendly_names, USER_FRIENDLY_NAMES_OFF); + do_default(user_friendly_names, DEFAULT_USER_FRIENDLY_NAMES); out: condlog(3, "%s: user_friendly_names = %s %s", mp->wwid, (user_friendly_names == USER_FRIENDLY_NAMES_ON)? 
"yes" : "no", @@ -550,7 +550,7 @@ select_flush_on_last_del(struct config *conf, struct multipath *mp) mp_set_ovr(flush_on_last_del); mp_set_hwe(flush_on_last_del); mp_set_conf(flush_on_last_del); - mp_set_default(flush_on_last_del, FLUSH_DISABLED); + mp_set_default(flush_on_last_del, DEFAULT_FLUSH); out: condlog(3, "%s: flush_on_last_del = %s %s", mp->alias, (mp->flush_on_last_del == FLUSH_ENABLED)? "yes" : "no", origin); @@ -665,4 +665,22 @@ out: print_delay_checks(buff, 12, &mp->delay_wait_checks); condlog(3, "%s: delay_wait_checks = %s %s", mp->alias, buff, origin); return 0; + +} + +extern int +select_skip_kpartx (struct config *conf, struct multipath * mp) +{ + char *origin; + + mp_set_mpe(skip_kpartx); + mp_set_ovr(skip_kpartx); + mp_set_hwe(skip_kpartx); + mp_set_conf(skip_kpartx); + mp_set_default(skip_kpartx, DEFAULT_SKIP_KPARTX); +out: + condlog(3, "%s: skip_kpartx = %s %s", mp->alias, + (mp->skip_kpartx == SKIP_KPARTX_ON)? "yes" : "no", + origin); + return 0; } diff --git a/libmultipath/propsel.h b/libmultipath/propsel.h index 5941a5f..3e6d607 100644 --- a/libmultipath/propsel.h +++ b/libmultipath/propsel.h @@ -22,3 +22,4 @@ int select_detect_prio(struct config *conf, struct path * pp); int select_deferred_remove(struct config *conf, struct multipath *mp); int select_delay_watch_checks (struct config *conf, struct multipath * mp); int select_delay_wait_checks (struct config *conf, struct multipath * mp); +int select_skip_kpartx (struct config *conf, struct multipath * mp); diff --git a/libmultipath/structs.c b/libmultipath/structs.c index fee58e5..e4bf4c6 100644 --- a/libmultipath/structs.c +++ b/libmultipath/structs.c @@ -520,6 +520,17 @@ add_feature (char **f, char *n) if (!n || *n == '0') return 0; + /* default feature is null */ + if(!*f) + { + l = asprintf(&t, "1 %s", n); + if(l == -1) + return 1; + + *f = t; + return 0; + } + /* Check if feature is already present */ if (strstr(*f, n)) return 0; diff --git a/libmultipath/structs.h b/libmultipath/structs.h index cb5d532..58508f6 100644 --- a/libmultipath/structs.h +++ b/libmultipath/structs.h @@ -128,6 +128,12 @@ enum deferred_remove_states { DEFERRED_REMOVE_IN_PROGRESS, }; +enum skip_kpartx_states { + SKIP_KPARTX_UNDEF = YNU_UNDEF, + SKIP_KPARTX_OFF = YNU_NO, + SKIP_KPARTX_ON = YNU_YES, +}; + enum scsi_protocol { SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */ SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */ @@ -211,6 +217,7 @@ struct path { int fd; int initialized; int retriggers; + int wwid_changed; /* configlet pointers */ struct hwentry * hwe; @@ -243,6 +250,7 @@ struct multipath { int deferred_remove; int delay_watch_checks; int delay_wait_checks; + int skip_kpartx; unsigned int dev_loss; uid_t uid; gid_t gid; @@ -270,6 +278,7 @@ struct multipath { unsigned int stat_map_loads; unsigned int stat_total_queueing_time; unsigned int stat_queueing_timeouts; + unsigned int stat_map_failures; /* checkers shared data */ void * mpcontext; diff --git a/libmultipath/structs_vec.c b/libmultipath/structs_vec.c index a0c8869..e898528 100644 --- a/libmultipath/structs_vec.c +++ b/libmultipath/structs_vec.c @@ -610,19 +610,23 @@ int update_multipath (struct vectors *vecs, char *mapname, int reset) */ void update_queue_mode_del_path(struct multipath *mpp) { - if (--mpp->nr_active == 0 && mpp->no_path_retry > 0) { - struct config *conf = get_multipath_config(); + if (--mpp->nr_active == 0) { + if (mpp->no_path_retry > 0) { + struct config *conf = get_multipath_config(); - /* - * Enter retry mode. 
- * meaning of +1: retry_tick may be decremented in - * checkerloop before starting retry. - */ - mpp->stat_queueing_timeouts++; - mpp->retry_tick = mpp->no_path_retry * conf->checkint + 1; - condlog(1, "%s: Entering recovery mode: max_retries=%d", - mpp->alias, mpp->no_path_retry); - put_multipath_config(conf); + /* + * Enter retry mode. + * meaning of +1: retry_tick may be decremented in + * checkerloop before starting retry. + */ + mpp->stat_queueing_timeouts++; + mpp->retry_tick = mpp->no_path_retry * + conf->checkint + 1; + condlog(1, "%s: Entering recovery mode: max_retries=%d", + mpp->alias, mpp->no_path_retry); + put_multipath_config(conf); + } else if (mpp->no_path_retry != NO_PATH_RETRY_QUEUE) + mpp->stat_map_failures++; } condlog(2, "%s: remaining active paths: %d", mpp->alias, mpp->nr_active); } diff --git a/libmultipath/time-util.c b/libmultipath/time-util.c new file mode 100644 index 0000000..6d79c0e --- /dev/null +++ b/libmultipath/time-util.c @@ -0,0 +1,42 @@ +#include +#include +#include +#include "time-util.h" + +/* Initialize @cond as a condition variable that uses the monotonic clock */ +void pthread_cond_init_mono(pthread_cond_t *cond) +{ + pthread_condattr_t attr; + int res; + + res = pthread_condattr_init(&attr); + assert(res == 0); + res = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC); + assert(res == 0); + res = pthread_cond_init(cond, &attr); + assert(res == 0); + res = pthread_condattr_destroy(&attr); + assert(res == 0); +} + +/* Ensure that 0 <= ts->tv_nsec && ts->tv_nsec < 1000 * 1000 * 1000. */ +void normalize_timespec(struct timespec *ts) +{ + while (ts->tv_nsec < 0) { + ts->tv_nsec += 1000UL * 1000 * 1000; + ts->tv_sec--; + } + while (ts->tv_nsec >= 1000UL * 1000 * 1000) { + ts->tv_nsec -= 1000UL * 1000 * 1000; + ts->tv_sec++; + } +} + +/* Compute *res = *a - *b */ +void timespecsub(const struct timespec *a, const struct timespec *b, + struct timespec *res) +{ + res->tv_sec = a->tv_sec - b->tv_sec; + res->tv_nsec = a->tv_nsec - b->tv_nsec; + normalize_timespec(res); +} diff --git a/libmultipath/time-util.h b/libmultipath/time-util.h new file mode 100644 index 0000000..b76d2aa --- /dev/null +++ b/libmultipath/time-util.h @@ -0,0 +1,13 @@ +#ifndef _TIME_UTIL_H_ +#define _TIME_UTIL_H_ + +#include + +struct timespec; + +void pthread_cond_init_mono(pthread_cond_t *cond); +void normalize_timespec(struct timespec *ts); +void timespecsub(const struct timespec *a, const struct timespec *b, + struct timespec *res); + +#endif /* _TIME_UTIL_H_ */ diff --git a/libmultipath/uevent.c b/libmultipath/uevent.c index 6247898..19b910f 100644 --- a/libmultipath/uevent.c +++ b/libmultipath/uevent.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include @@ -50,7 +49,6 @@ typedef int (uev_trigger)(struct uevent *, void * trigger_data); -pthread_t uevq_thr; LIST_HEAD(uevq); pthread_mutex_t uevq_lock = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t *uevq_lockp = &uevq_lock; @@ -80,33 +78,6 @@ struct uevent * alloc_uevent (void) return uev; } -void -setup_thread_attr(pthread_attr_t *attr, size_t stacksize, int detached) -{ - if (pthread_attr_init(attr)) { - fprintf(stderr, "can't initialize thread attr: %s\n", - strerror(errno)); - exit(1); - } - if (stacksize < PTHREAD_STACK_MIN) - stacksize = PTHREAD_STACK_MIN; - - if (pthread_attr_setstacksize(attr, stacksize)) { - fprintf(stderr, "can't set thread stack size to %lu: %s\n", - (unsigned long)stacksize, strerror(errno)); - exit(1); - } - if (detached && pthread_attr_setdetachstate(attr, - 
PTHREAD_CREATE_DETACHED)) { - fprintf(stderr, "can't set thread to detached: %s\n", - strerror(errno)); - exit(1); - } -} - -/* - * Called with uevq_lockp held - */ void service_uevq(struct list_head *tmpq) { @@ -124,15 +95,11 @@ service_uevq(struct list_head *tmpq) } } -static void uevq_stop(void *arg) +static void uevent_cleanup(void *arg) { struct udev *udev = arg; - condlog(3, "Stopping uev queue"); - pthread_mutex_lock(uevq_lockp); - my_uev_trigger = NULL; - pthread_cond_signal(uev_condp); - pthread_mutex_unlock(uevq_lockp); + condlog(3, "Releasing uevent_listen() resources"); udev_unref(udev); } @@ -495,7 +462,7 @@ int uevent_listen(struct udev *udev) return 1; } udev_ref(udev); - pthread_cleanup_push(uevq_stop, udev); + pthread_cleanup_push(uevent_cleanup, udev); monitor = udev_monitor_new_from_netlink(udev, "udev"); if (!monitor) { diff --git a/libmultipath/uevent.h b/libmultipath/uevent.h index e5fdfcc..9d22dcd 100644 --- a/libmultipath/uevent.h +++ b/libmultipath/uevent.h @@ -27,7 +27,6 @@ struct uevent { }; int is_uevent_busy(void); -void setup_thread_attr(pthread_attr_t *attr, size_t stacksize, int detached); int uevent_listen(struct udev *udev); int uevent_dispatch(int (*store_uev)(struct uevent *, void * trigger_data), diff --git a/libmultipath/util.c b/libmultipath/util.c index ac0d1b2..0a136b4 100644 --- a/libmultipath/util.c +++ b/libmultipath/util.c @@ -1,7 +1,10 @@ -#include +#include #include -#include +#include +#include +#include #include +#include #include #include "debug.h" @@ -258,3 +261,21 @@ dev_t parse_devt(const char *dev_t) return makedev(maj, min); } + +void +setup_thread_attr(pthread_attr_t *attr, size_t stacksize, int detached) +{ + int ret; + + ret = pthread_attr_init(attr); + assert(ret == 0); + if (stacksize < PTHREAD_STACK_MIN) + stacksize = PTHREAD_STACK_MIN; + ret = pthread_attr_setstacksize(attr, stacksize); + assert(ret == 0); + if (detached) { + ret = pthread_attr_setdetachstate(attr, + PTHREAD_CREATE_DETACHED); + assert(ret == 0); + } +} diff --git a/libmultipath/util.h b/libmultipath/util.h index 8861085..f3b37ee 100644 --- a/libmultipath/util.h +++ b/libmultipath/util.h @@ -12,6 +12,7 @@ size_t strlcat(char *dst, const char *src, size_t size); int devt2devname (char *, int, char *); dev_t parse_devt(const char *dev_t); char *convert_dev(char *dev, int is_path_device); +void setup_thread_attr(pthread_attr_t *attr, size_t stacksize, int detached); #define safe_sprintf(var, format, args...) 
\ snprintf(var, sizeof(var), format, ##args) >= sizeof(var) diff --git a/libmultipath/uxsock.c b/libmultipath/uxsock.c index 775e278..b158a56 100644 --- a/libmultipath/uxsock.c +++ b/libmultipath/uxsock.c @@ -81,7 +81,7 @@ size_t write_all(int fd, const void *buf, size_t len) size_t total = 0; while (len) { - ssize_t n = write(fd, buf, len); + ssize_t n = send(fd, buf, len, MSG_NOSIGNAL); if (n < 0) { if ((errno == EINTR) || (errno == EAGAIN)) continue; @@ -116,7 +116,7 @@ ssize_t read_all(int fd, void *buf, size_t len, unsigned int timeout) if (errno == EINTR) continue; return -errno; - } else if (!pfd.revents & POLLIN) + } else if (!(pfd.revents & POLLIN)) continue; n = read(fd, buf, len); if (n < 0) { @@ -138,20 +138,7 @@ ssize_t read_all(int fd, void *buf, size_t len, unsigned int timeout) */ int send_packet(int fd, const char *buf) { - int ret = 0; - sigset_t set, old; - - /* Block SIGPIPE */ - sigemptyset(&set); - sigaddset(&set, SIGPIPE); - pthread_sigmask(SIG_BLOCK, &set, &old); - - ret = mpath_send_cmd(fd, buf); - - /* And unblock it again */ - pthread_sigmask(SIG_SETMASK, &old, NULL); - - return ret; + return mpath_send_cmd(fd, buf); } /* diff --git a/libmultipath/version.h b/libmultipath/version.h index dea59fe..f00476d 100644 --- a/libmultipath/version.h +++ b/libmultipath/version.h @@ -20,8 +20,8 @@ #ifndef _VERSION_H #define _VERSION_H -#define VERSION_CODE 0x000603 -#define DATE_CODE 0x080f10 +#define VERSION_CODE 0x000604 +#define DATE_CODE 0x030b10 #define PROG "multipath-tools" diff --git a/libmultipath/wwids.c b/libmultipath/wwids.c index babf149..bc70a27 100644 --- a/libmultipath/wwids.c +++ b/libmultipath/wwids.c @@ -71,7 +71,7 @@ write_out_wwid(int fd, char *wwid) { strerror(errno)); return -1; } - if (write_all(fd, buf, strlen(buf)) != strlen(buf)) { + if (write(fd, buf, strlen(buf)) != strlen(buf)) { condlog(0, "cannot write wwid to wwids file : %s", strerror(errno)); if (ftruncate(fd, offset)) @@ -110,7 +110,7 @@ replace_wwids(vector mp) goto out_file; } len = strlen(WWIDS_FILE_HEADER); - if (write_all(fd, WWIDS_FILE_HEADER, len) != len) { + if (write(fd, WWIDS_FILE_HEADER, len) != len) { condlog(0, "Can't write wwid file header : %s", strerror(errno)); /* cleanup partially written header */ diff --git a/mpathpersist/Makefile b/mpathpersist/Makefile index d545514..47043bb 100644 --- a/mpathpersist/Makefile +++ b/mpathpersist/Makefile @@ -2,7 +2,7 @@ include ../Makefile.inc CFLAGS += -I$(multipathdir) -I$(mpathpersistdir) -LDFLAGS += -lpthread -ldevmapper -L$(mpathpersistdir) -lmpathpersist \ +LIBDEPS += -lpthread -ldevmapper -L$(mpathpersistdir) -lmpathpersist \ -L$(multipathdir) -L$(mpathcmddir) -lmpathcmd -lmultipath -ludev EXEC = mpathpersist @@ -12,18 +12,18 @@ OBJS = main.o all: $(EXEC) $(EXEC): $(OBJS) - $(CC) -g $(OBJS) -o $(EXEC) $(LDFLAGS) $(CFLAGS) + $(CC) $(OBJS) -o $(EXEC) $(LDFLAGS) $(CFLAGS) $(LIBDEPS) $(GZIP) $(EXEC).8 > $(EXEC).8.gz install: $(INSTALL_PROGRAM) -d $(DESTDIR)$(bindir) $(INSTALL_PROGRAM) -m 755 $(EXEC) $(DESTDIR)$(bindir)/ - $(INSTALL_PROGRAM) -d $(DESTDIR)$(mandir) - $(INSTALL_PROGRAM) -m 644 $(EXEC).8.gz $(DESTDIR)$(mandir) + $(INSTALL_PROGRAM) -d $(DESTDIR)$(man8dir) + $(INSTALL_PROGRAM) -m 644 $(EXEC).8.gz $(DESTDIR)$(man8dir) clean: $(RM) core *.o $(EXEC) *.gz uninstall: $(RM) $(DESTDIR)$(bindir)/$(EXEC) - $(RM) $(DESTDIR)$(mandir)/$(EXEC).8.gz + $(RM) $(DESTDIR)$(man8dir)/$(EXEC).8.gz diff --git a/mpathpersist/main.c b/mpathpersist/main.c index a55865f..2e0aba3 100644 --- a/mpathpersist/main.c +++ b/mpathpersist/main.c @@ -105,7 
+105,12 @@ int main (int argc, char * argv[]) udev = udev_new(); conf = mpath_lib_init(udev); - memset(transportids,0,MPATH_MX_TIDS); + if(!conf) { + udev_unref(udev); + exit(1); + } + + memset(transportids, 0, MPATH_MX_TIDS * sizeof(struct transportid)); multipath_conf = conf; while (1) diff --git a/mpathpersist/main.h b/mpathpersist/main.h index 7c31262..5c0e089 100644 --- a/mpathpersist/main.h +++ b/mpathpersist/main.h @@ -1,28 +1,28 @@ static struct option long_options[] = { - {"verbose", 1, 0, 'v'}, - {"clear", 0, 0, 'C'}, - {"device", 1, 0, 'd'}, - {"help", 0, 0, 'h'}, - {"hex", 0, 0, 'H'}, - {"in", 0, 0, 'i'}, - {"out", 0, 0, 'o'}, - {"param-aptpl", 0, 0, 'Z'}, - {"param-rk", 1, 0, 'K'}, - {"param-sark", 1, 0, 'S'}, - {"preempt", 0, 0, 'P'}, - {"preempt-abort", 0, 0, 'A'}, - {"prout-type", 1, 0, 'T'}, - {"read-full-status", 0, 0, 's'}, - {"read-keys", 0, 0, 'k'}, - {"read-reservation", 0, 0, 'r'}, - {"register", 0, 0, 'G'}, - {"register-ignore", 0, 0, 'I'}, - {"release", 0, 0, 'L'}, - {"report-capabilities", 0, 0, 'c'}, - {"reserve", 0, 0, 'R'}, - {"transport-id", 1, 0, 'X'}, - {"alloc-length", 1, 0, 'l'}, - {0, 0, 0, 0} + {"verbose", 1, NULL, 'v'}, + {"clear", 0, NULL, 'C'}, + {"device", 1, NULL, 'd'}, + {"help", 0, NULL, 'h'}, + {"hex", 0, NULL, 'H'}, + {"in", 0, NULL, 'i'}, + {"out", 0, NULL, 'o'}, + {"param-aptpl", 0, NULL, 'Z'}, + {"param-rk", 1, NULL, 'K'}, + {"param-sark", 1, NULL, 'S'}, + {"preempt", 0, NULL, 'P'}, + {"preempt-abort", 0, NULL, 'A'}, + {"prout-type", 1, NULL, 'T'}, + {"read-full-status", 0, NULL, 's'}, + {"read-keys", 0, NULL, 'k'}, + {"read-reservation", 0, NULL, 'r'}, + {"register", 0, NULL, 'G'}, + {"register-ignore", 0, NULL, 'I'}, + {"release", 0, NULL, 'L'}, + {"report-capabilities", 0, NULL, 'c'}, + {"reserve", 0, NULL, 'R'}, + {"transport-id", 1, NULL, 'X'}, + {"alloc-length", 1, NULL, 'l'}, + {NULL, 0, NULL, 0} }; static void usage(void); diff --git a/mpathpersist/mpathpersist.8 b/mpathpersist/mpathpersist.8 index a47a82a..4b15666 100644 --- a/mpathpersist/mpathpersist.8 +++ b/mpathpersist/mpathpersist.8 @@ -1,96 +1,176 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.39.2. -.TH MPATHPERSIST "8" "April 2011" "mpathpersist" "User Commands" +.\" ---------------------------------------------------------------------------- +.\" Update the date below if you make any significant change. +.\" Make sure there are no errors with: +.\" groff -z -wall -b -e -t mpathpersist/mpathpersist.8 +.\" +.\" ---------------------------------------------------------------------------- +. +.TH MPATHPERSIST 8 2016-10-30 "Linux" +. +. +.\" ---------------------------------------------------------------------------- .SH NAME -mpathpersist +.\" ---------------------------------------------------------------------------- +. +mpathpersist \- Manages SCSI persistent reservations on dm multipath devices. +. +. +.\" ---------------------------------------------------------------------------- .SH SYNOPSIS +.\" ---------------------------------------------------------------------------- +. .B mpathpersist -[\fIOPTIONS\fR] [\fIDEVICE\fR] +.RB [\| OPTIONS \|] +.I device +. +. 
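The long_options[] table rewritten above in mpathpersist/main.h is a standard getopt_long(3) option table; the all-NULL final row is the terminator the parser stops on, which is why the patch replaces the literal 0 pointers with NULL. The sketch below shows how such a table is typically consumed. It is illustrative only: a hypothetical three-option subset, not the real mpathpersist option loop.

/*
 * Minimal, hypothetical sketch of consuming a NULL-terminated
 * struct option table like the one in mpathpersist/main.h.
 * Option names and messages are illustrative.
 */
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

static struct option long_options[] = {
	{"verbose", 1, NULL, 'v'},
	{"in",      0, NULL, 'i'},
	{"out",     0, NULL, 'o'},
	{NULL, 0, NULL, 0}	/* terminator: getopt_long() stops here */
};

int main(int argc, char *argv[])
{
	int c;

	/* "v:io" mirrors the long table: -v takes an argument, -i/-o do not */
	while ((c = getopt_long(argc, argv, "v:io", long_options, NULL)) != -1) {
		switch (c) {
		case 'v':
			printf("verbosity = %d\n", atoi(optarg));
			break;
		case 'i':
			printf("PR In command requested\n");
			break;
		case 'o':
			printf("PR Out command requested\n");
			break;
		default:
			fprintf(stderr, "usage: example [-v level] [-i|-o]\n");
			return 1;
		}
	}
	return 0;
}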
+.\" ---------------------------------------------------------------------------- .SH DESCRIPTION -.IP -Options: -.TP -\fB\-\-verbose\fR|\-v level -verbosity level -.TP -0 -Critical and error messages -.TP -1 -Warning messages -.TP -2 -Informational messages -.TP -3 -Informational messages with trace enabled -.TP -\fB\-\-clear\fR|\-C -PR Out: Clear -.TP -\fB\-\-device\fR=\fIDEVICE\fR|\-d DEVICE -query or change DEVICE -.TP -\fB\-\-help\fR|\-h -output this usage message -.TP -\fB\-\-hex\fR|\-H -output response in hex -.TP -\fB\-\-in\fR|\-i -request PR In command -.TP -\fB\-\-out\fR|\-o -request PR Out command -.TP -\fB\-\-param\-aptpl\fR|\-Z -PR Out parameter 'APTPL' -.TP -\fB\-\-read\-keys\fR|\-k -PR In: Read Keys -.TP -\fB\-\-param\-sark\fR=\fISARK\fR|\-S SARK -PR Out parameter service action -reservation key (SARK is in hex) -.TP -\fB\-\-preempt\fR|\-P -PR Out: Preempt -.TP -\fB\-\-preempt\-abort\fR|\-A -PR Out: Preempt and Abort -.TP -\fB\-\-prout\-type\fR=\fITYPE\fR|\-T TYPE -PR Out command type -.TP -\fB\-\-read\-status\fR|\-s -PR In: Read Full Status -.TP -\fB\-\-read\-keys\fR|\-k -PR In: Read Keys -.TP -\fB\-\-read\-reservation\fR|\-r -PR In: Read Reservation -.TP -\fB\-\-register\fR|\-G -PR Out: Register -.TP -\fB\-\-register\-ignore\fR|\-I -PR Out: Register and Ignore -.TP -\fB\-\-release\fR|\-L -PR Out: Release -.TP -\fB\-\-report\-capabilities\fR|\-c -PR In: Report Capabilities -.TP -\fB\-\-reserve\fR|\-R -PR Out: Reserve -.TP -\fB\-\-transport\-id\fR=\fITIDS\fR|\-X TIDS -TransportIDs can be mentioned -in several forms -.IP -Examples: -.IP -mpathpersist \fB\-\-out\fR \fB\-\-register\fR \fB\-\-param\-sark\fR=\fI123abc\fR \fB\-\-prout\-type\fR=\fI5\fR /dev/mapper/mpath9 -mpathpersist \fB\-i\fR \fB\-k\fR /dev/mapper/mpath9 -.PP +.\" ---------------------------------------------------------------------------- +. +This utility is used to manage SCSI persistent reservations on Device Mapper +Multipath devices. To be able to use this functionality, the \fIreservation_key\fR +attribute must be defined in the \fI/etc/multipath.conf\fR file. Otherwise the +\fBmultipathd\fR daemon will not check for persistent reservation for newly +discovered paths or reinstated paths. +. +. +.\" ---------------------------------------------------------------------------- +.SH OPTIONS +.\" ---------------------------------------------------------------------------- +. +.TP +.BI \-verbose|\-v " level" +Verbosity: +.RS +.TP 5 +.I 0 +Critical messages. +.TP +.I 1 +Error messages. +.TP +.I 2 +Warning messages. +.TP +.I 3 +Informational messages. +.TP +.I 4 +Informational messages with trace enabled. +.RE +. +.TP +.BI \--device=\fIDEVICE\fB|\-d " DEVICE" +Query or change DEVICE. +. +.TP +.B \--help|\-h +Output this usage message. +. +.TP +.B \--hex|\-H +Output response in hex. +. +.TP +.B \--in|\-i +Request PR In command. +. +.TP +.B \--out|\-o +Request PR Out command. +. +.TP +.B \--param-aptpl|\-Z +PR Out parameter 'APTPL'. +. +.TP +.B \--read-keys|\-k +PR In: Read Keys. +. +.TP +.BI \--param-sark=\fISARK\fB|\-S " SARK" +PR Out parameter service action reservation key (SARK is in hex). +. +.TP +.B \--preempt|\-P +PR Out: Preempt. +. +.TP +.B \--preempt-abort|\-A +PR Out: Preempt and Abort. +. +.TP +.BI \--prout-type=\fITYPE\fB|\-T " TYPE" +PR Out command type. +. +.TP +.B \--read-full-status|\-s +PR In: Read Full Status. +. +.TP +.B \--read-keys|\-k +PR In: Read Keys. +. +.TP +.B \--read-reservation|\-r +PR In: Read Reservation. +. +.TP +.B \--register|\-G +PR Out: Register. +. 
+.TP +.B \--register-ignore|\-I +PR Out: Register and Ignore. +. +.TP +.B \--release|\-L +PR Out: Release. +. +.TP +.B \--report-capabilities|\-c +PR In: Report Capabilities. +. +.TP +.B \--reserve|\-R +PR Out: Reserve. +. +.TP +.BI \--transport-id=\fITIDS\fB|\-X " TIDS" +TransportIDs can be mentioned in several forms. +. +. +.\" ---------------------------------------------------------------------------- +.SH EXAMPLE +.\" ---------------------------------------------------------------------------- +. +.TP +Register the Service Action Reservation Key for the /dev/mapper/mpath9 device: +\fBmpathpersist --out --register --param-sark=\fI123abc \fB--prout-type=\fI5 /dev/mapper/mpath9\fR +.TP +Read the Service Action Reservation Key for the /dev/mapper/mpath9 device: +\fBmpathpersist -i -k \fI/dev/mapper/mpath9\fR +.TP +Reserve the Service Action Reservation Key for the /dev/mapper/mpath9 device: +\fBmpathpersist --out --reserve --param-sark=\fI123abc \fB--prout-type=\fI8 \fB-d \fI/dev/mapper/mpath9\fR +.TP +Read the reservation status of the /dev/mapper/mpath9 device: +\fBmpathpersist -i -s -d \fI/dev/mapper/mpath9\fR +. +. +.\" ---------------------------------------------------------------------------- +.SH "SEE ALSO" +.\" ---------------------------------------------------------------------------- +. +.BR multipath (8), +.BR multipathd (8). +. +. +.\" ---------------------------------------------------------------------------- +.SH AUTHORS +.\" ---------------------------------------------------------------------------- +. +\fImultipath-tools\fR was developed by Christophe Varoqui +and others. +.\" EOF diff --git a/multipath/Makefile b/multipath/Makefile index b125ae3..cad34bf 100644 --- a/multipath/Makefile +++ b/multipath/Makefile @@ -5,7 +5,7 @@ include ../Makefile.inc CFLAGS += -I$(multipathdir) -I$(mpathcmddir) -LDFLAGS += -lpthread -ldevmapper -ldl -L$(multipathdir) -lmultipath -ludev \ +LIBDEPS += -lpthread -ldevmapper -ldl -L$(multipathdir) -lmultipath -ludev \ -L$(mpathcmddir) -lmpathcmd EXEC = multipath @@ -15,7 +15,7 @@ OBJS = main.o all: $(EXEC) $(EXEC): $(OBJS) - $(CC) $(CFLAGS) $(OBJS) -o $(EXEC) $(LDFLAGS) + $(CC) $(CFLAGS) $(OBJS) -o $(EXEC) $(LDFLAGS) $(LIBDEPS) $(GZIP) $(EXEC).8 > $(EXEC).8.gz $(GZIP) $(EXEC).conf.5 > $(EXEC).conf.5.gz @@ -25,8 +25,8 @@ install: $(INSTALL_PROGRAM) -d $(DESTDIR)$(udevrulesdir) $(INSTALL_PROGRAM) -m 644 11-dm-mpath.rules $(DESTDIR)$(udevrulesdir) $(INSTALL_PROGRAM) -m 644 $(EXEC).rules $(DESTDIR)$(libudevdir)/rules.d/56-multipath.rules - $(INSTALL_PROGRAM) -d $(DESTDIR)$(mandir) - $(INSTALL_PROGRAM) -m 644 $(EXEC).8.gz $(DESTDIR)$(mandir) + $(INSTALL_PROGRAM) -d $(DESTDIR)$(man8dir) + $(INSTALL_PROGRAM) -m 644 $(EXEC).8.gz $(DESTDIR)$(man8dir) $(INSTALL_PROGRAM) -d $(DESTDIR)$(man5dir) $(INSTALL_PROGRAM) -m 644 $(EXEC).conf.5.gz $(DESTDIR)$(man5dir) @@ -34,7 +34,7 @@ uninstall: $(RM) $(DESTDIR)$(bindir)/$(EXEC) $(RM) $(DESTDIR)$(udevrulesdir)/11-dm-mpath.rules $(RM) $(DESTDIR)$(libudevdir)/rules.d/56-multipath.rules - $(RM) $(DESTDIR)$(mandir)/$(EXEC).8.gz + $(RM) $(DESTDIR)$(man8dir)/$(EXEC).8.gz $(RM) $(DESTDIR)$(man5dir)/$(EXEC).conf.5.gz clean: diff --git a/multipath/main.c b/multipath/main.c index ee00fdb..06add30 100644 --- a/multipath/main.c +++ b/multipath/main.c @@ -521,6 +521,7 @@ main (int argc, char *argv[]) if (!conf) exit(1); multipath_conf = conf; + conf->retrigger_tries = 0; while ((arg = getopt(argc, argv, ":adchl::FfM:v:p:b:BritquwW")) != EOF ) { switch(arg) { case 1: printf("optarg : %s\n",optarg); diff --git 
a/multipath/multipath.8 b/multipath/multipath.8 index 966139e..f0b1ff0 100644 --- a/multipath/multipath.8 +++ b/multipath/multipath.8 @@ -1,7 +1,24 @@ -.TH MULTIPATH 8 "July 2006" "" "Linux Administrator's Manual" +.\" ---------------------------------------------------------------------------- +.\" Update the date below if you make any significant change. +.\" Make sure there are no errors with: +.\" groff -z -wall -b -e -t multipath/multipath.8 +.\" +.\" ---------------------------------------------------------------------------- +. +.TH MULTIPATH 8 2016-10-26 "Linux" +. +. +.\" ---------------------------------------------------------------------------- .SH NAME -multipath \- Device mapper target autoconfig +.\" ---------------------------------------------------------------------------- +. +multipath \- Device mapper target autoconfig. +. +. +.\" ---------------------------------------------------------------------------- .SH SYNOPSIS +.\" ---------------------------------------------------------------------------- +. .B multipath .RB [\| \-v\ \c .IR verbosity \|] @@ -10,124 +27,166 @@ multipath \- Device mapper target autoconfig .RB [\| \-d \|] .RB [\| \-h | \-l | \-ll | \-f | \-t | \-F | \-B | \-c | \-q | \|-r | \|-i | \-a | \|-u | \-w | \-W \|] .RB [\| \-p\ \c -.BR failover | multibus | group_by_serial | group_by_prio | group_by_node_name \|] +.IR failover | multibus | group_by_serial | group_by_prio | group_by_node_name \|] .RB [\| device \|] +. +. +.\" ---------------------------------------------------------------------------- .SH DESCRIPTION +.\" ---------------------------------------------------------------------------- +. .B multipath is used to detect and coalesce multiple paths to devices, for fail-over or performance reasons. +. +. +.\" ---------------------------------------------------------------------------- .SH OPTIONS +.\" ---------------------------------------------------------------------------- +. .TP -.B \-v " level" -verbosity, print all paths and multipaths +.BI \-v " level" +Verbosity, print all paths and multipaths: .RS 1.2i .TP 1.2i -.B 0 -no output +.I 0 +No output. .TP -.B 1 -print the created or updated multipath names only, for use to feed other tools like kpartx +.I 1 +Print the created or updated multipath names only, for use to feed other tools like kpartx. .TP -.B 2 + -print all info : detected paths, coalesced paths (ie multipaths) and device maps +.I 2 + +Print all info: detected paths, coalesced paths (ie multipaths) and device maps. .RE +. .TP .B \-h -print usage text +Print usage text. +. .TP .B \-d -dry run, do not create or update devmaps +Dry run, do not create or update devmaps. +. .TP .B \-l -show the current multipath topology from information fetched in sysfs and the device mapper +Show the current multipath topology from information fetched in sysfs and the device mapper. +. .TP .B \-ll -show the current multipath topology from all available information (sysfs, the device mapper, path checkers ...) +Show the current multipath topology from all available information (sysfs, the device mapper, path checkers ...). +. .TP .B \-f -flush a multipath device map specified as parameter, if unused +Flush a multipath device map specified as parameter, if unused. +. .TP .B \-F -flush all unused multipath device maps +Flush all unused multipath device maps. +. .TP .B \-t -print internal hardware table to stdout +Print internal hardware table to stdout. +. .TP .B \-r -force devmap reload +Force devmap reload. +. 
.TP .B \-i -ignore wwids file when processing devices +Ignore WWIDs file when processing devices. +. .TP .B \-B -treat the bindings file as read only +Treat the bindings file as read only. +. .TP -.B \-b " bindings_file" -set user_friendly_names bindings file location. The default is -/etc/multipath/bindings +.BI \-b " bindings_file" +Set user_friendly_names bindings file location. The default is +\fI/etc/multipath/bindings\fR. +. .TP .B \-c -check if a block device should be a path in a multipath device +Check if a block device should be a path in a multipath device. +. .TP .B \-q -allow device tables with queue_if_no_path when multipathd is not running +Allow device tables with \fIqueue_if_no_path\fR when multipathd is not running. +. .TP .B \-a -add the wwid for the specified device to the wwids file +Add the WWID for the specified device to the WWIDs file. +. .TP .B \-u -check if the device specified in the program environment should be +Check if the device specified in the program environment should be a path in a multipath device. +. .TP .B \-w -remove the wwid for the specified device from the wwids file +Remove the WWID for the specified device from the WWIDs file. +. .TP .B \-W -reset the wwids file to only include the current multipath devices +Reset the WWIDs file to only include the current multipath devices. +. .TP .BI \-p " policy" -force new maps to use the specified policy: +Force new maps to use the specified policy: .RS 1.2i .TP 1.2i -.B failover -1 path per priority group +.I failover +One path per priority group. .TP -.B multibus -all paths in 1 priority group +.I multibus +All paths in one priority group. .TP -.B group_by_serial -1 priority group per serial +.I group_by_serial +One priority group per serial number. .TP -.B group_by_prio -1 priority group per priority value. Priorities are determined by callout programs specified as a global, per-controller or per-multipath option in the configuration file +.I group_by_prio +One priority group per priority value. Priorities are determined by +callout programs specified as a global, per-controller or +per-multipath option in the configuration file. .TP -.B group_by_node_name -1 priority group per target node name. Target node names are fetched -in /sys/class/fc_transport/target*/node_name. +.I group_by_node_name +One priority group per target node name. Target node names are fetched +in \fI/sys/class/fc_transport/target*/node_name\fR. .TP .RE Existing maps are not modified. +. .TP .BI device -update only the devmap specified by +Update only the devmap specified by .IR device , which is either: .RS 1.2i .IP \[bu] -a devmap name +A devmap name. .IP \[bu] -a path associated with the desired devmap; the path may be in one of the following formats: +A path associated with the desired devmap; the path may be in one of the following formats: .RS 1.2i .IP \[bu] -.B /dev/sdb +.B /dev/sdX .IP \[bu] .B major:minor +. +. +.\" ---------------------------------------------------------------------------- .SH "SEE ALSO" +.\" ---------------------------------------------------------------------------- +. .BR multipathd (8), .BR multipath.conf (5), .BR kpartx (8), .BR udev (8), -.BR dmsetup (8) -.BR hotplug (8) +.BR dmsetup (8), +.BR hotplug (8). +. +. +.\" ---------------------------------------------------------------------------- .SH AUTHORS -.B multipath -was developed by Christophe Varoqui, and others. +.\" ---------------------------------------------------------------------------- +. 
+\fImultipath-tools\fR was developed by Christophe Varoqui +and others. +.\" EOF diff --git a/multipath/multipath.conf.5 b/multipath/multipath.conf.5 index 55fde22..b7d7e59 100644 --- a/multipath/multipath.conf.5 +++ b/multipath/multipath.conf.5 @@ -3,11 +3,11 @@ .\" Make sure there are no errors with: .\" groff -z -wall -b -e -t multipath/multipath.conf.5 .\" -.\" TODO: Look for XXX +.\" TODO: Look for XXX and ??? .\" .\" ---------------------------------------------------------------------------- . -.TH MULTIPATH.CONF 5 2016-08-07 "Linux" +.TH MULTIPATH.CONF 5 2016-10-18 "Linux" . . .\" ---------------------------------------------------------------------------- @@ -162,15 +162,19 @@ kernel multipath target. There are three selector algorithms: .RS .TP 12 .I "round-robin 0" -Loop through every path in the path group, sending the same amount of IO to -each. +Loop through every path in the path group, sending the same amount of I/O to +each. Some aspects of behavior can be controlled with the attributes: +\fIrr_min_io\fR, \fIrr_min_io_rq\fR and \fIrr_weight\fR. .TP +.\" XXX .I "queue-length 0" -Send the next bunch of IO down the path with the least amount of outstanding IO. +(Since ??? kernel) Choose the path for the next bunch of I/O based on the amount +of outstanding I/O to the path. .TP +.\" XXX .I "service-time 0" -Choose the path for the next bunch of IO based on the amount of outstanding IO -to the path and its relative throughput. +(Since ??? kernel) Choose the path for the next bunch of I/O based on the amount +of outstanding I/O to the path and its relative throughput. .TP Default value is: \fBservice-time 0\fR .RE @@ -241,7 +245,7 @@ generate the path priority. This prioritizer accepts the optional prio_arg .I emc (Hardware-dependent) Generate the path priority for DGC class arrays as CLARiiON CX/AX and -EMC VNX families. +EMC VNX and Unity families. .TP .I alua (Hardware-dependent) @@ -332,22 +336,31 @@ Default value is: \fB\fR .TP .B features Specify any device-mapper features to be used. Syntax is \fInum list\fR -where \fInum\fR is the number, between 0 and 6, of features in \fIlist\fR. +where \fInum\fR is the number, between 0 and 8, of features in \fIlist\fR. Possible values for the feature list are: .RS .TP 12 +.\" XXX .I queue_if_no_path -(Superseded by \fIno_path_retry\fR) Queue IO if no path is active. Identical to -the \fIno_path_retry\fR with \fIqueue\fR value. See KNOWN ISSUES. +(Superseded by \fIno_path_retry\fR) (Since ??? kernel) Queue I/O if no path is active. +Identical to the \fIno_path_retry\fR with \fIqueue\fR value. See KNOWN ISSUES. .TP .I no_partitions Disable automatic partitions generation via kpartx. .TP -.I pg_init_retries -Number of times to retry pg_init, it must be between 1 and 50. +.\" XXX +.I pg_init_retries +(Since ??? kernel) Number of times to retry pg_init, it must be between 1 and 50. .TP -.I pg_init_delay_msecs -Number of msecs before pg_init retry, it must be between 0 and 60000. +.\" XXX +.I pg_init_delay_msecs +(Since ??? kernel) Number of msecs before pg_init retry, it must be between 0 and 60000. +.TP +.\" XXX +.I queue_mode +(Since ??? kernel) Select the the queue_mode per multipath device. +Where can be \fIbio\fR, \fIrq\fR or \fImq\fR. Which corresponds to +bio-based, request_fn rq-based, and blk-mq rq-based respectively. .TP Default value is: \fB0\fR .RE @@ -369,7 +382,7 @@ Issue a \fITEST UNIT READY\fR command to the device. 
.I emc_clariion (Hardware-dependent) Query the DGC/EMC specific EVPD page 0xC0 to determine the path state -for CLARiiON CX/AX and EMC VNX arrays families. +for CLARiiON CX/AX and EMC VNX and Unity arrays families. .TP .I hp_sw (Hardware-dependent) @@ -409,6 +422,8 @@ Default value is: \fBmpath\fR .TP .B failback Tell multipathd how to manage path group failback. +To select \fIimmediate\fR or a \fIvalue\fR, it's mandatory that the device +has support for a working prioritizer. .RS .TP 12 .I immediate @@ -433,8 +448,9 @@ Default value is: \fBmanual\fR . .TP .B rr_min_io -The number of IO to route to a path before switching to the next in -the same path group. This is only for BIO based multipath. +Number of I/O requests to route to a path before switching to the next in the +same path group. This is only for \fIBlock I/O\fR(BIO) based multipath and +only apply to \fIround-robin\fR path_selector. .RS .TP Default value is: \fB1000\fR @@ -443,8 +459,9 @@ Default value is: \fB1000\fR . .TP .B rr_min_io_rq -The number of IO requests to route to a path before switching to the -next in the same path group. This is only for request based multipath. +Number of I/O requests to route to a path before switching to the next in the +same path group. This is only for \fIRequest\fR based multipath and +only apply to \fIround-robin\fR path_selector. .RS .TP Default value is: \fB1\fR @@ -467,11 +484,12 @@ Default value is: \fBmax\fR . .TP .B rr_weight -If set to \fIpriorities\fR the multipath configurator will assign -path weights as "path prio * rr_min_io". Possible values are +If set to \fIpriorities\fR the multipath configurator will assign path weights +as "path prio * rr_min_io". Possible values are .I priorities or .I uniform . +Only apply to \fIround-robin\fR path_selector. .RS .TP Default value is: \fBuniform\fR @@ -480,14 +498,21 @@ Default value is: \fBuniform\fR . .TP .B no_path_retry -Specify the number of retries until disable queueing, or +Specify what to do when all paths are down. Possible values are: +.RS +.TP 12 +.I value > 0 +number of retries until disable I/O queueing. +.TP .I fail -for immediate failure (no queueing), +for immediate failure (no I/O queueing). +.TP .I queue -for never stop queueing. If unset no queueing is attempted. See KNOWN ISSUES. -.RS +for never stop I/O queueing. Similar to \fIqueue_if_no_path\fR. .TP -Default value is: \fB\fR +See KNOWN ISSUES. +.TP +Default value is: \fBfail\fR .RE . . @@ -498,9 +523,9 @@ If set to , when multipathd stops, queueing will be turned off for all devices. This is useful for devices that set no_path_retry. If a machine is shut down while all paths to a device are down, it is possible to hang waiting -for IO to return from the device after multipathd has been stopped. Without +for I/O to return from the device after multipathd has been stopped. Without multipathd running, access to the paths cannot be restored, and the kernel -cannot be told to stop queueing IO. Setting queue_without_daemon to +cannot be told to stop queueing I/O. Setting queue_without_daemon to .I no , avoids this problem. .RS @@ -549,7 +574,7 @@ Default value is: \fBno\fR .TP .B fast_io_fail_tmo Specify the number of seconds the SCSI layer will wait after a problem has been -detected on a FC remote port before failing IO to devices on that remote port. +detected on a FC remote port before failing I/O to devices on that remote port. This should be smaller than dev_loss_tmo. Setting this to .I off will disable the timeout. 
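The no_path_retry values documented above interact with the retry_tick arithmetic added to libmultipath/structs_vec.c earlier in this patch (retry_tick = no_path_retry * checkint + 1, decremented roughly once per second by the checker loop). The helper below is a rough, hypothetical sketch of the resulting queueing window, assuming checkint equals polling_interval in seconds; it is not part of the library.

/*
 * Back-of-the-envelope sketch of the queueing window implied by
 * no_path_retry, mirroring the retry_tick arithmetic added to
 * libmultipath/structs_vec.c in this patch.  Hypothetical helper;
 * assumes retry_tick is decremented about once per second and that
 * checkint equals polling_interval (in seconds).
 */
#include <stdio.h>

static long queueing_window_secs(int no_path_retry, unsigned int checkint)
{
	if (no_path_retry <= 0)
		return 0;	/* "fail" never queues; "queue" never expires */

	/* "+ 1": retry_tick may be decremented once before retrying starts */
	return (long)no_path_retry * checkint + 1;
}

int main(void)
{
	/* e.g. no_path_retry 12 with polling_interval 5: about a minute */
	printf("~%ld seconds of queueing after the last path fails\n",
	       queueing_window_secs(12, 5));
	return 0;
}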
@@ -817,6 +842,17 @@ Default value is: \fB30\fR .RE . . +.TP +.B skip_kpartx +If set to +.I yes +, kpartx will not automatically create partitions on the device. +.RS +.TP +The default is \fBno\fR +.RE +. +. .\" ---------------------------------------------------------------------------- .SH "blacklist section" .\" ---------------------------------------------------------------------------- @@ -831,6 +867,10 @@ The following keywords are recognized: .TP 17 .B devnode Regular expression of the device nodes to be excluded. +.RS +.TP +Default value is: \fB^(ram|raw|loop|fd|md|dm-|sr|scd|st|dcssblk)[0-9]\fR, \fB^(td|hd|vd)[a-z]\fR and \fB^nvme\fR +.RE .TP .B wwid The \fIWorld Wide Identification\fR of a device. @@ -870,7 +910,7 @@ The \fIWorld Wide Identification\fR of a device. Regular expression of the udev property to be whitelisted. .RS .TP -Default value is: \fB(ID_WWN|SCSI_IDENT_.*)\fR +Default value is: \fB(SCSI_IDENT_|ID_WWN)\fR .RE .TP .B device @@ -943,6 +983,8 @@ are taken from the \fIdefaults\fR or \fIdevices\fR section: .B delay_watch_checks .TP .B delay_wait_checks +.TP +.B skip_kpartx .RE .PD .LP @@ -984,7 +1026,8 @@ The following hardware handler are implemented: .TP 12 .I 1 emc (Hardware-dependent) -Hardware handler for DGC class arrays as CLARiiON CX/AX and EMC VNX families. +Hardware handler for DGC class arrays as CLARiiON CX/AX and EMC VNX and Unity +families. .TP .I 1 rdac (Hardware-dependent) @@ -1051,6 +1094,8 @@ section: .B delay_watch_checks .TP .B delay_wait_checks +.TP +.B skip_kpartx .RE .PD .LP @@ -1111,6 +1156,8 @@ the values are taken from the \fIdevices\fR or \fIdefaults\fR sections: .B delay_watch_checks .TP .B delay_wait_checks +.TP +.B skip_kpartx .RE .PD .LP diff --git a/multipathd/Makefile b/multipathd/Makefile index 03d7815..ae06f9e 100644 --- a/multipathd/Makefile +++ b/multipathd/Makefile @@ -6,18 +6,19 @@ include ../Makefile.inc #CFLAGS += -DLCKDBG #CFLAGS += -D_DEBUG_ #CFLAGS += -DLOGDBG -CFLAGS += -I$(multipathdir) -I$(mpathpersistdir) -I$(mpathcmddir) +CFLAGS += -I$(multipathdir) -I$(mpathpersistdir) -I$(mpathcmddir) \ + -I$(thirdpartydir) -LDFLAGS += -ludev -ldl -L$(multipathdir) -lmultipath -L$(mpathpersistdir) \ +LIBDEPS += -ludev -ldl -L$(multipathdir) -lmultipath -L$(mpathpersistdir) \ -lmpathpersist -L$(mpathcmddir) -lmpathcmd -lurcu -lpthread \ -ldevmapper -lreadline ifdef SYSTEMD CFLAGS += -DUSE_SYSTEMD=$(SYSTEMD) ifeq ($(shell test $(SYSTEMD) -gt 209 && echo 1), 1) - LDFLAGS += -lsystemd + LIBDEPS += -lsystemd else - LDFLAGS += -lsystemd-daemon + LIBDEPS += -lsystemd-daemon endif endif @@ -28,7 +29,7 @@ EXEC = multipathd all : $(EXEC) $(EXEC): $(OBJS) - $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) -o $(EXEC) + $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) -o $(EXEC) $(LIBDEPS) $(GZIP) $(EXEC).8 > $(EXEC).8.gz install: @@ -39,12 +40,12 @@ ifdef SYSTEMD $(INSTALL_PROGRAM) -m 644 $(EXEC).service $(DESTDIR)$(unitdir) $(INSTALL_PROGRAM) -m 644 $(EXEC).socket $(DESTDIR)$(unitdir) endif - $(INSTALL_PROGRAM) -d $(DESTDIR)$(mandir) - $(INSTALL_PROGRAM) -m 644 $(EXEC).8.gz $(DESTDIR)$(mandir) + $(INSTALL_PROGRAM) -d $(DESTDIR)$(man8dir) + $(INSTALL_PROGRAM) -m 644 $(EXEC).8.gz $(DESTDIR)$(man8dir) uninstall: $(RM) $(DESTDIR)$(bindir)/$(EXEC) - $(RM) $(DESTDIR)$(mandir)/$(EXEC).8.gz + $(RM) $(DESTDIR)$(man8dir)/$(EXEC).8.gz $(RM) $(DESTDIR)$(unitdir)/$(EXEC).service $(RM) $(DESTDIR)$(unitdir)/$(EXEC).socket diff --git a/multipathd/cli.c b/multipathd/cli.c index 9a19728..50161be 100644 --- a/multipathd/cli.c +++ b/multipathd/cli.c @@ -454,7 +454,6 @@ parse_cmd (char * cmd, 
char ** reply, int * len, void * data, int timeout ) struct handler * h; vector cmdvec = NULL; struct timespec tmo; - struct timeval now; r = get_cmdvec(cmd, &cmdvec); @@ -476,13 +475,13 @@ parse_cmd (char * cmd, char ** reply, int * len, void * data, int timeout ) /* * execute handler */ - if (gettimeofday(&now, NULL) == 0) { - tmo.tv_sec = now.tv_sec + timeout; - tmo.tv_nsec = now.tv_usec * 1000; + if (clock_gettime(CLOCK_MONOTONIC, &tmo) == 0) { + tmo.tv_sec += timeout; } else { tmo.tv_sec = 0; } if (h->locked) { + int locked = 0; struct vectors * vecs = (struct vectors *)data; pthread_cleanup_push(cleanup_lock, &vecs->lock); @@ -493,10 +492,11 @@ parse_cmd (char * cmd, char ** reply, int * len, void * data, int timeout ) r = 0; } if (r == 0) { + locked = 1; pthread_testcancel(); r = h->fn(cmdvec, reply, len, data); } - pthread_cleanup_pop(!r); + pthread_cleanup_pop(locked); } else r = h->fn(cmdvec, reply, len, data); free_keys(cmdvec); diff --git a/multipathd/cli_handlers.c b/multipathd/cli_handlers.c index 8ff4362..b0eeca6 100644 --- a/multipathd/cli_handlers.c +++ b/multipathd/cli_handlers.c @@ -498,9 +498,14 @@ show_maps (char ** r, int *len, struct vectors * vecs, char * style, c += snprint_multipath_header(c, reply + maxlen - c, style); - vector_foreach_slot(vecs->mpvec, mpp, i) + vector_foreach_slot(vecs->mpvec, mpp, i) { + if (update_multipath(vecs, mpp->alias, 0)) { + i--; + continue; + } c += snprint_multipath(c, reply + maxlen - c, style, mpp, pretty); + } again = ((c - reply) == (maxlen - 1)); @@ -997,6 +1002,8 @@ cli_disable_queueing(void *v, char **reply, int *len, void *data) return 1; } + if (mpp->nr_active == 0) + mpp->stat_map_failures++; mpp->retry_tick = -1; dm_queue_if_no_path(mpp->alias, 0); return 0; @@ -1011,6 +1018,8 @@ cli_disable_all_queueing(void *v, char **reply, int *len, void *data) condlog(2, "disable queueing (operator)"); vector_foreach_slot(vecs->mpvec, mpp, i) { + if (mpp->nr_active == 0) + mpp->stat_map_failures++; mpp->retry_tick = -1; dm_queue_if_no_path(mpp->alias, 0); } @@ -1078,19 +1087,21 @@ cli_resume(void * v, char ** reply, int * len, void * data) char * param = get_keyparam(v, MAP); int r; struct multipath * mpp; + uint16_t udev_flags; param = convert_dev(param, 0); mpp = find_mp_by_alias(vecs->mpvec, param); if (!mpp) return 1; + udev_flags = (mpp->skip_kpartx)? 
MPATH_UDEV_NO_KPARTX_FLAG : 0; if (mpp->wait_for_udev) { condlog(2, "%s: device not fully created, failing resume", mpp->alias); return 1; } - r = dm_simplecmd_noflush(DM_DEVICE_RESUME, param, 0); + r = dm_simplecmd_noflush(DM_DEVICE_RESUME, param, udev_flags); condlog(2, "%s: resume (operator)", param); diff --git a/multipathd/main.c b/multipathd/main.c index 001eb8c..aec89e7 100644 --- a/multipathd/main.c +++ b/multipathd/main.c @@ -24,6 +24,11 @@ #include #include +/* + * libmultipath + */ +#include "time-util.h" + /* * libcheckers */ @@ -79,6 +84,7 @@ int uxsock_timeout; #include "lock.h" #include "waiter.h" #include "wwids.h" +#include "../third-party/valgrind/drd.h" #define FILE_NAME_SIZE 256 #define CMDSIZE 160 @@ -106,7 +112,7 @@ int ignore_new_devs; enum daemon_status running_state = DAEMON_INIT; pid_t daemon_pid; pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER; -pthread_cond_t config_cond = PTHREAD_COND_INITIALIZER; +pthread_cond_t config_cond; /* * global copy of vecs for use in sig handlers @@ -193,7 +199,7 @@ int set_config_state(enum daemon_status state) if (running_state != DAEMON_IDLE) { struct timespec ts; - clock_gettime(CLOCK_REALTIME, &ts); + clock_gettime(CLOCK_MONOTONIC, &ts); ts.tv_sec += 1; rc = pthread_cond_timedwait(&config_cond, &config_lock, &ts); @@ -709,7 +715,10 @@ ev_add_path (struct path * pp, struct vectors * vecs) goto fail; /* leave path added to pathvec */ } mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid); - if (mpp && mpp->wait_for_udev) { + if (mpp && mpp->wait_for_udev && + (pathcount(mpp, PATH_UP) > 0 || + (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT))) { + /* if wait_for_udev is set and valid paths exist */ mpp->wait_for_udev = 2; orphan_path(pp, "waiting for create to complete"); return 0; @@ -883,6 +892,7 @@ ev_remove_path (struct path *pp, struct vectors * vecs) mpp->retry_tick = 0; mpp->no_path_retry = NO_PATH_RETRY_FAIL; mpp->flush_on_last_del = FLUSH_IN_PROGRESS; + mpp->stat_map_failures++; dm_queue_if_no_path(mpp->alias, 0); } if (!flush_map(mpp, vecs, 1)) { @@ -947,51 +957,61 @@ static int uev_update_path (struct uevent *uev, struct vectors * vecs) { int ro, retval = 0; + struct path * pp; + struct config *conf; + int disable_changed_wwids; + + conf = get_multipath_config(); + disable_changed_wwids = conf->disable_changed_wwids; + put_multipath_config(conf); ro = uevent_get_disk_ro(uev); - if (ro >= 0) { - struct path * pp; - struct multipath *mpp = NULL; + pthread_cleanup_push(cleanup_lock, &vecs->lock); + lock(&vecs->lock); + pthread_testcancel(); - condlog(2, "%s: update path write_protect to '%d' (uevent)", - uev->kernel, ro); - pthread_cleanup_push(cleanup_lock, &vecs->lock); - lock(&vecs->lock); - pthread_testcancel(); - /* - * pthread_mutex_lock() and pthread_mutex_unlock() - * need to be at the same indentation level, hence - * this slightly convoluted codepath. - */ - pp = find_path_by_dev(vecs->pathvec, uev->kernel); - if (pp) { - if (pp->initialized == INIT_REQUESTED_UDEV) { - retval = 2; - } else { - mpp = pp->mpp; - if (mpp && mpp->wait_for_udev) { - mpp->wait_for_udev = 2; - mpp = NULL; - retval = 0; + pp = find_path_by_dev(vecs->pathvec, uev->kernel); + if (pp) { + struct multipath *mpp = pp->mpp; + + if (disable_changed_wwids && + (strlen(pp->wwid) || pp->wwid_changed)) { + char wwid[WWID_SIZE]; + + strcpy(wwid, pp->wwid); + get_uid(pp, pp->state, uev->udev); + if (strcmp(wwid, pp->wwid) != 0) { + condlog(0, "%s: path wwid changed from '%s' to '%s'. 
disallowing", uev->kernel, wwid, pp->wwid); + strcpy(pp->wwid, wwid); + if (!pp->wwid_changed) { + pp->wwid_changed = 1; + pp->tick = 1; + dm_fail_path(pp->mpp->alias, pp->dev_t); } - } - if (mpp) { - retval = reload_map(vecs, mpp, 0, 1); + goto out; + } else + pp->wwid_changed = 0; + } + if (pp->initialized == INIT_REQUESTED_UDEV) + retval = uev_add_path(uev, vecs); + else if (mpp && ro >= 0) { + condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro); + + if (mpp->wait_for_udev) + mpp->wait_for_udev = 2; + else { + retval = reload_map(vecs, mpp, 0, 1); condlog(2, "%s: map %s reloaded (retval %d)", uev->kernel, mpp->alias, retval); } } - lock_cleanup_pop(vecs->lock); - if (!pp) { - condlog(0, "%s: spurious uevent, path not found", - uev->kernel); - return 1; - } - if (retval == 2) - return uev_add_path(uev, vecs); } +out: + lock_cleanup_pop(vecs->lock); + if (!pp) + condlog(0, "%s: spurious uevent, path not found", uev->kernel); return retval; } @@ -1007,7 +1027,7 @@ map_discovery (struct vectors * vecs) vector_foreach_slot (vecs->mpvec, mpp, i) if (setup_multipath(vecs, mpp)) - return 1; + i--; return 0; } @@ -1391,6 +1411,7 @@ retry_count_tick(vector mpvec) mpp->stat_total_queueing_time++; condlog(4, "%s: Retrying.. No active path", mpp->alias); if(--mpp->retry_tick == 0) { + mpp->stat_map_failures++; dm_queue_if_no_path(mpp->alias, 0); condlog(2, "%s: Disable queueing", mpp->alias); } @@ -1448,8 +1469,7 @@ void repair_path(struct path * pp) return; checker_repair(&pp->checker); - if (strlen(checker_message(&pp->checker))) - LOG_MSG(1, checker_message(&pp->checker)); + LOG_MSG(1, checker_message(&pp->checker)); } /* @@ -1515,6 +1535,12 @@ check_path (struct vectors * vecs, struct path * pp, int ticks) } else checker_clear_message(&pp->checker); + if (pp->wwid_changed) { + condlog(2, "%s: path wwid has changed. 
Refusing to use", + pp->dev); + newstate = PATH_DOWN; + } + if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) { condlog(2, "%s: unusable path", pp->dev); conf = get_multipath_config(); @@ -1561,7 +1587,7 @@ check_path (struct vectors * vecs, struct path * pp, int ticks) if ((newstate == PATH_UP || newstate == PATH_GHOST) && pp->wait_checks > 0) { - if (pp->mpp && pp->mpp->nr_active > 0) { + if (pp->mpp->nr_active > 0) { pp->state = PATH_DELAYED; pp->wait_checks--; return 1; @@ -1584,8 +1610,7 @@ check_path (struct vectors * vecs, struct path * pp, int ticks) int oldstate = pp->state; pp->state = newstate; - if (strlen(checker_message(&pp->checker))) - LOG_MSG(1, checker_message(&pp->checker)); + LOG_MSG(1, checker_message(&pp->checker)); /* * upon state change, reset the checkint @@ -1595,7 +1620,7 @@ check_path (struct vectors * vecs, struct path * pp, int ticks) pp->checkint = conf->checkint; put_multipath_config(conf); - if (newstate == PATH_DOWN || newstate == PATH_SHAKY) { + if (newstate == PATH_DOWN || newstate == PATH_SHAKY || newstate == PATH_TIMEOUT) { /* * proactively fail path in the DM */ @@ -1697,8 +1722,7 @@ check_path (struct vectors * vecs, struct path * pp, int ticks) pp->tick = pp->checkint; } } - else if (newstate == PATH_DOWN && - strlen(checker_message(&pp->checker))) { + else if (newstate == PATH_DOWN) { int log_checker_err; conf = get_multipath_config(); @@ -1736,6 +1760,19 @@ check_path (struct vectors * vecs, struct path * pp, int ticks) return 1; } +static void init_path_check_interval(struct vectors *vecs) +{ + struct config *conf; + struct path *pp; + unsigned int i; + + vector_foreach_slot (vecs->pathvec, pp, i) { + conf = get_multipath_config(); + pp->checkint = conf->checkint; + put_multipath_config(conf); + } +} + static void * checkerloop (void *ap) { @@ -1744,7 +1781,7 @@ checkerloop (void *ap) int count = 0; unsigned int i; struct itimerval timer_tick_it; - struct timeval last_time; + struct timespec last_time; struct config *conf; pthread_cleanup_push(rcu_unregister, NULL); @@ -1753,34 +1790,24 @@ checkerloop (void *ap) vecs = (struct vectors *)ap; condlog(2, "path checkers start up"); - /* - * init the path check interval - */ - vector_foreach_slot (vecs->pathvec, pp, i) { - conf = get_multipath_config(); - pp->checkint = conf->checkint; - put_multipath_config(conf); - } - /* Tweak start time for initial path check */ - if (gettimeofday(&last_time, NULL) != 0) + if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0) last_time.tv_sec = 0; else last_time.tv_sec -= 1; while (1) { - struct timeval diff_time, start_time, end_time; + struct timespec diff_time, start_time, end_time; int num_paths = 0, ticks = 0, signo, strict_timing, rc = 0; sigset_t mask; - if (gettimeofday(&start_time, NULL) != 0) + if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0) start_time.tv_sec = 0; if (start_time.tv_sec && last_time.tv_sec) { - timersub(&start_time, &last_time, &diff_time); + timespecsub(&start_time, &last_time, &diff_time); condlog(4, "tick (%lu.%06lu secs)", - diff_time.tv_sec, diff_time.tv_usec); - last_time.tv_sec = start_time.tv_sec; - last_time.tv_usec = start_time.tv_usec; + diff_time.tv_sec, diff_time.tv_nsec / 1000); + last_time = start_time; ticks = diff_time.tv_sec; } else { ticks = 1; @@ -1795,30 +1822,29 @@ checkerloop (void *ap) condlog(4, "timeout waiting for DAEMON_IDLE"); continue; } - if (vecs->pathvec) { - pthread_cleanup_push(cleanup_lock, &vecs->lock); - lock(&vecs->lock); - pthread_testcancel(); - vector_foreach_slot (vecs->pathvec, pp, 
i) { - rc = check_path(vecs, pp, ticks); - if (rc < 0) { - vector_del_slot(vecs->pathvec, i); - free_path(pp); - i--; - } else - num_paths += rc; - } - lock_cleanup_pop(vecs->lock); - } - if (vecs->mpvec) { - pthread_cleanup_push(cleanup_lock, &vecs->lock); - lock(&vecs->lock); - pthread_testcancel(); - defered_failback_tick(vecs->mpvec); - retry_count_tick(vecs->mpvec); - missing_uev_wait_tick(vecs); - lock_cleanup_pop(vecs->lock); + + pthread_cleanup_push(cleanup_lock, &vecs->lock); + lock(&vecs->lock); + pthread_testcancel(); + vector_foreach_slot (vecs->pathvec, pp, i) { + rc = check_path(vecs, pp, ticks); + if (rc < 0) { + vector_del_slot(vecs->pathvec, i); + free_path(pp); + i--; + } else + num_paths += rc; } + lock_cleanup_pop(vecs->lock); + + pthread_cleanup_push(cleanup_lock, &vecs->lock); + lock(&vecs->lock); + pthread_testcancel(); + defered_failback_tick(vecs->mpvec); + retry_count_tick(vecs->mpvec); + missing_uev_wait_tick(vecs); + lock_cleanup_pop(vecs->lock); + if (count) count--; else { @@ -1831,16 +1857,17 @@ checkerloop (void *ap) lock_cleanup_pop(vecs->lock); } - diff_time.tv_usec = 0; + diff_time.tv_nsec = 0; if (start_time.tv_sec && - gettimeofday(&end_time, NULL) == 0) { - timersub(&end_time, &start_time, &diff_time); + clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) { + timespecsub(&end_time, &start_time, &diff_time); if (num_paths) { unsigned int max_checkint; condlog(3, "checked %d path%s in %lu.%06lu secs", num_paths, num_paths > 1 ? "s" : "", - diff_time.tv_sec, diff_time.tv_usec); + diff_time.tv_sec, + diff_time.tv_nsec / 1000); conf = get_multipath_config(); max_checkint = conf->max_checkint; put_multipath_config(conf); @@ -1861,10 +1888,10 @@ checkerloop (void *ap) else { timer_tick_it.it_interval.tv_sec = 0; timer_tick_it.it_interval.tv_usec = 0; - if (diff_time.tv_usec) { + if (diff_time.tv_nsec) { timer_tick_it.it_value.tv_sec = 0; timer_tick_it.it_value.tv_usec = - (unsigned long)1000000 - diff_time.tv_usec; + 1000UL * 1000 * 1000 - diff_time.tv_nsec; } else { timer_tick_it.it_value.tv_sec = 1; timer_tick_it.it_value.tv_usec = 0; @@ -1899,21 +1926,29 @@ configure (struct vectors * vecs, int start_waiters) int i, ret; struct config *conf; - if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) + if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) { + condlog(0, "couldn't allocate path vec in configure"); return 1; + } - if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) + if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) { + condlog(0, "couldn't allocate multipath vec in configure"); return 1; + } - if (!(mpvec = vector_alloc())) + if (!(mpvec = vector_alloc())) { + condlog(0, "couldn't allocate new maps vec in configure"); return 1; + } /* * probe for current path (from sysfs) and map (from dm) sets */ ret = path_discovery(vecs->pathvec, DI_ALL); - if (ret < 0) + if (ret < 0) { + condlog(0, "configure failed at path discovery"); return 1; + } vector_foreach_slot (vecs->pathvec, pp, i){ conf = get_multipath_config(); @@ -1926,21 +1961,27 @@ configure (struct vectors * vecs, int start_waiters) pp->checkint = conf->checkint; put_multipath_config(conf); } - if (map_discovery(vecs)) + if (map_discovery(vecs)) { + condlog(0, "configure failed at map discovery"); return 1; + } /* * create new set of maps & push changed ones into dm */ - if (coalesce_paths(vecs, mpvec, NULL, 1, CMD_NONE)) + if (coalesce_paths(vecs, mpvec, NULL, 1, CMD_NONE)) { + condlog(0, "configure failed while coalescing paths"); return 1; + } /* * may need to remove some maps 
which are no longer relevant * e.g., due to blacklist changes in conf file */ - if (coalesce_maps(vecs, mpvec)) + if (coalesce_maps(vecs, mpvec)) { + condlog(0, "configure failed while coalescing maps"); return 1; + } dm_lib_release(); @@ -1965,11 +2006,16 @@ configure (struct vectors * vecs, int start_waiters) * start dm event waiter threads for these new maps */ vector_foreach_slot(vecs->mpvec, mpp, i) { - if (setup_multipath(vecs, mpp)) - return 1; - if (start_waiters) - if (start_waiter_thread(mpp, vecs)) - return 1; + if (setup_multipath(vecs, mpp)) { + i--; + continue; + } + if (start_waiters) { + if (start_waiter_thread(mpp, vecs)) { + remove_map(mpp, vecs, 1); + i--; + } + } } return 0; } @@ -2120,18 +2166,12 @@ sigusr2 (int sig) static void signal_init(void) { - sigset_t set; - - sigemptyset(&set); - sigaddset(&set, SIGPIPE); - pthread_sigmask(SIG_SETMASK, &set, NULL); - signal_set(SIGHUP, sighup); signal_set(SIGUSR1, sigusr1); signal_set(SIGUSR2, sigusr2); signal_set(SIGINT, sigend); signal_set(SIGTERM, sigend); - signal(SIGPIPE, SIG_IGN); + signal_set(SIGPIPE, sigend); } static void @@ -2217,8 +2257,8 @@ child (void * param) signal_init(); rcu_init(); - setup_thread_attr(&misc_attr, 64 * 1024, 1); - setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 1); + setup_thread_attr(&misc_attr, 64 * 1024, 0); + setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0); setup_thread_attr(&waiter_attr, 32 * 1024, 1); if (logsink == 1) { @@ -2327,6 +2367,8 @@ child (void * param) */ post_config_state(DAEMON_CONFIGURE); + init_path_check_interval(vecs); + /* * Start uevent listener early to catch events */ @@ -2516,6 +2558,13 @@ main (int argc, char *argv[]) int foreground = 0; struct config *conf; + ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf), + "Manipulated through RCU"); + ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state), + "Suppress complaints about unprotected running_state reads"); + ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout), + "Suppress complaints about this scalar variable"); + logsink = 1; if (getuid() != 0) { @@ -2529,6 +2578,8 @@ main (int argc, char *argv[]) strerror(errno)); umask(umask(077) | 022); + pthread_cond_init_mono(&config_cond); + udev = udev_new(); while ((arg = getopt(argc, argv, ":dsv:k::Bn")) != EOF ) { @@ -2555,6 +2606,7 @@ main (int argc, char *argv[]) exit(1); if (verbosity) conf->verbosity = verbosity; + uxsock_timeout = conf->uxsock_timeout; uxclnt(optarg, uxsock_timeout + 100); exit(0); case 'B': @@ -2579,6 +2631,7 @@ main (int argc, char *argv[]) exit(1); if (verbosity) conf->verbosity = verbosity; + uxsock_timeout = conf->uxsock_timeout; memset(cmd, 0x0, CMDSIZE); while (optind < argc) { if (strchr(argv[optind], ' ')) diff --git a/multipathd/multipathd.8 b/multipathd/multipathd.8 index 96a8fdb..4c765af 100644 --- a/multipathd/multipathd.8 +++ b/multipathd/multipathd.8 @@ -1,218 +1,307 @@ -.TH MULTIPATHD 8 "November 2009" "Linux Administrator's Manual" +.\" ---------------------------------------------------------------------------- +.\" Update the date below if you make any significant change. +.\" Make sure there are no errors with: +.\" groff -z -wall -b -e -t multipathd/multipathd.8 +.\" +.\" ---------------------------------------------------------------------------- +. +.TH MULTIPATHD 8 2016-10-27 Linux +. +. 
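The time-util.c file added earlier in this patch, together with the cli.c and main.c hunks above, moves timed condition-variable waits from gettimeofday()/CLOCK_REALTIME to CLOCK_MONOTONIC, so that a wall-clock jump (NTP step, manual date change) can no longer stretch or cut short a wait. Below is a self-contained sketch of the same pattern; the names are illustrative, not the daemon's own variables. Build with cc -pthread.

/*
 * Self-contained sketch of the CLOCK_MONOTONIC condition-variable wait
 * pattern this patch introduces (libmultipath/time-util.c and the
 * cli.c/main.c hunks above).  Names are illustrative.
 */
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond;	/* initialized below to use CLOCK_MONOTONIC */
static int ready;		/* predicate protected by 'lock' */

int main(void)
{
	pthread_condattr_t attr;
	struct timespec deadline;
	int rc;

	/* Same steps as pthread_cond_init_mono() in time-util.c */
	rc = pthread_condattr_init(&attr);
	assert(rc == 0);
	rc = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
	assert(rc == 0);
	rc = pthread_cond_init(&cond, &attr);
	assert(rc == 0);
	pthread_condattr_destroy(&attr);

	/* Deadline = now + 2s on the monotonic clock, so a wall-clock
	 * jump cannot distort the wait (the CLOCK_REALTIME problem). */
	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += 2;

	pthread_mutex_lock(&lock);
	while (!ready) {
		rc = pthread_cond_timedwait(&cond, &lock, &deadline);
		if (rc == ETIMEDOUT)
			break;
	}
	pthread_mutex_unlock(&lock);

	printf("%s\n", ready ? "signalled" : "timed out");
	return 0;
}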
+.\" ---------------------------------------------------------------------------- .SH NAME -multipathd \- multipath daemon - +.\" ---------------------------------------------------------------------------- +. +multipathd \- Multipath daemon. +. +. +.\" ---------------------------------------------------------------------------- .SH SYNOPSIS +.\" ---------------------------------------------------------------------------- +. .B multipathd -.RB [\| options \|] - +.RB [\| \-d | \-k \|] +.RB [\| \-s \|] +.RB [\| \-v\ \c +.IR verbosity \|] +.RB [\| \-B \|] +.RB [\| \-n \|] +. +. +.\" ---------------------------------------------------------------------------- .SH DESCRIPTION -The -.B multipathd -daemon is in charge of checking for failed paths. When this happens, -it will reconfigure the multipath map the path belongs to, so that this map -regains its maximum performance and redundancy. +.\" ---------------------------------------------------------------------------- +. +The \fBmultipathd\fR daemon is in charge of checking for failed paths. When this +happens, it will reconfigure the multipath map the path belongs to, so that this +map regains its maximum performance and redundancy. -This daemon executes the external multipath config tool when events occur. +This daemon executes the external \fBmultipath\fR tool when events occur. In turn, the multipath tool signals the multipathd daemon when it is done with devmap reconfiguration, so that it can refresh its failed path list. - +. +. +.\" ---------------------------------------------------------------------------- .SH OPTIONS +.\" ---------------------------------------------------------------------------- +. .TP .B \-d Foreground Mode. Don't daemonize, and print all messages to stdout and stderr. +. .TP .B \-s Suppress timestamps. Do not prefix logging messages with a timestamp. +. .TP -.B -v "level" -Verbosity level. Print additional information while running multipathd. A level of 0 means only print errors. A level of 3 or greater prints debugging information as well. +.BI \-v " level" +Verbosity level. Print additional information while running multipathd. A level +of 0 means only print errors. A level of 3 or greater prints debugging information +as well. +. .TP -.B -B -Read-only bindings file. Multipathd will not write to the user_friendly_names -bindings file. If a user_friendly_name doesn't already exist for a device, it +.B \-B +Read-only bindings file. multipathd will not write to the \fIuser_friendly_names\fR +bindings file. If a \fIuser_friendly_name\fR doesn't already exist for a device, it will use its WWID as its alias. -.TP -.B -k -multipathd will enter interactive mode. From this mode, the available commands can be viewed by entering "help". When you are finished entering commands, press CTRL-D to quit. -.TP -.B -n -ignore new devices. Multipathd will not create a multipath device unless the -wwid for the device is already listed in the wwids file. - +. +.TP +.B \-k +multipathd will enter interactive mode. From this mode, the available commands can +be viewed by entering '\fIhelp\fR'. When you are finished entering commands, press +\fBCTRL-D\fR to quit. +. +.TP +.B \-n +Ignore new devices. multipathd will not create a multipath device unless the +WWID for the device is already listed in the WWIDs file. +. +. +.\" ---------------------------------------------------------------------------- .SH COMMANDS +.\" ---------------------------------------------------------------------------- +. 
.TP The following commands can be used in interactive mode: +. .TP .B list|show paths Show the paths that multipathd is monitoring, and their state. +. .TP .B list|show paths format $format Show the paths that multipathd is monitoring, using a format string with path format wildcards. +. .TP .B list|show maps|multipaths Show the multipath devices that the multipathd is monitoring. +. .TP .B list|show maps|multipaths format $format Show the status of all multipath devices that the multipathd is monitoring, using a format string with multipath format wildcards. +. .TP .B list|show maps|multipaths status Show the status of all multipath devices that the multipathd is monitoring. +. .TP .B list|show maps|multipaths stats Show some statistics of all multipath devices that the multipathd is monitoring. +. .TP .B list|show maps|multipaths topology -Show the current multipath topology. Same as "multipath \-ll". +Show the current multipath topology. Same as '\fImultipath \-ll\fR'. +. .TP .B list|show topology -Show the current multipath topology. Same as "multipath \-ll". +Show the current multipath topology. Same as '\fImultipath \-ll\fR'. +. .TP .B list|show map|multipath $map topology -Show topology of a single multipath device specified by $map, e.g. 36005076303ffc56200000000000010aa. -This map could be obtained from "list maps". +Show topology of a single multipath device specified by $map, for example +36005076303ffc56200000000000010aa. This map could be obtained from +'\fIlist maps\fR'. +. .TP .B list|show wildcards -Show the format wildcards used in interactive commands taking $format +Show the format wildcards used in interactive commands taking $format. +. .TP .B list|show config -Show the currently used configuration, derived from default values and values specified within the configuration file /etc/multipath.conf. +Show the currently used configuration, derived from default values and values +specified within the configuration file \fI/etc/multipath.conf\fR. +. .TP .B list|show blacklist -Show the currently used blacklist rules, derived from default values and values specified within the configuration file /etc/multipath.conf. +Show the currently used blacklist rules, derived from default values and values +specified within the configuration file \fI/etc/multipath.conf\fR. +. .TP .B list|show devices -Show all available block devices by name including the information if they are blacklisted or not. +Show all available block devices by name including the information if they are +blacklisted or not. +. .TP .B list|show status -Show the number of path checkers in each possible state, the number of monitored paths, and whether multipathd is currently handling a uevent. +Show the number of path checkers in each possible state, the number of monitored +paths, and whether multipathd is currently handling a uevent. +. .TP .B list|show daemon -Show the current state of the multipathd daemon +Show the current state of the multipathd daemon. +. .TP .B add path $path Add a path to the list of monitored paths. $path is as listed in /sys/block (e.g. sda). +. .TP .B remove|del path $path Stop monitoring a path. $path is as listed in /sys/block (e.g. sda). +. .TP .B add map|multipath $map -Add a multipath device to the list of monitored devices. $map can either be a device-mapper device as listed in /sys/block (e.g. dm-0) or it can be the alias for the multipath device (e.g. mpath1) or the uid of the multipath device (e.g. 36005076303ffc56200000000000010aa). 
+Add a multipath device to the list of monitored devices. $map can either be a +device-mapper device as listed in /sys/block (e.g. dm-0) or it can be the alias +for the multipath device (e.g. mpath1) or the uid of the multipath device +(e.g. 36005076303ffc56200000000000010aa). +. .TP .B remove|del map|multipath $map Stop monitoring a multipath device. +. .TP .B resize map|multipath $map -Resizes map $map to the given size +Resizes map $map to the given size. +. .TP .B switch|switchgroup map|multipath $map group $group -Force a multipath device to switch to a specific path group. $group is the path group index, starting with 1. +Force a multipath device to switch to a specific path group. $group is the path +group index, starting with 1. +. .TP .B reconfigure -Reconfigures the multipaths. This should be triggered automatically after any hotplug event. +Reconfigures the multipaths. This should be triggered automatically after any +hotplug event. +. .TP .B suspend map|multipath $map Sets map $map into suspend state. +. .TP .B resume map|multipath $map Resumes map $map from suspend state. +. .TP .B reset map|multipath $map Reassign existing device-mapper table(s) to use the multipath device, instead of its path devices. +. .TP .B reload map|multipath $map Reload a multipath device. +. .TP .B fail path $path Sets path $path into failed state. +. .TP .B reinstate path $path Resumes path $path from failed state. +. .TP .B disablequeueing maps|multipaths Disable queueing on all multipath devices. +. .TP .B restorequeueing maps|multipaths Restore queueing on all multipath devices. +. .TP .B disablequeueing map|multipath $map -Disable queuing on multipathed map $map +Disable queuing on multipathed map $map. +. .TP .B restorequeueing map|multipath $map -Restore queuing on multipahted map $map +Restore queuing on multipathed map $map. +. .TP .B forcequeueing daemon Forces multipathd into queue_without_daemon mode, so that no_path_retry queueing -will not be disabled when the daemon stops +will not be disabled when the daemon stops. +. .TP .B restorequeueing daemon -Restores configured queue_without_daemon mode +Restores configured queue_without_daemon mode. +. .TP .B map|multipath $map setprstatus -Enable persistent reservation management on $map +Enable persistent reservation management on $map. +. .TP .B map|multipath $map unsetprstatus -Disable persistent reservation management on $map +Disable persistent reservation management on $map. +. .TP .B map|multipath $map getprstatus -Get the current persistent reservation management status of $map +Get the current persistent reservation management status of $map. +. .TP .B quit|exit End interactive session. +. .TP .B shutdown Stop multipathd. - +. +. +.\" ---------------------------------------------------------------------------- .SH "SYSTEMD INTEGRATION" -When compiled with systemd support two systemd service files are -installed, -.I multipathd.service -and -.I multipathd.socket -The -.I multipathd.socket -service instructs systemd to intercept the CLI command socket, so -that any call to the CLI interface will start-up the daemon if -required. -The -.I multipathd.service -file carries the definitions for controlling the multipath daemon. -The daemon itself uses the -.B sd_notify(3) -interface to communicate with systemd. The following unit keywords are -recognized: -.TP -.I WatchdogSec= +.\" ---------------------------------------------------------------------------- +.
+When compiled with systemd support, two systemd service files are installed, +\fImultipathd.service\fR and \fImultipathd.socket\fR. The \fImultipathd.socket\fR +service instructs systemd to intercept the CLI command socket, so that any call +to the CLI interface will start up the daemon if required. +The \fImultipathd.service\fR file carries the definitions for controlling the +multipath daemon. The daemon itself uses the \fBsd_notify\fR(3) interface to +communicate with systemd. The following unit keywords are recognized: +. +.TP +.B WatchdogSec= Enables the internal watchdog from systemd. multipath will send a -notification via -.B sd_notify(3) -to systemd to reset the watchdog. If specified the -.I polling_interval -and -.I max_polling_interval -settings will be overridden by the watchdog settings. - +notification via \fBsd_notify\fR(3) to systemd to reset the watchdog. If +specified the \fIpolling_interval\fR and \fImax_polling_interval\fR settings +will be overridden by the watchdog settings. Please note that systemd prior to version 207 has issues which prevent the systemd-provided watchdog from working correctly. So the watchdog is not enabled per default, but has to be enabled manually by updating -the multipathd.service file. -.TP -.I OOMScoreAdjust= -Overrides the internal OOM adjust mechanism -.TP -.I LimitNOFILE= -Overrides the -.I max_fds -configuration setting. - +the \fImultipathd.service\fR file. +. +.TP +.B OOMScoreAdjust= +Overrides the internal OOM adjust mechanism. +. +.TP +.B LimitNOFILE= +Overrides the \fImax_fds\fR configuration setting. +. +. +.\" ---------------------------------------------------------------------------- .SH "SEE ALSO" -.BR multipath (8) -.BR kpartx (8) -.BR sd_notify (3) -.BR system.service (5) -.SH "AUTHORS" -.B multipathd -was developed by Christophe Varoqui, and others. +.\" ---------------------------------------------------------------------------- +. +.BR multipath (8), +.BR kpartx (8), +.BR sd_notify (3), +.BR systemd.service (5). +. +. +.\" ---------------------------------------------------------------------------- +.SH AUTHORS +.\" ---------------------------------------------------------------------------- +. +\fImultipath-tools\fR was developed by Christophe Varoqui +and others.
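The WatchdogSec= keyword documented above only works if the daemon keeps notifying systemd within the configured interval. As a rough illustrative sketch, not taken from the multipathd sources (the helper name and the ping frequency are assumptions), a service built on sd_notify(3) would typically read the interval systemd exports in WATCHDOG_USEC and report in well before it expires:

#include <stdlib.h>             /* getenv(), strtoull() */
#include <unistd.h>             /* usleep() */
#include <systemd/sd-daemon.h>  /* sd_notify() */

/* Keep resetting the systemd watchdog; returns immediately if it is not enabled. */
static void watchdog_loop(void)
{
        const char *val = getenv("WATCHDOG_USEC");   /* interval in microseconds */
        unsigned long long usec = val ? strtoull(val, NULL, 10) : 0;

        while (usec) {
                sd_notify(0, "WATCHDOG=1");          /* tell systemd we are alive */
                usleep(usec / 2);                    /* ping twice per interval */
        }
}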
+.\" EOF diff --git a/multipathd/uxlsnr.c b/multipathd/uxlsnr.c index 7a9faf3..dfef03e 100644 --- a/multipathd/uxlsnr.c +++ b/multipathd/uxlsnr.c @@ -32,6 +32,7 @@ #include "defaults.h" #include "config.h" #include "mpath_cmd.h" +#include "time-util.h" #include "main.h" #include "cli.h" @@ -99,21 +100,22 @@ void free_polls (void) FREE(polls); } -void check_timeout(struct timeval start_time, char *inbuf, +void check_timeout(struct timespec start_time, char *inbuf, unsigned int timeout) { - struct timeval diff_time, end_time; + struct timespec diff_time, end_time; - if (start_time.tv_sec && gettimeofday(&end_time, NULL) == 0) { - timersub(&end_time, &start_time, &diff_time); + if (start_time.tv_sec && + clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) { unsigned long msecs; + timespecsub(&end_time, &start_time, &diff_time); msecs = diff_time.tv_sec * 1000 + - diff_time.tv_usec / 1000; + diff_time.tv_nsec / (1000 * 1000); if (msecs > timeout) condlog(2, "cli cmd '%s' timeout reached " "after %lu.%06lu secs", inbuf, - diff_time.tv_sec, diff_time.tv_usec); + diff_time.tv_sec, diff_time.tv_nsec / 1000); } } @@ -180,7 +182,7 @@ void * uxsock_listen(uxsock_trigger_fn uxsock_trigger, void * trigger_data) pthread_mutex_unlock(&client_lock); condlog(0, "%s: failed to realloc %d poll fds", "uxsock", 1 + num_clients); - pthread_yield(); + sched_yield(); continue; } old_clients = num_clients; @@ -220,7 +222,7 @@ void * uxsock_listen(uxsock_trigger_fn uxsock_trigger, void * trigger_data) /* see if a client wants to speak to us */ for (i = 1; i < num_clients + 1; i++) { if (polls[i].revents & POLLIN) { - struct timeval start_time; + struct timespec start_time; c = NULL; pthread_mutex_lock(&client_lock); @@ -236,7 +238,8 @@ void * uxsock_listen(uxsock_trigger_fn uxsock_trigger, void * trigger_data) i, polls[i].fd); continue; } - if (gettimeofday(&start_time, NULL) != 0) + if (clock_gettime(CLOCK_MONOTONIC, &start_time) + != 0) start_time.tv_sec = 0; if (recv_packet(c->fd, &inbuf, uxsock_timeout) != 0) { diff --git a/third-party/valgrind/drd.h b/third-party/valgrind/drd.h new file mode 100644 index 0000000..4615e5b --- /dev/null +++ b/third-party/valgrind/drd.h @@ -0,0 +1,571 @@ +/* + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (drd.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of DRD, a Valgrind tool for verification of + multithreaded programs. + + Copyright (C) 2006-2015 Bart Van Assche . + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. 
The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (drd.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + +#ifndef __VALGRIND_DRD_H +#define __VALGRIND_DRD_H + + +#include "valgrind.h" + + +/** Obtain the thread ID assigned by Valgrind's core. */ +#define DRD_GET_VALGRIND_THREADID \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID, \ + 0, 0, 0, 0, 0) + +/** Obtain the thread ID assigned by DRD. */ +#define DRD_GET_DRD_THREADID \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__DRD_GET_DRD_THREAD_ID, \ + 0, 0, 0, 0, 0) + + +/** Tell DRD not to complain about data races for the specified variable. */ +#define DRD_IGNORE_VAR(x) ANNOTATE_BENIGN_RACE_SIZED(&(x), sizeof(x), "") + +/** Tell DRD to no longer ignore data races for the specified variable. */ +#define DRD_STOP_IGNORING_VAR(x) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_FINISH_SUPPRESSION, \ + &(x), sizeof(x), 0, 0, 0) + +/** + * Tell DRD to trace all memory accesses for the specified variable + * until the memory that was allocated for the variable is freed. + */ +#define DRD_TRACE_VAR(x) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_START_TRACE_ADDR, \ + &(x), sizeof(x), 0, 0, 0) + +/** + * Tell DRD to stop tracing memory accesses for the specified variable. + */ +#define DRD_STOP_TRACING_VAR(x) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_STOP_TRACE_ADDR, \ + &(x), sizeof(x), 0, 0, 0) + +/** + * @defgroup RaceDetectionAnnotations Data race detection annotations. + * + * @see See also the source file producer-consumer. + */ +#define ANNOTATE_PCQ_CREATE(pcq) do { } while(0) + +/** Tell DRD that a FIFO queue has been destroyed. */ +#define ANNOTATE_PCQ_DESTROY(pcq) do { } while(0) + +/** + * Tell DRD that an element has been added to the FIFO queue at address pcq. + */ +#define ANNOTATE_PCQ_PUT(pcq) do { } while(0) + +/** + * Tell DRD that an element has been removed from the FIFO queue at address pcq, + * and that DRD should insert a happens-before relationship between the memory + * accesses that occurred before the corresponding ANNOTATE_PCQ_PUT(pcq) + * annotation and the memory accesses after this annotation. Correspondence + * between PUT and GET annotations happens in FIFO order. 
Since locking + * of the queue is needed anyway to add elements to or to remove elements from + * the queue, for DRD all four FIFO annotations are defined as no-ops. + */ +#define ANNOTATE_PCQ_GET(pcq) do { } while(0) + +/** + * Tell DRD that data races at the specified address are expected and must not + * be reported. + */ +#define ANNOTATE_BENIGN_RACE(addr, descr) \ + ANNOTATE_BENIGN_RACE_SIZED(addr, sizeof(*addr), descr) + +/* Same as ANNOTATE_BENIGN_RACE(addr, descr), but applies to + the memory range [addr, addr + size). */ +#define ANNOTATE_BENIGN_RACE_SIZED(addr, size, descr) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_START_SUPPRESSION, \ + addr, size, 0, 0, 0) + +/** Tell DRD to ignore all reads performed by the current thread. */ +#define ANNOTATE_IGNORE_READS_BEGIN() \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_LOADS, \ + 0, 0, 0, 0, 0); + + +/** Tell DRD to no longer ignore the reads performed by the current thread. */ +#define ANNOTATE_IGNORE_READS_END() \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_LOADS, \ + 1, 0, 0, 0, 0); + +/** Tell DRD to ignore all writes performed by the current thread. */ +#define ANNOTATE_IGNORE_WRITES_BEGIN() \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_STORES, \ + 0, 0, 0, 0, 0) + +/** Tell DRD to no longer ignore the writes performed by the current thread. */ +#define ANNOTATE_IGNORE_WRITES_END() \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_STORES, \ + 1, 0, 0, 0, 0) + +/** Tell DRD to ignore all memory accesses performed by the current thread. */ +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { ANNOTATE_IGNORE_READS_BEGIN(); ANNOTATE_IGNORE_WRITES_BEGIN(); } while(0) + +/** + * Tell DRD to no longer ignore the memory accesses performed by the current + * thread. + */ +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { ANNOTATE_IGNORE_READS_END(); ANNOTATE_IGNORE_WRITES_END(); } while(0) + +/** + * Tell DRD that size bytes starting at addr has been allocated by a custom + * memory allocator. + */ +#define ANNOTATE_NEW_MEMORY(addr, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_CLEAN_MEMORY, \ + addr, size, 0, 0, 0) + +/** Ask DRD to report every access to the specified address. */ +#define ANNOTATE_TRACE_MEMORY(addr) DRD_TRACE_VAR(*(char*)(addr)) + +/** + * Tell DRD to assign the specified name to the current thread. This name will + * be used in error messages printed by DRD. + */ +#define ANNOTATE_THREAD_NAME(name) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_SET_THREAD_NAME, \ + name, 0, 0, 0, 0) + +/*@}*/ + + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE ORDER OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end. +*/ +enum { + /* Ask the DRD tool to discard all information about memory accesses */ + /* and client objects for the specified range. This client request is */ + /* binary compatible with the similarly named Helgrind client request. */ + VG_USERREQ__DRD_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'), + /* args: Addr, SizeT. */ + + /* Ask the DRD tool the thread ID assigned by Valgrind. */ + VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID = VG_USERREQ_TOOL_BASE('D','R'), + /* args: none. */ + /* Ask the DRD tool the thread ID assigned by DRD. */ + VG_USERREQ__DRD_GET_DRD_THREAD_ID, + /* args: none. */ + + /* To tell the DRD tool to suppress data race detection on the */ + /* specified address range. 
*/ + VG_USERREQ__DRD_START_SUPPRESSION, + /* args: start address, size in bytes */ + /* To tell the DRD tool no longer to suppress data race detection on */ + /* the specified address range. */ + VG_USERREQ__DRD_FINISH_SUPPRESSION, + /* args: start address, size in bytes */ + + /* To ask the DRD tool to trace all accesses to the specified range. */ + VG_USERREQ__DRD_START_TRACE_ADDR, + /* args: Addr, SizeT. */ + /* To ask the DRD tool to stop tracing accesses to the specified range. */ + VG_USERREQ__DRD_STOP_TRACE_ADDR, + /* args: Addr, SizeT. */ + + /* Tell DRD whether or not to record memory loads in the calling thread. */ + VG_USERREQ__DRD_RECORD_LOADS, + /* args: Bool. */ + /* Tell DRD whether or not to record memory stores in the calling thread. */ + VG_USERREQ__DRD_RECORD_STORES, + /* args: Bool. */ + + /* Set the name of the thread that performs this client request. */ + VG_USERREQ__DRD_SET_THREAD_NAME, + /* args: null-terminated character string. */ + + /* Tell DRD that a DRD annotation has not yet been implemented. */ + VG_USERREQ__DRD_ANNOTATION_UNIMP, + /* args: char*. */ + + /* Tell DRD that a user-defined semaphore synchronization object + * is about to be created. */ + VG_USERREQ__DRD_ANNOTATE_SEM_INIT_PRE, + /* args: Addr, UInt value. */ + /* Tell DRD that a user-defined semaphore synchronization object + * has been destroyed. */ + VG_USERREQ__DRD_ANNOTATE_SEM_DESTROY_POST, + /* args: Addr. */ + /* Tell DRD that a user-defined semaphore synchronization + * object is going to be acquired (semaphore wait). */ + VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_PRE, + /* args: Addr. */ + /* Tell DRD that a user-defined semaphore synchronization + * object has been acquired (semaphore wait). */ + VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_POST, + /* args: Addr. */ + /* Tell DRD that a user-defined semaphore synchronization + * object is about to be released (semaphore post). */ + VG_USERREQ__DRD_ANNOTATE_SEM_POST_PRE, + /* args: Addr. */ + + /* Tell DRD to ignore the inter-thread ordering introduced by a mutex. */ + VG_USERREQ__DRD_IGNORE_MUTEX_ORDERING, + /* args: Addr. */ + + /* Tell DRD that a user-defined reader-writer synchronization object + * has been created. */ + VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE + = VG_USERREQ_TOOL_BASE('H','G') + 256 + 14, + /* args: Addr. */ + /* Tell DRD that a user-defined reader-writer synchronization object + * is about to be destroyed. */ + VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY + = VG_USERREQ_TOOL_BASE('H','G') + 256 + 15, + /* args: Addr. */ + /* Tell DRD that a lock on a user-defined reader-writer synchronization + * object has been acquired. */ + VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED + = VG_USERREQ_TOOL_BASE('H','G') + 256 + 17, + /* args: Addr, Int is_rw. */ + /* Tell DRD that a lock on a user-defined reader-writer synchronization + * object is about to be released. */ + VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED + = VG_USERREQ_TOOL_BASE('H','G') + 256 + 18, + /* args: Addr, Int is_rw. */ + + /* Tell DRD that a Helgrind annotation has not yet been implemented. */ + VG_USERREQ__HELGRIND_ANNOTATION_UNIMP + = VG_USERREQ_TOOL_BASE('H','G') + 256 + 32, + /* args: char*. */ + + /* Tell DRD to insert a happens-before annotation. */ + VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE + = VG_USERREQ_TOOL_BASE('H','G') + 256 + 33, + /* args: Addr. */ + /* Tell DRD to insert a happens-after annotation. */ + VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER + = VG_USERREQ_TOOL_BASE('H','G') + 256 + 34, + /* args: Addr. 
*/ + +}; + + +/** + * @addtogroup RaceDetectionAnnotations + */ +/*@{*/ + +#ifdef __cplusplus +/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racy reads. + + Instead of doing + ANNOTATE_IGNORE_READS_BEGIN(); + ... = x; + ANNOTATE_IGNORE_READS_END(); + one can use + ... = ANNOTATE_UNPROTECTED_READ(x); */ +template +inline T ANNOTATE_UNPROTECTED_READ(const volatile T& x) { + ANNOTATE_IGNORE_READS_BEGIN(); + const T result = x; + ANNOTATE_IGNORE_READS_END(); + return result; +} +/* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */ +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + static class static_var##_annotator \ + { \ + public: \ + static_var##_annotator() \ + { \ + ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ + #static_var ": " description); \ + } \ + } the_##static_var##_annotator; \ + } +#endif + +/*@}*/ + +#endif /* __VALGRIND_DRD_H */ diff --git a/third-party/valgrind/valgrind.h b/third-party/valgrind/valgrind.h new file mode 100644 index 0000000..4b8ef75 --- /dev/null +++ b/third-party/valgrind/valgrind.h @@ -0,0 +1,7126 @@ +/* -*- c -*- + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (valgrind.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of Valgrind, a dynamic binary instrumentation + framework. + + Copyright (C) 2000-2015 Julian Seward. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (valgrind.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. 
See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + + +/* This file is for inclusion into client (your!) code. + + You can use these macros to manipulate and query Valgrind's + execution inside your own programs. + + The resulting executables will still run without Valgrind, just a + little bit more slowly than they otherwise would, but otherwise + unchanged. When not running on valgrind, each client request + consumes very few (eg. 7) instructions, so the resulting performance + loss is negligible unless you plan to execute client requests + millions of times per second. Nevertheless, if that is still a + problem, you can compile with the NVALGRIND symbol defined (gcc + -DNVALGRIND) so that client requests are not even compiled in. */ + +#ifndef __VALGRIND_H +#define __VALGRIND_H + + +/* ------------------------------------------------------------------ */ +/* VERSION NUMBER OF VALGRIND */ +/* ------------------------------------------------------------------ */ + +/* Specify Valgrind's version number, so that user code can + conditionally compile based on our version number. Note that these + were introduced at version 3.6 and so do not exist in version 3.5 + or earlier. The recommended way to use them to check for "version + X.Y or later" is (eg) + +#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \ + && (__VALGRIND_MAJOR__ > 3 \ + || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) +*/ +#define __VALGRIND_MAJOR__ 3 +#define __VALGRIND_MINOR__ 11 + + +#include + +/* Nb: this file might be included in a file compiled with -ansi. So + we can't use C++ style "//" comments nor the "asm" keyword (instead + use "__asm__"). */ + +/* Derive some tags indicating what the target platform is. Note + that in this file we're using the compiler's CPP symbols for + identifying architectures, which are different to the ones we use + within the rest of Valgrind. Note, __powerpc__ is active for both + 32 and 64-bit PPC, whereas __powerpc64__ is only active for the + latter (on Linux, that is). 
+ + Misc note: how to find out what's predefined in gcc by default: + gcc -Wp,-dM somefile.c +*/ +#undef PLAT_x86_darwin +#undef PLAT_amd64_darwin +#undef PLAT_x86_win32 +#undef PLAT_amd64_win64 +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64be_linux +#undef PLAT_ppc64le_linux +#undef PLAT_arm_linux +#undef PLAT_arm64_linux +#undef PLAT_s390x_linux +#undef PLAT_mips32_linux +#undef PLAT_mips64_linux +#undef PLAT_tilegx_linux +#undef PLAT_x86_solaris +#undef PLAT_amd64_solaris + + +#if defined(__APPLE__) && defined(__i386__) +# define PLAT_x86_darwin 1 +#elif defined(__APPLE__) && defined(__x86_64__) +# define PLAT_amd64_darwin 1 +#elif (defined(__MINGW32__) && !defined(__MINGW64__)) \ + || defined(__CYGWIN32__) \ + || (defined(_WIN32) && defined(_M_IX86)) +# define PLAT_x86_win32 1 +#elif defined(__MINGW64__) \ + || (defined(_WIN64) && defined(_M_X64)) +# define PLAT_amd64_win64 1 +#elif defined(__linux__) && defined(__i386__) +# define PLAT_x86_linux 1 +#elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__) +# define PLAT_amd64_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__) +# define PLAT_ppc32_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2 +/* Big Endian uses ELF version 1 */ +# define PLAT_ppc64be_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2 +/* Little Endian uses ELF version 2 */ +# define PLAT_ppc64le_linux 1 +#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__) +# define PLAT_arm_linux 1 +#elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__) +# define PLAT_arm64_linux 1 +#elif defined(__linux__) && defined(__s390__) && defined(__s390x__) +# define PLAT_s390x_linux 1 +#elif defined(__linux__) && defined(__mips__) && (__mips==64) +# define PLAT_mips64_linux 1 +#elif defined(__linux__) && defined(__mips__) && (__mips!=64) +# define PLAT_mips32_linux 1 +#elif defined(__linux__) && defined(__tilegx__) +# define PLAT_tilegx_linux 1 +#elif defined(__sun) && defined(__i386__) +# define PLAT_x86_solaris 1 +#elif defined(__sun) && defined(__x86_64__) +# define PLAT_amd64_solaris 1 +#else +/* If we're not compiling for our target platform, don't generate + any inline asms. */ +# if !defined(NVALGRIND) +# define NVALGRIND 1 +# endif +#endif + + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */ +/* in here of use to end-users -- skip to the next section. */ +/* ------------------------------------------------------------------ */ + +/* + * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client + * request. Accepts both pointers and integers as arguments. + * + * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind + * client request that does not return a value. + + * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind + * client request and whose value equals the client request result. Accepts + * both pointers and integers as arguments. Note that such calls are not + * necessarily pure functions -- they may have side effects. 
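 *
 * As a purely illustrative example (not part of the original header), the
 * DRD header added earlier in this patch builds its "get thread ID" helper
 * out of exactly this expression form; when the program runs outside
 * Valgrind the expression simply yields the supplied default value:
 *
 *    unsigned drd_tid =
 *       (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(
 *                    0,                                  default off Valgrind
 *                    VG_USERREQ__DRD_GET_DRD_THREAD_ID,  request code (drd.h)
 *                    0, 0, 0, 0, 0);                     unused arguments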
+ */ + +#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \ + _zzq_request, _zzq_arg1, _zzq_arg2, \ + _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \ + (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ + (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) + +#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \ + _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ + (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) + +#if defined(NVALGRIND) + +/* Define NVALGRIND to completely remove the Valgrind magic sequence + from the compiled code (analogous to NDEBUG's effects on + assert()) */ +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + (_zzq_default) + +#else /* ! NVALGRIND */ + +/* The following defines the magic code sequences which the JITter + spots and handles magically. Don't look too closely at them as + they will rot your brain. + + The assembly code sequences for all architectures is in this one + file. This is because this file must be stand-alone, and we don't + want to have multiple files. + + For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default + value gets put in the return slot, so that everything works when + this is executed not under Valgrind. Args are passed in a memory + block, and so there's no intrinsic limit to the number that could + be passed, but it's currently five. + + The macro args are: + _zzq_rlval result lvalue + _zzq_default default value (result returned when running on real CPU) + _zzq_request request code + _zzq_arg1..5 request params + + The other two macros are used to support function wrapping, and are + a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the + guest's NRADDR pseudo-register and whatever other information is + needed to safely run the call original from the wrapper: on + ppc64-linux, the R2 value at the divert point is also needed. This + information is abstracted into a user-visible type, OrigFn. + + VALGRIND_CALL_NOREDIR_* behaves the same as the following on the + guest, but guarantees that the branch instruction will not be + redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64: + branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a + complete inline asm, since it needs to be combined with more magic + inline asm stuff to be useful. +*/ + +/* ----------------- x86-{linux,darwin,solaris} ---------------- */ + +#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ + || (defined(PLAT_x86_win32) && defined(__GNUC__)) \ + || defined(PLAT_x86_solaris) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "roll $3, %%edi ; roll $13, %%edi\n\t" \ + "roll $29, %%edi ; roll $19, %%edi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EDX = client_request ( %EAX ) */ \ + "xchgl %%ebx,%%ebx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EAX = guest_NRADDR */ \ + "xchgl %%ecx,%%ecx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_EAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%EAX */ \ + "xchgl %%edx,%%edx\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "xchgl %%edi,%%edi\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) + || PLAT_x86_solaris */ + +/* ------------------------- x86-Win32 ------------------------- */ + +#if defined(PLAT_x86_win32) && !defined(__GNUC__) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#if defined(_MSC_VER) + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + __asm rol edi, 3 __asm rol edi, 13 \ + __asm rol edi, 29 __asm rol edi, 19 + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \ + (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \ + (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \ + (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5)) + +static __inline uintptr_t +valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request, + uintptr_t _zzq_arg1, uintptr_t _zzq_arg2, + uintptr_t _zzq_arg3, uintptr_t _zzq_arg4, + uintptr_t _zzq_arg5) +{ + volatile uintptr_t _zzq_args[6]; + volatile unsigned int _zzq_result; + _zzq_args[0] = (uintptr_t)(_zzq_request); + _zzq_args[1] = (uintptr_t)(_zzq_arg1); + _zzq_args[2] = (uintptr_t)(_zzq_arg2); + _zzq_args[3] = (uintptr_t)(_zzq_arg3); + _zzq_args[4] = (uintptr_t)(_zzq_arg4); + _zzq_args[5] = (uintptr_t)(_zzq_arg5); + __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default + __SPECIAL_INSTRUCTION_PREAMBLE + /* %EDX = client_request ( %EAX ) */ + __asm xchg ebx,ebx + __asm mov _zzq_result, edx + } + return _zzq_result; +} + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EAX = guest_NRADDR */ \ + __asm xchg ecx,ecx \ + __asm mov __addr, eax \ + } \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_EAX ERROR + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ + __asm xchg edi,edi \ + } \ + } while (0) + +#else +#error Unsupported compiler. +#endif + +#endif /* PLAT_x86_win32 */ + +/* ----------------- amd64-{linux,darwin,solaris} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ + || defined(PLAT_amd64_solaris) \ + || (defined(PLAT_amd64_win64) && defined(__GNUC__)) + +typedef + struct { + unsigned long int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ + "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RDX = client_request ( %RAX ) */ \ + "xchgq %%rbx,%%rbx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RAX = guest_NRADDR */ \ + "xchgq %%rcx,%%rcx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_RAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%RAX */ \ + "xchgq %%rdx,%%rdx\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "xchgq %%rdi,%%rdi\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ + +/* ------------------------- amd64-Win64 ------------------------- */ + +#if defined(PLAT_amd64_win64) && !defined(__GNUC__) + +#error Unsupported compiler. + +#endif /* PLAT_amd64_win64 */ + +/* ------------------------ ppc32-linux ------------------------ */ + +#if defined(PLAT_ppc32_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \ + "rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned int _zzq_args[6]; \ + unsigned int _zzq_result; \ + unsigned int* _zzq_ptr; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc32_linux */ + +/* ------------------------ ppc64-linux ------------------------ */ + +#if defined(PLAT_ppc64be_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + unsigned long int r2; /* what tocptr do we need? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned long int _zzq_args[6]; \ + unsigned long int _zzq_result; \ + unsigned long int* _zzq_ptr; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc64be_linux */ + +#if defined(PLAT_ppc64le_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + unsigned long int r2; /* what tocptr do we need? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned long int _zzq_args[6]; \ + unsigned long int _zzq_result; \ + unsigned long int* _zzq_ptr; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R12 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc64le_linux */ + +/* ------------------------- arm-linux ------------------------- */ + +#if defined(PLAT_arm_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \ + "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("mov r3, %1\n\t" /*default*/ \ + "mov r4, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = client_request ( R4 ) */ \ + "orr r10, r10, r10\n\t" \ + "mov %0, r3" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "cc","memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = guest_NRADDR */ \ + "orr r11, r11, r11\n\t" \ + "mov %0, r3" \ + : "=r" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R4 */ \ + "orr r12, r12, r12\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "orr r9, r9, r9\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_arm_linux */ + +/* ------------------------ arm64-linux ------------------------- */ + +#if defined(PLAT_arm64_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \ + "ror x12, x12, #51 ; ror x12, x12, #61 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile("mov x3, %1\n\t" /*default*/ \ + "mov x4, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* X3 = client_request ( X4 ) */ \ + "orr x10, x10, x10\n\t" \ + "mov %0, x3" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" ((unsigned long int)(_zzq_default)), \ + "r" (&_zzq_args[0]) \ + : "cc","memory", "x3", "x4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* X3 = guest_NRADDR */ \ + "orr x11, x11, x11\n\t" \ + "mov %0, x3" \ + : "=r" (__addr) \ + : \ + : "cc", "memory", "x3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir X8 */ \ + "orr x12, x12, x12\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "orr x9, x9, x9\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_arm64_linux */ + +/* ------------------------ s390x-linux ------------------------ */ + +#if defined(PLAT_s390x_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + } + OrigFn; + +/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific + * code. This detection is implemented in platform specific toIR.c + * (e.g. VEX/priv/guest_s390_decoder.c). 
+ */ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "lr 15,15\n\t" \ + "lr 1,1\n\t" \ + "lr 2,2\n\t" \ + "lr 3,3\n\t" + +#define __CLIENT_REQUEST_CODE "lr 2,2\n\t" +#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t" +#define __CALL_NO_REDIR_CODE "lr 4,4\n\t" +#define __VEX_INJECT_IR_CODE "lr 5,5\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile(/* r2 = args */ \ + "lgr 2,%1\n\t" \ + /* r3 = default */ \ + "lgr 3,%2\n\t" \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + __CLIENT_REQUEST_CODE \ + /* results = r3 */ \ + "lgr %0, 3\n\t" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "2", "3", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + __GET_NR_CONTEXT_CODE \ + "lgr %0, 3\n\t" \ + : "=a" (__addr) \ + : \ + : "cc", "3", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_R1 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + __CALL_NO_REDIR_CODE + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + __VEX_INJECT_IR_CODE); \ + } while (0) + +#endif /* PLAT_s390x_linux */ + +/* ------------------------- mips32-linux ---------------- */ + +#if defined(PLAT_mips32_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +/* .word 0x342 + * .word 0x742 + * .word 0xC2 + * .word 0x4C2*/ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "srl $0, $0, 13\n\t" \ + "srl $0, $0, 29\n\t" \ + "srl $0, $0, 3\n\t" \ + "srl $0, $0, 19\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("move $11, %1\n\t" /*default*/ \ + "move $12, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* T3 = client_request ( T4 ) */ \ + "or $13, $13, $13\n\t" \ + "move %0, $11\n\t" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$11", "$12"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %t9 = guest_NRADDR */ \ + "or $14, $14, $14\n\t" \ + "move %0, $11" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$11" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%t9 */ \ + "or $15, $15, $15\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or $11, $11, $11\n\t" \ + ); \ + } while (0) + + +#endif /* PLAT_mips32_linux */ + +/* ------------------------- mips64-linux ---------------- */ + +#if defined(PLAT_mips64_linux) + +typedef + struct { + unsigned long nraddr; /* where's the code? 
*/ + } + OrigFn; + +/* dsll $0,$0, 3 + * dsll $0,$0, 13 + * dsll $0,$0, 29 + * dsll $0,$0, 19*/ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \ + "dsll $0,$0,29 ; dsll $0,$0,19\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile("move $11, %1\n\t" /*default*/ \ + "move $12, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* $11 = client_request ( $12 ) */ \ + "or $13, $13, $13\n\t" \ + "move %0, $11\n\t" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$11", "$12"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* $11 = guest_NRADDR */ \ + "or $14, $14, $14\n\t" \ + "move %0, $11" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$11"); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir $25 */ \ + "or $15, $15, $15\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or $11, $11, $11\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_mips64_linux */ + +/* ------------------------ tilegx-linux --------------- */ +#if defined(PLAT_tilegx_linux) + +typedef + struct { + unsigned long long int nraddr; /* where's the code? */ + } + OrigFn; +/*** special instruction sequence. 
+ 0:02b3c7ff91234fff { moveli zero, 4660 ; moveli zero, 22136 } + 8:0091a7ff95678fff { moveli zero, 22136 ; moveli zero, 4660 } +****/ + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + ".quad 0x02b3c7ff91234fff\n" \ + ".quad 0x0091a7ff95678fff\n" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + ({ volatile unsigned long long int _zzq_args[6]; \ + volatile unsigned long long int _zzq_result; \ + _zzq_args[0] = (unsigned long long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \ + __asm__ volatile("move r11, %1\n\t" /*default*/ \ + "move r12, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* r11 = client_request */ \ + "or r13, r13, r13\n\t" \ + "move %0, r11\n\t" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "memory", "r11", "r12"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* r11 = guest_NRADDR */ \ + "or r14, r14, r14\n" \ + "move %0, r11\n" \ + : "=r" (__addr) \ + : \ + : "memory", "r11" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_R12 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + "or r15, r15, r15\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or r11, r11, r11\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_tilegx_linux */ + +/* Insert assembly code for other platforms here... */ + +#endif /* NVALGRIND */ + + +/* ------------------------------------------------------------------ */ +/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */ +/* ugly. It's the least-worst tradeoff I can think of. */ +/* ------------------------------------------------------------------ */ + +/* This section defines magic (a.k.a appalling-hack) macros for doing + guaranteed-no-redirection macros, so as to get from function + wrappers to the functions they are wrapping. The whole point is to + construct standard call sequences, but to do the call itself with a + special no-redirect call pseudo-instruction that the JIT + understands and handles specially. This section is long and + repetitious, and I can't see a way to make it shorter. + + The naming scheme is as follows: + + CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc} + + 'W' stands for "word" and 'v' for "void". Hence there are + different macros for calling arity 0, 1, 2, 3, 4, etc, functions, + and for each, the possibility of returning a word-typed result, or + no result. +*/ + +/* Use these to write the name of your wrapper. NOTE: duplicates + VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts + the default behaviour equivalance class tag "0000" into the name. + See pub_tool_redir.h for details -- normally you don't need to + think about this, though. */ + +/* Use an extra level of macroisation so as to ensure the soname/fnname + args are fully macro-expanded before pasting them together. 
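
   For illustration only (this wrapper is not part of the original header;
   "foo" and its signature are placeholders), the pieces defined below
   combine into a wrapper such as the following, which intercepts
   int foo(int) in an object that has no soname (hence the NONE tag) and
   forwards to the real function:

      int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x)
      {
         int    r;
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);    obtain the address of the real foo
         CALL_FN_W_W(r, fn, x);       call it with redirection suppressed
         return r;
      }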
*/ +#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd + +#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \ + VG_CONCAT4(_vgw00000ZU_,soname,_,fnname) + +#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \ + VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname) + +/* Use this macro from within a wrapper function to collect the + context (address and possibly other info) of the original function. + Once you have that you can then use it in one of the CALL_FN_ + macros. The type of the argument _lval is OrigFn. */ +#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) + +/* Also provide end-user facilities for function replacement, rather + than wrapping. A replacement function differs from a wrapper in + that it has no way to get hold of the original function being + called, and hence no way to call onwards to it. In a replacement + function, VALGRIND_GET_ORIG_FN always returns zero. */ + +#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \ + VG_CONCAT4(_vgr00000ZU_,soname,_,fnname) + +#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \ + VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname) + +/* Derivatives of the main macros below, for calling functions + returning void. */ + +#define CALL_FN_v_v(fnptr) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_v(_junk,fnptr); } while (0) + +#define CALL_FN_v_W(fnptr, arg1) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_W(_junk,fnptr,arg1); } while (0) + +#define CALL_FN_v_WW(fnptr, arg1,arg2) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0) + +#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0) + +#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0) + +#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0) + +#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0) + +#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0) + +/* ----------------- x86-{linux,darwin,solaris} ---------------- */ + +#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ + || defined(PLAT_x86_solaris) + +/* These regs are trashed by the hidden call. No need to mention eax + as gcc can already see that, plus causes gcc to bomb. */ +#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "movl %%esp,%%edi\n\t" \ + "andl $0xfffffff0,%%esp\n\t" +#define VALGRIND_RESTORE_STACK \ + "movl %%edi,%%esp\n\t" + +/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned 
long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + 
"pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ 
volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 48(%%eax)\n\t" \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */ + +/* ---------------- amd64-{linux,darwin,solaris} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ + || defined(PLAT_amd64_solaris) + +/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \ + "rdi", "r8", "r9", "r10", "r11" + +/* This is all pretty complex. It's so as to make stack unwinding + work reliably. See bug 243270. The basic problem is the sub and + add of 128 of %rsp in all of the following macros. If gcc believes + the CFA is in %rsp, then unwinding may fail, because what's at the + CFA is not what gcc "expected" when it constructs the CFIs for the + places where the macros are instantiated. + + But we can't just add a CFI annotation to increase the CFA offset + by 128, to match the sub of 128 from %rsp, because we don't know + whether gcc has chosen %rsp as the CFA at that point, or whether it + has chosen some other register (eg, %rbp). In the latter case, + adding a CFI annotation to change the CFA offset is simply wrong. + + So the solution is to get hold of the CFA using + __builtin_dwarf_cfa(), put it in a known register, and add a + CFI annotation to say what the register is. We choose %rbp for + this (perhaps perversely), because: + + (1) %rbp is already subject to unwinding. 
If a new register was + chosen then the unwinder would have to unwind it in all stack + traces, which is expensive, and + + (2) %rbp is already subject to precise exception updates in the + JIT. If a new register was chosen, we'd have to have precise + exceptions for it too, which reduces performance of the + generated code. + + However .. one extra complication. We can't just whack the result + of __builtin_dwarf_cfa() into %rbp and then add %rbp to the + list of trashed registers at the end of the inline assembly + fragments; gcc won't allow %rbp to appear in that list. Hence + instead we need to stash %rbp in %r15 for the duration of the asm, + and say that %r15 is trashed instead. gcc seems happy to go with + that. + + Oh .. and this all needs to be conditionalised so that it is + unchanged from before this commit, when compiled with older gccs + that don't support __builtin_dwarf_cfa. Furthermore, since + this header file is freestanding, it has to be independent of + config.h, and so the following conditionalisation cannot depend on + configure time checks. + + Although it's not clear from + 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)', + this expression excludes Darwin. + .cfi directives in Darwin assembly appear to be completely + different and I haven't investigated how they work. + + For even more entertainment value, note we have to use the + completely undocumented __builtin_dwarf_cfa(), which appears to + really compute the CFA, whereas __builtin_frame_address(0) claims + to but actually doesn't. See + https://bugs.kde.org/show_bug.cgi?id=243270#c47 +*/ +#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) +# define __FRAME_POINTER \ + ,"r"(__builtin_dwarf_cfa()) +# define VALGRIND_CFI_PROLOGUE \ + "movq %%rbp, %%r15\n\t" \ + "movq %2, %%rbp\n\t" \ + ".cfi_remember_state\n\t" \ + ".cfi_def_cfa rbp, 0\n\t" +# define VALGRIND_CFI_EPILOGUE \ + "movq %%r15, %%rbp\n\t" \ + ".cfi_restore_state\n\t" +#else +# define __FRAME_POINTER +# define VALGRIND_CFI_PROLOGUE +# define VALGRIND_CFI_EPILOGUE +#endif + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "movq %%rsp,%%r14\n\t" \ + "andq $0xfffffffffffffff0,%%rsp\n\t" +#define VALGRIND_RESTORE_STACK \ + "movq %%r14,%%rsp\n\t" + +/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned + long) == 8. */ + +/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_ + macros. In order not to trash the stack redzone, we need to drop + %rsp by 128 before the hidden call, and restore afterwards. The + nastyness is that it is only by luck that the stack still appears + to be unwindable during the hidden call - since then the behaviour + of any routine using this macro does not match what the CFI data + says. Sigh. + + Why is this important? Imagine that a wrapper has a stack + allocated local, and passes to the hidden call, a pointer to it. + Because gcc does not know about the hidden call, it may allocate + that local in the redzone. Unfortunately the hidden call may then + trash it before it comes to use it. So we must step clear of the + redzone, for the duration of the hidden call, to make it safe. 
+ + Probably the same problem afflicts the other redzone-style ABIs too + (ppc64-linux); but for those, the stack is + self describing (none of this CFI nonsense) so at least messing + with the stack pointer doesn't give a danger of non-unwindable + stack. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = 
(unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $136,%%rsp\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ 
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $136,%%rsp\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ 
+ VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $136,%%rsp\n\t" \ + "pushq 88(%%rax)\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "pushq 96(%%rax)\n\t" \ + "pushq 88(%%rax)\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) 
__FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ + +/* ------------------------ ppc32-linux ------------------------ */ + +#if defined(PLAT_ppc32_linux) + +/* This is useful for finding out about the on-stack stuff: + + extern int f9 ( int,int,int,int,int,int,int,int,int ); + extern int f10 ( int,int,int,int,int,int,int,int,int,int ); + extern int f11 ( int,int,int,int,int,int,int,int,int,int,int ); + extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int ); + + int g9 ( void ) { + return f9(11,22,33,44,55,66,77,88,99); + } + int g10 ( void ) { + return f10(11,22,33,44,55,66,77,88,99,110); + } + int g11 ( void ) { + return f11(11,22,33,44,55,66,77,88,99,110,121); + } + int g12 ( void ) { + return f12(11,22,33,44,55,66,77,88,99,110,121,132); + } +*/ + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "mr 28,1\n\t" \ + "rlwinm 1,1,0,0,27\n\t" +#define VALGRIND_RESTORE_STACK \ + "mr 1,28\n\t" + +/* These CALL_FN_ macros assume that on ppc32-linux, + sizeof(unsigned long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, 
"r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-16\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = 
(__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-16\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-32\n\t" \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,16(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; 
\ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + _argvec[12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-32\n\t" \ + /* arg12 */ \ + "lwz 3,48(11)\n\t" \ + "stw 3,20(1)\n\t" \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,16(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc32_linux */ + +/* ------------------------ ppc64-linux ------------------------ */ + +#if defined(PLAT_ppc64be_linux) + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "mr 28,1\n\t" \ + "rldicr 1,1,0,59\n\t" +#define VALGRIND_RESTORE_STACK \ + "mr 1,28\n\t" + +/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned + long) == 8. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+0]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+1]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+2]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+3]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 
2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+4]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+5]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+6]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 
48(11)\n\t" /* arg6->r8 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+7]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+8]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+9]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ 
\ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+10]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+11]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + 
_argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+12]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + _argvec[2+12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg12 */ \ + "ld 3,96(11)\n\t" \ + "std 3,136(1)\n\t" \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc64be_linux */ + +/* 
------------------------- ppc64le-linux ----------------------- */ +#if defined(PLAT_ppc64le_linux) + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "mr 28,1\n\t" \ + "rldicr 1,1,0,59\n\t" +#define VALGRIND_RESTORE_STACK \ + "mr 1,28\n\t" + +/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned + long) == 8. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+0]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+1]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+2]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + 
} while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+3]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+4]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+5]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + 
volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+6]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+7]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+8]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 
24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+9]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+10]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg10 */ \ + "ld 3,80(12)\n\t" \ + "std 3,104(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 
48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+11]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg11 */ \ + "ld 3,88(12)\n\t" \ + "std 3,112(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(12)\n\t" \ + "std 3,104(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+12]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + _argvec[2+12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg12 */ \ + "ld 3,96(12)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg11 */ \ + "ld 3,88(12)\n\t" \ + "std 3,112(1)\n\t" \ + /* arg10 */ \ 
+ "ld 3,80(12)\n\t" \ + "std 3,104(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc64le_linux */ + +/* ------------------------- arm-linux ------------------------- */ + +#if defined(PLAT_arm_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4", "r12", "r14" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +/* This is a bit tricky. We store the original stack pointer in r10 + as it is callee-saves. gcc doesn't allow the use of r11 for some + reason. Also, we can't directly "bic" the stack pointer in thumb + mode since r13 isn't an allowed register number in that context. + So use r4 as a temporary, since that is about to get trashed + anyway, just after each use of this macro. Side effect is we need + to be very careful about any future changes, since + VALGRIND_ALIGN_STACK simply assumes r4 is usable. */ +#define VALGRIND_ALIGN_STACK \ + "mov r10, sp\n\t" \ + "mov r4, sp\n\t" \ + "bic r4, r4, #7\n\t" \ + "mov sp, r4\n\t" +#define VALGRIND_RESTORE_STACK \ + "mov sp, r10\n\t" + +/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, 
arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned 
long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "push {r0, r1, r2, r3} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #40] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile 
unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "ldr r2, [%1, #48] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_arm_linux */ + +/* ------------------------ arm64-linux ------------------------ */ + +#if defined(PLAT_arm64_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \ + "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \ + "x18", "x19", "x20", "x30", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \ + "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \ + "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \ + "v26", "v27", "v28", "v29", "v30", "v31" + +/* x21 is callee-saved, so we can use it to save and restore SP around + the hidden call. 
*/ +#define VALGRIND_ALIGN_STACK \ + "mov x21, sp\n\t" \ + "bic sp, x21, #15\n\t" +#define VALGRIND_RESTORE_STACK \ + "mov sp, x21\n\t" + +/* These CALL_FN_ macros assume that on arm64-linux, + sizeof(unsigned long) == 8. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : 
/*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned 
long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x20 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x20 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = 
(orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x30 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1, #88] \n\t" \ + "str x8, [sp, #16] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11, \ + arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x30 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1, #88] \n\t" \ + "str x8, [sp, #16] \n\t" \ + "ldr x8, [%1, #96] \n\t" \ + "str x8, [sp, #24] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_arm64_linux */ + +/* ------------------------- s390x-linux ------------------------- */ + +#if defined(PLAT_s390x_linux) + +/* Similar workaround as amd64 (see above), but we use r11 as frame + pointer and save the old r11 in r7. r11 might be used for + argvec, therefore we copy argvec in r1 since r1 is clobbered + after the call anyway. 
*/ +#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) +# define __FRAME_POINTER \ + ,"d"(__builtin_dwarf_cfa()) +# define VALGRIND_CFI_PROLOGUE \ + ".cfi_remember_state\n\t" \ + "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \ + "lgr 7,11\n\t" \ + "lgr 11,%2\n\t" \ + ".cfi_def_cfa r11, 0\n\t" +# define VALGRIND_CFI_EPILOGUE \ + "lgr 11, 7\n\t" \ + ".cfi_restore_state\n\t" +#else +# define __FRAME_POINTER +# define VALGRIND_CFI_PROLOGUE \ + "lgr 1,%1\n\t" +# define VALGRIND_CFI_EPILOGUE +#endif + +/* Nb: On s390 the stack pointer is properly aligned *at all times* + according to the s390 GCC maintainer. (The ABI specification is not + precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and + VALGRIND_RESTORE_STACK are not defined here. */ + +/* These regs are trashed by the hidden call. Note that we overwrite + r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the + function a proper return address. All others are ABI defined call + clobbers. */ +#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \ + "f0","f1","f2","f3","f4","f5","f6","f7" + +/* Nb: Although r11 is modified in the asm snippets below (inside + VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for + two reasons: + (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not + modified + (2) GCC will complain that r11 cannot appear inside a clobber section, + when compiled with -O -fno-omit-frame-pointer + */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 1, 0(1)\n\t" /* target->r1 */ \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +/* The call abi has the arguments in r2-r6 and stack */ +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1, arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \ + do { \ + volatile OrigFn _orig = (orig); 
\ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-168\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,168\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long 
_argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-176\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,176\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-184\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,184\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-192\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,192\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ 
+ _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-200\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,200\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10, arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-208\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "mvc 200(8,15), 88(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" \ + "aghi 15,208\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + _argvec[12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-216\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "mvc 200(8,15), 88(1)\n\t" \ + "mvc 208(8,15), 96(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "lgr %0, 2\n\t" 
\ + "aghi 15,216\n\t" \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + + +#endif /* PLAT_s390x_linux */ + +/* ------------------------- mips32-linux ----------------------- */ + +#if defined(PLAT_mips32_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ +"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ +"$25", "$31" + +/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned + long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16\n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" /* arg1*/ \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + 
VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 24\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 24 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 32\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "nop\n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 32 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define 
CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 32\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 32 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 40\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 40 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 40\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) 
\n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 40 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 48\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 48 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 48\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 44(%1) \n\t" \ + "sw $4, 40($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 48 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + 
"move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 56\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 44(%1) \n\t" \ + "sw $4, 40($29) \n\t" \ + "lw $4, 48(%1) \n\t" \ + "sw $4, 44($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 56 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_mips32_linux */ + +/* ------------------------- mips64-linux ------------------------- */ + +#if defined(PLAT_mips64_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ +"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ +"$25", "$31" + +/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" /* arg1*/ \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 
16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + 
_argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "dsubu $29, $29, 8\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 8\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "dsubu $29, $29, 16\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 16\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "dsubu $29, $29, 24\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 88(%1)\n\t" \ + "sd $4, 16($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 24\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, 
arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "dsubu $29, $29, 32\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 88(%1)\n\t" \ + "sd $4, 16($29)\n\t" \ + "ld $4, 96(%1)\n\t" \ + "sd $4, 24($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 32\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_mips64_linux */ + +/* ------------------------ tilegx-linux ------------------------- */ + +#if defined(PLAT_tilegx_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3", "r4", "r5", \ + "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", \ + "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", \ + "r23", "r24", "r25", "r26", "r27", "r28", "r29", "lr" + +/* These CALL_FN_ macros assume that on tilegx-linux, sizeof(unsigned + long) == 8. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "ld r12, %1 \n\t" /* target->r11 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0 \n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8 \n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 
\n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ + "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ + "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ + "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ + "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ + "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ + "ld_add r6, r29, 8 \n\t" 
/*arg7 -> r6 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ + "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ + "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ + "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ + "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ + "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ + "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ + "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ + "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ + "ld_add r8, r29, 8 \n\t" /*arg9 -> r8 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = 
(unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ + "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ + "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ + "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ + "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ + "ld_add r8, r29, 8 \n\t" /*arg9 -> r8 */ \ + "ld_add r9, r29, 8 \n\t" /*arg10 -> r9 */ \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 8\n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ + "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ + "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ + "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ + "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ + "ld_add r8, r29, 8 \n\t" /*arg9 -> r8 */ \ + "ld_add r9, r29, 8 \n\t" /*arg10 -> r9 */ \ + "ld r10, r29 \n\t" \ + "st_add sp, r10, -16 \n\t" \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 24 \n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "addi sp, sp, -8 \n\t" \ + "st_add sp, lr, -8 \n\t" \ + "move r29, %1 \n\t" \ + "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ + 
"ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ + "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ + "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ + "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ + "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ + "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ + "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ + "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ + "ld_add r8, r29, 8 \n\t" /*arg9 -> r8 */ \ + "ld_add r9, r29, 8 \n\t" /*arg10 -> r9 */ \ + "addi r28, sp, -8 \n\t" \ + "addi sp, sp, -24 \n\t" \ + "ld_add r10, r29, 8 \n\t" \ + "ld r11, r29 \n\t" \ + "st_add r28, r10, 8 \n\t" \ + "st r28, r11 \n\t" \ + VALGRIND_CALL_NOREDIR_R12 \ + "addi sp, sp, 32 \n\t" \ + "ld_add lr, sp, 8 \n\t" \ + "move %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS); \ + lval = (__typeof__(lval)) _res; \ + } while (0) +#endif /* PLAT_tilegx_linux */ + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */ +/* */ +/* ------------------------------------------------------------------ */ + +/* Some request codes. There are many more of these, but most are not + exposed to end-user view. These are the public ones, all of the + form 0x1000 + small_number. + + Core ones are in the range 0x00000000--0x0000ffff. The non-public + ones start at 0x2000. +*/ + +/* These macros are used by tools -- they must be public, but don't + embed them into other programs. */ +#define VG_USERREQ_TOOL_BASE(a,b) \ + ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16)) +#define VG_IS_TOOL_USERREQ(a, b, v) \ + (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000)) + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE ORDER OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end. */ +typedef + enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001, + VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002, + + /* These allow any function to be called from the simulated + CPU but run on the real CPU. Nb: the first arg passed to + the function is always the ThreadId of the running + thread! So CLIENT_CALL0 actually requires a 1 arg + function, etc. */ + VG_USERREQ__CLIENT_CALL0 = 0x1101, + VG_USERREQ__CLIENT_CALL1 = 0x1102, + VG_USERREQ__CLIENT_CALL2 = 0x1103, + VG_USERREQ__CLIENT_CALL3 = 0x1104, + + /* Can be useful in regression testing suites -- eg. can + send Valgrind's output to /dev/null and still count + errors. */ + VG_USERREQ__COUNT_ERRORS = 0x1201, + + /* Allows the client program and/or gdbserver to execute a monitor + command. */ + VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202, + + /* These are useful and can be interpreted by any tool that + tracks malloc() et al, by using vg_replace_malloc.c. */ + VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, + VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b, + VG_USERREQ__FREELIKE_BLOCK = 0x1302, + /* Memory pool support. */ + VG_USERREQ__CREATE_MEMPOOL = 0x1303, + VG_USERREQ__DESTROY_MEMPOOL = 0x1304, + VG_USERREQ__MEMPOOL_ALLOC = 0x1305, + VG_USERREQ__MEMPOOL_FREE = 0x1306, + VG_USERREQ__MEMPOOL_TRIM = 0x1307, + VG_USERREQ__MOVE_MEMPOOL = 0x1308, + VG_USERREQ__MEMPOOL_CHANGE = 0x1309, + VG_USERREQ__MEMPOOL_EXISTS = 0x130a, + + /* Allow printfs to valgrind log. */ + /* The first two pass the va_list argument by value, which + assumes it is the same size as or smaller than a UWord, + which generally isn't the case. Hence are deprecated. 
+ The second two pass the vargs by reference and so are + immune to this problem. */ + /* both :: char* fmt, va_list vargs (DEPRECATED) */ + VG_USERREQ__PRINTF = 0x1401, + VG_USERREQ__PRINTF_BACKTRACE = 0x1402, + /* both :: char* fmt, va_list* vargs */ + VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404, + + /* Stack support. */ + VG_USERREQ__STACK_REGISTER = 0x1501, + VG_USERREQ__STACK_DEREGISTER = 0x1502, + VG_USERREQ__STACK_CHANGE = 0x1503, + + /* Wine support */ + VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601, + + /* Querying of debug info. */ + VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701, + + /* Disable/enable error reporting level. Takes a single + Word arg which is the delta to this thread's error + disablement indicator. Hence 1 disables or further + disables errors, and -1 moves back towards enablement. + Other values are not allowed. */ + VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801, + + /* Initialise IR injection */ + VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901 + } Vg_ClientRequest; + +#if !defined(__GNUC__) +# define __extension__ /* */ +#endif + + +/* Returns the number of Valgrinds this code is running under. That + is, 0 if running natively, 1 if running under Valgrind, 2 if + running under Valgrind which is running under another Valgrind, + etc. */ +#define RUNNING_ON_VALGRIND \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \ + VG_USERREQ__RUNNING_ON_VALGRIND, \ + 0, 0, 0, 0, 0) \ + + +/* Discard translation of code in the range [_qzz_addr .. _qzz_addr + + _qzz_len - 1]. Useful if you are debugging a JITter or some such, + since it provides a way to make sure valgrind will retranslate the + invalidated area. Returns no value. */ +#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \ + _qzz_addr, _qzz_len, 0, 0, 0) + + +/* These requests are for getting Valgrind itself to print something. + Possibly with a backtrace. This is a really ugly hack. The return value + is the number of characters printed, excluding the "**** " part at the + start and the backtrace (if present). */ + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) +/* Modern GCC will optimize the static routine out if unused, + and unused attribute will shut down warnings about it. */ +static int VALGRIND_PRINTF(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +#endif +static int +#if defined(_MSC_VER) +__inline +#endif +VALGRIND_PRINTF(const char *format, ...) +{ +#if defined(NVALGRIND) + return 0; +#else /* NVALGRIND */ +#if defined(_MSC_VER) || defined(__MINGW64__) + uintptr_t _qzz_res; +#else + unsigned long _qzz_res; +#endif + va_list vargs; + va_start(vargs, format); +#if defined(_MSC_VER) || defined(__MINGW64__) + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#else + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_VALIST_BY_REF, + (unsigned long)format, + (unsigned long)&vargs, + 0, 0, 0); +#endif + va_end(vargs); + return (int)_qzz_res; +#endif /* NVALGRIND */ +} + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) +static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +#endif +static int +#if defined(_MSC_VER) +__inline +#endif +VALGRIND_PRINTF_BACKTRACE(const char *format, ...) 
+{ +#if defined(NVALGRIND) + return 0; +#else /* NVALGRIND */ +#if defined(_MSC_VER) || defined(__MINGW64__) + uintptr_t _qzz_res; +#else + unsigned long _qzz_res; +#endif + va_list vargs; + va_start(vargs, format); +#if defined(_MSC_VER) || defined(__MINGW64__) + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#else + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, + (unsigned long)format, + (unsigned long)&vargs, + 0, 0, 0); +#endif + va_end(vargs); + return (int)_qzz_res; +#endif /* NVALGRIND */ +} + + +/* These requests allow control to move from the simulated CPU to the + real CPU, calling an arbitary function. + + Note that the current ThreadId is inserted as the first argument. + So this call: + + VALGRIND_NON_SIMD_CALL2(f, arg1, arg2) + + requires f to have this signature: + + Word f(Word tid, Word arg1, Word arg2) + + where "Word" is a word-sized type. + + Note that these client requests are not entirely reliable. For example, + if you call a function with them that subsequently calls printf(), + there's a high chance Valgrind will crash. Generally, your prospects of + these working are made higher if the called function does not refer to + any global variables, and does not refer to any libc or other functions + (printf et al). Any kind of entanglement with libc or dynamic linking is + likely to have a bad outcome, for tricky reasons which we've grappled + with a lot in the past. +*/ +#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL0, \ + _qyy_fn, \ + 0, 0, 0, 0) + +#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL1, \ + _qyy_fn, \ + _qyy_arg1, 0, 0, 0) + +#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL2, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, 0, 0) + +#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL3, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, \ + _qyy_arg3, 0) + + +/* Counts the number of errors that have been recorded by a tool. Nb: + the tool must record the errors with VG_(maybe_record_error)() or + VG_(unique_error)() for them to be counted. */ +#define VALGRIND_COUNT_ERRORS \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + 0 /* default return */, \ + VG_USERREQ__COUNT_ERRORS, \ + 0, 0, 0, 0, 0) + +/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing + when heap blocks are allocated in order to give accurate results. This + happens automatically for the standard allocator functions such as + malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete, + delete[], etc. + + But if your program uses a custom allocator, this doesn't automatically + happen, and Valgrind will not do as well. For example, if you allocate + superblocks with mmap() and then allocates chunks of the superblocks, all + Valgrind's observations will be at the mmap() level and it won't know that + the chunks should be considered separate entities. In Memcheck's case, + that means you probably won't get heap block overrun detection (because + there won't be redzones marked as unaddressable) and you definitely won't + get any leak detection. 
+ + The following client requests allow a custom allocator to be annotated so + that it can be handled accurately by Valgrind. + + VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated + by a malloc()-like function. For Memcheck (an illustrative case), this + does two things: + + - It records that the block has been allocated. This means any addresses + within the block mentioned in error messages will be + identified as belonging to the block. It also means that if the block + isn't freed it will be detected by the leak checker. + + - It marks the block as being addressable and undefined (if 'is_zeroed' is + not set), or addressable and defined (if 'is_zeroed' is set). This + controls how accesses to the block by the program are handled. + + 'addr' is the start of the usable block (ie. after any + redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator + can apply redzones -- these are blocks of padding at the start and end of + each block. Adding redzones is recommended as it makes it much more likely + Valgrind will spot block overruns. `is_zeroed' indicates if the memory is + zeroed (or filled with another predictable value), as is the case for + calloc(). + + VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a + heap block -- that will be used by the client program -- is allocated. + It's best to put it at the outermost level of the allocator if possible; + for example, if you have a function my_alloc() which calls + internal_alloc(), and the client request is put inside internal_alloc(), + stack traces relating to the heap block will contain entries for both + my_alloc() and internal_alloc(), which is probably not what you want. + + For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out + custom blocks from within a heap block, B, that has been allocated with + malloc/calloc/new/etc, then block B will be *ignored* during leak-checking + -- the custom blocks will take precedence. + + VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For + Memcheck, it does two things: + + - It records that the block has been deallocated. This assumes that the + block was annotated as having been allocated via + VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. + + - It marks the block as being unaddressable. + + VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a + heap block is deallocated. + + VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For + Memcheck, it does four things: + + - It records that the size of a block has been changed. This assumes that + the block was annotated as having been allocated via + VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. + + - If the block shrunk, it marks the freed memory as being unaddressable. + + - If the block grew, it marks the new area as undefined and defines a red + zone past the end of the new block. + + - The V-bits of the overlap between the old and the new block are preserved. + + VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block + and before deallocation of the old block. + + In many cases, these three client requests will not be enough to get your + allocator working well with Memcheck. More specifically, if your allocator + writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call + will be necessary to mark the memory as addressable just before the zeroing + occurs, otherwise you'll get a lot of invalid write errors. 
For example, + you'll need to do this if your allocator recycles freed blocks, but it + zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK). + Alternatively, if your allocator reuses freed blocks for allocator-internal + data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary. + + Really, what's happening is a blurring of the lines between the client + program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the + memory should be considered unaddressable to the client program, but the + allocator knows more than the rest of the client program and so may be able + to safely access it. Extra client requests are necessary for Valgrind to + understand the distinction between the allocator and the rest of the + program. + + Ignored if addr == 0. +*/ +#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \ + addr, sizeB, rzB, is_zeroed, 0) + +/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. + Ignored if addr == 0. +*/ +#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \ + addr, oldSizeB, newSizeB, rzB, 0) + +/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. + Ignored if addr == 0. +*/ +#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \ + addr, rzB, 0, 0, 0) + +/* Create a memory pool. */ +#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ + pool, rzB, is_zeroed, 0, 0) + +/* Destroy a memory pool. */ +#define VALGRIND_DESTROY_MEMPOOL(pool) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \ + pool, 0, 0, 0, 0) + +/* Associate a piece of memory with a memory pool. */ +#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \ + pool, addr, size, 0, 0) + +/* Disassociate a piece of memory from a memory pool. */ +#define VALGRIND_MEMPOOL_FREE(pool, addr) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \ + pool, addr, 0, 0, 0) + +/* Disassociate any pieces outside a particular range. */ +#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \ + pool, addr, size, 0, 0) + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \ + poolA, poolB, 0, 0, 0) + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \ + pool, addrA, addrB, size, 0) + +/* Return 1 if a mempool exists, else 0. */ +#define VALGRIND_MEMPOOL_EXISTS(pool) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__MEMPOOL_EXISTS, \ + pool, 0, 0, 0, 0) + +/* Mark a piece of memory as being a stack. Returns a stack id. + start is the lowest addressable stack byte, end is the highest + addressable stack byte. */ +#define VALGRIND_STACK_REGISTER(start, end) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__STACK_REGISTER, \ + start, end, 0, 0, 0) + +/* Unmark the piece of memory associated with a stack id as being a + stack. 
*/ +#define VALGRIND_STACK_DEREGISTER(id) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \ + id, 0, 0, 0, 0) + +/* Change the start and end address of the stack id. + start is the new lowest addressable stack byte, end is the new highest + addressable stack byte. */ +#define VALGRIND_STACK_CHANGE(id, start, end) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \ + id, start, end, 0, 0) + +/* Load PDB debug info for Wine PE image_map. */ +#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \ + fd, ptr, total_size, delta, 0) + +/* Map a code address to a source file name and line number. buf64 + must point to a 64-byte buffer in the caller's address space. The + result will be dumped in there and is guaranteed to be zero + terminated. If no info is found, the first byte is set to zero. */ +#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__MAP_IP_TO_SRCLOC, \ + addr, buf64, 0, 0, 0) + +/* Disable error reporting for this thread. Behaves in a stack like + way, so you can safely call this multiple times provided that + VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times + to re-enable reporting. The first call of this macro disables + reporting. Subsequent calls have no effect except to increase the + number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable + reporting. Child threads do not inherit this setting from their + parents -- they are always created with reporting enabled. */ +#define VALGRIND_DISABLE_ERROR_REPORTING \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ + 1, 0, 0, 0, 0) + +/* Re-enable error reporting, as per comments on + VALGRIND_DISABLE_ERROR_REPORTING. */ +#define VALGRIND_ENABLE_ERROR_REPORTING \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ + -1, 0, 0, 0, 0) + +/* Execute a monitor command from the client program. + If a connection is opened with GDB, the output will be sent + according to the output mode set for vgdb. + If no connection is opened, output will go to the log output. + Returns 1 if command not recognised, 0 otherwise. */ +#define VALGRIND_MONITOR_COMMAND(command) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \ + command, 0, 0, 0, 0) + + +#undef PLAT_x86_darwin +#undef PLAT_amd64_darwin +#undef PLAT_x86_win32 +#undef PLAT_amd64_win64 +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64be_linux +#undef PLAT_ppc64le_linux +#undef PLAT_arm_linux +#undef PLAT_s390x_linux +#undef PLAT_mips32_linux +#undef PLAT_mips64_linux +#undef PLAT_tilegx_linux +#undef PLAT_x86_solaris +#undef PLAT_amd64_solaris + +#endif /* __VALGRIND_H */ -- 2.34.1
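
As a usage sketch for the allocator-annotation client requests defined above (VALGRIND_MALLOCLIKE_BLOCK / VALGRIND_FREELIKE_BLOCK): the superblock size, redzone width and function names below are invented for illustration, and the include line assumes the header is reachable on the compiler's include path as "valgrind/valgrind.h".

/* Sketch: annotating a toy bump allocator so Memcheck tracks its chunks. */
#include <stdlib.h>
#include <string.h>
#include "valgrind/valgrind.h"   /* assumed include path */

#define SUPERBLOCK_SIZE (64 * 1024)
#define RZ 16                    /* redzone width reported to Valgrind */

static char  *superblock;        /* one big heap block chunks are carved from */
static size_t used;

static void *chunk_alloc(size_t n)
{
    if (!superblock)
        superblock = malloc(SUPERBLOCK_SIZE);
    if (!superblock || used + n + 2 * RZ > SUPERBLOCK_SIZE)
        return NULL;
    char *p = superblock + used + RZ;
    used += n + 2 * RZ;
    /* Record p as a heap block of n usable bytes with RZ bytes of redzone,
       contents not zeroed. */
    VALGRIND_MALLOCLIKE_BLOCK(p, n, RZ, 0);
    return p;
}

static void chunk_free(void *p)
{
    /* Must mirror the MALLOCLIKE annotation; the block becomes unaddressable. */
    VALGRIND_FREELIKE_BLOCK(p, RZ);
}

int main(void)
{
    if (RUNNING_ON_VALGRIND)
        VALGRIND_PRINTF("annotated custom allocator demo\n");
    char *s = chunk_alloc(32);
    if (s) {
        memset(s, 'x', 32);
        chunk_free(s);
    }
    free(superblock);
    return 0;
}

Outside Valgrind the requests compile to cheap no-op sequences (RUNNING_ON_VALGRIND is 0), so the sketch runs unchanged natively.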
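
Where allocations are tied to a pool handle rather than freed one by one, the mempool requests above can be used instead; a minimal sketch under the same include-path assumption (the arena layout and sizes are made up):

#include <stdlib.h>
#include "valgrind/valgrind.h"   /* assumed include path */

int main(void)
{
    /* The arena's base address doubles as the pool handle. */
    char *arena = malloc(4096);
    if (!arena)
        return 1;

    VALGRIND_CREATE_MEMPOOL(arena, 0 /* rzB */, 0 /* is_zeroed */);

    char *obj = arena + 128;                /* carve an object from the arena */
    VALGRIND_MEMPOOL_ALLOC(arena, obj, 64); /* tracked as a 64-byte block */
    obj[0] = 1;

    VALGRIND_MEMPOOL_FREE(arena, obj);      /* obj is unaddressable again */

    if (VALGRIND_MEMPOOL_EXISTS(arena))
        VALGRIND_DESTROY_MEMPOOL(arena);
    free(arena);
    return 0;
}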
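
VALGRIND_STACK_REGISTER / VALGRIND_STACK_DEREGISTER are meant for manually managed stacks (coroutines, green threads and the like); a sketch, again with an assumed include path and an invented stack size:

#include <stdlib.h>
#include "valgrind/valgrind.h"   /* assumed include path */

#define CO_STACK_SIZE (64 * 1024)

int main(void)
{
    char *stk = malloc(CO_STACK_SIZE);
    if (!stk)
        return 1;

    /* start = lowest addressable byte, end = highest addressable byte. */
    unsigned id = VALGRIND_STACK_REGISTER(stk, stk + CO_STACK_SIZE - 1);

    /* ... switch onto stk, run the coroutine, switch back ... */

    VALGRIND_STACK_DEREGISTER(id);
    free(stk);
    return 0;
}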
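
Finally, the per-thread error-reporting switch and the error counter compose into a simple pattern for tolerating a known-noisy call while still failing a test run on anything else; noisy_init() below is a hypothetical stand-in for real third-party setup code:

#include "valgrind/valgrind.h"   /* assumed include path */

static void noisy_init(void)
{
    /* stands in for library setup that triggers reports we choose to ignore */
}

int main(void)
{
    /* The disable/enable requests nest per thread, so every DISABLE
       needs a matching ENABLE. */
    VALGRIND_DISABLE_ERROR_REPORTING;
    noisy_init();
    VALGRIND_ENABLE_ERROR_REPORTING;

    /* Natively this is 0; under Valgrind it is the running error total. */
    return VALGRIND_COUNT_ERRORS ? 1 : 0;
}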