Actual source code: mpiuopen.c
#define PETSC_DESIRE_FEATURE_TEST_MACROS /* for popen() */
/*
      Some PETSc utility routines to add simple parallel IO capabilities
*/
#include <petscsys.h>
#include <petsc/private/logimpl.h>
#include <errno.h>

/*@C
  PetscFOpen - Has the first process in the communicator open a file;
  all others do nothing.

  Logically Collective; No Fortran Support

  Input Parameters:
+ comm - the communicator
. name - the filename
- mode - the mode for fopen(), usually "w"

  Output Parameter:
. fp - the file pointer

  Level: developer

  Note:
  NULL (0), "stderr" or "stdout" may be passed in as the filename

.seealso: `PetscFClose()`, `PetscSynchronizedFGets()`, `PetscSynchronizedPrintf()`, `PetscSynchronizedFlush()`,
          `PetscFPrintf()`
@*/
PetscErrorCode PetscFOpen(MPI_Comm comm, const char name[], const char mode[], FILE **fp)
{
  PetscMPIInt rank;
  FILE       *fd;
  char        fname[PETSC_MAX_PATH_LEN], tname[PETSC_MAX_PATH_LEN];

  MPI_Comm_rank(comm, &rank);
  if (rank == 0) {
    PetscBool isstdout, isstderr;
    PetscStrcmp(name, "stdout", &isstdout);
    PetscStrcmp(name, "stderr", &isstderr);
    if (isstdout || !name) fd = PETSC_STDOUT;
    else if (isstderr) fd = PETSC_STDERR;
    else {
      PetscBool devnull;
      PetscStrreplace(PETSC_COMM_SELF, name, tname, PETSC_MAX_PATH_LEN);
      PetscFixFilename(tname, fname);
      PetscStrbeginswith(fname, "/dev/null", &devnull);
      if (devnull) PetscStrcpy(fname, "/dev/null");
      PetscInfo(0, "Opening file %s\n", fname);
      fd = fopen(fname, mode);
      if (!fd) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Unable to open file %s", fname);
    }
  } else fd = NULL; /* all ranks other than 0 get a NULL file pointer */
  *fp = fd;
  return 0;
}

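/*
  A minimal usage sketch (illustrative only, not part of the library source; the file
  name and message text are made up, and error checking is omitted to match the style
  of this listing): every rank calls the routines collectively, but only rank 0 of the
  communicator actually opens, writes to, and closes the file.
*/
static PetscErrorCode PetscFOpenUsageSketch(MPI_Comm comm)
{
  FILE *fp;

  PetscFOpen(comm, "diagnostics.log", "w", &fp); /* fp is NULL on all ranks other than 0 */
  PetscFPrintf(comm, fp, "starting the solve\n"); /* only rank 0 writes */
  PetscFClose(comm, fp);                          /* only rank 0 closes */
  return 0;
}
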
/*@C
  PetscFClose - Has the first process in the communicator close a
  file; all others do nothing.

  Logically Collective; No Fortran Support

  Input Parameters:
+ comm - the communicator
- fd - the file, opened with `PetscFOpen()`

  Level: developer

.seealso: `PetscFOpen()`
@*/
PetscErrorCode PetscFClose(MPI_Comm comm, FILE *fd)
{
  PetscMPIInt rank;
  int         err;

  MPI_Comm_rank(comm, &rank);
  if (rank == 0 && fd != PETSC_STDOUT && fd != PETSC_STDERR) {
    err = fclose(fd);
    if (err) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
  }
  return 0;
}

#if defined(PETSC_HAVE_POPEN)
static char PetscPOpenMachine[128] = "";

/*@C
  PetscPClose - Closes (ends) a program on process zero run with `PetscPOpen()`

  Collective, but only process 0 runs the command

  Input Parameters:
+ comm - MPI communicator, only process zero runs the program
- fd - the file pointer returned by `PetscPOpen()`

  Level: intermediate

  Note:
  Does not work under Windows

.seealso: `PetscFOpen()`, `PetscFClose()`, `PetscPOpen()`
@*/
PetscErrorCode PetscPClose(MPI_Comm comm, FILE *fd)
{
  PetscMPIInt rank;

  MPI_Comm_rank(comm, &rank);
  if (rank == 0) {
    char buf[1024];
    while (fgets(buf, 1024, fd))
      ; /* wait till it prints everything */
    (void)pclose(fd);
  }
  return 0;
}

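/*
  A usage sketch for `PetscPOpen()`/`PetscPClose()` in "r" mode (illustrative only; the
  command "uname -a" is just an example and error checking is omitted): rank 0 runs the
  command and reads its output, then PetscPClose() drains whatever output remains and
  closes the pipe. On all other ranks fp stays NULL and the calls do nothing.
*/
static PetscErrorCode PetscPCloseUsageSketch(MPI_Comm comm)
{
  FILE       *fp = NULL;
  char        line[1024];
  PetscMPIInt rank;

  MPI_Comm_rank(comm, &rank);
  PetscPOpen(comm, NULL, "uname -a", "r", &fp);
  if (rank == 0 && fp) {
    while (fgets(line, sizeof(line), fp)) PetscPrintf(PETSC_COMM_SELF, "%s", line);
  }
  PetscPClose(comm, fp);
  return 0;
}
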
/*@C
  PetscPOpen - Runs a program on process zero and sends either its input or output to
  a file.

  Logically Collective, but only process 0 runs the command

  Input Parameters:
+ comm - MPI communicator, only process zero runs the program
. machine - machine to run the command on, or NULL (or a string with 0 in the first location) to use the default
. program - name of program to run
- mode - either "r" or "w"

  Output Parameter:
. fp - the file pointer where program input or output may be read, or NULL if you don't care

  Level: intermediate

  Notes:
  Use `PetscPClose()` to close the file pointer when you are finished with it.

  Does not work under Windows.

  If machine is not provided, the value set with `PetscPOpenSetMachine()` is used if one was provided; otherwise
  the command runs on the machine running process zero of the communicator.

  The program string may contain ${DISPLAY}, ${HOMEDIRECTORY} or ${WORKINGDIRECTORY}; these
  will be replaced with the relevant values.

.seealso: `PetscFOpen()`, `PetscFClose()`, `PetscPClose()`, `PetscPOpenSetMachine()`
@*/
PetscErrorCode PetscPOpen(MPI_Comm comm, const char machine[], const char program[], const char mode[], FILE **fp)
{
  PetscMPIInt rank;
  size_t      i, len, cnt;
  char        commandt[PETSC_MAX_PATH_LEN], command[PETSC_MAX_PATH_LEN];
  FILE       *fd;

  /* all processes have to do the string manipulation because PetscStrreplace() is a collective operation */
  if (PetscPOpenMachine[0] || (machine && machine[0])) {
    PetscStrcpy(command, "ssh ");
    if (PetscPOpenMachine[0]) {
      PetscStrcat(command, PetscPOpenMachine);
    } else {
      PetscStrcat(command, machine);
    }
    PetscStrcat(command, " \" export DISPLAY=${DISPLAY}; ");
    /*
        Copy program into command but protect the " with a \ in front of it
    */
    PetscStrlen(command, &cnt);
    PetscStrlen(program, &len);
    for (i = 0; i < len; i++) {
      if (program[i] == '\"') command[cnt++] = '\\';
      command[cnt++] = program[i];
    }
    command[cnt] = 0;

    PetscStrcat(command, "\"");
  } else {
    PetscStrcpy(command, program);
  }

  PetscStrreplace(comm, command, commandt, 1024);

  MPI_Comm_rank(comm, &rank);
  if (rank == 0) {
    PetscInfo(NULL, "Running command :%s\n", commandt);
    fd = popen(commandt, mode);
    if (!fd) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot run command %s", commandt);
    if (fp) *fp = fd;
  }
  return 0;
}

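/*
  A sketch of the variable substitution mentioned in the notes above (illustrative only;
  the command is made up and error checking is omitted): ${WORKINGDIRECTORY} in the
  program string is expanded by PetscStrreplace() on all ranks before the command is
  handed to popen() on rank 0.
*/
static PetscErrorCode PetscPOpenUsageSketch(MPI_Comm comm)
{
  FILE *fp = NULL;

  PetscPOpen(comm, NULL, "ls ${WORKINGDIRECTORY}", "r", &fp);
  PetscPClose(comm, fp);
  return 0;
}
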
/*@C
  PetscPOpenSetMachine - Sets the name of the default machine to run `PetscPOpen()` calls on

  Logically Collective, but only process 0 runs the command

  Input Parameter:
. machine - machine to run command on or NULL for the current machine

  Options Database Key:
. -popen_machine <machine> - run the process on this machine

  Level: intermediate

.seealso: `PetscFOpen()`, `PetscFClose()`, `PetscPClose()`, `PetscPOpen()`
@*/
PetscErrorCode PetscPOpenSetMachine(const char machine[])
{
  if (machine) {
    PetscStrncpy(PetscPOpenMachine, machine, sizeof(PetscPOpenMachine)); /* bounded copy into the fixed-size buffer */
  } else {
    PetscPOpenMachine[0] = 0;
  }
  return 0;
}

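/*
  A sketch of selecting a default machine (illustrative only; "compute-node-03" is a
  made-up host name and error checking is omitted): once the machine is set, a later
  PetscPOpen() call with a NULL machine argument will ssh to it to run the command,
  and passing NULL to PetscPOpenSetMachine() reverts to running locally.
*/
static PetscErrorCode PetscPOpenSetMachineUsageSketch(void)
{
  FILE *fp = NULL;

  PetscPOpenSetMachine("compute-node-03");
  PetscPOpen(PETSC_COMM_WORLD, NULL, "hostname", "r", &fp);
  PetscPClose(PETSC_COMM_WORLD, fp);
  PetscPOpenSetMachine(NULL); /* revert to running commands on the local machine */
  return 0;
}
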
#endif