/* This file handles advisory file locking as required by POSIX.
 *
 * The entry points into this file are
 *   lock_op:	   perform locking operations for FCNTL system call
 *   lock_revive:  revive processes when a lock is released
 */
|
---|
7 |
|
---|
8 | #include "fs.h"
|
---|
9 | #include <minix/com.h>
|
---|
10 | #include <fcntl.h>
|
---|
11 | #include <unistd.h>
|
---|
12 | #include "file.h"
|
---|
13 | #include "fproc.h"
|
---|
14 | #include "inode.h"
|
---|
15 | #include "lock.h"
|
---|
16 | #include "param.h"
|
---|
17 |
|
---|
/*===========================================================================*
 *				lock_op					     *
 *===========================================================================*/
PUBLIC int lock_op(f, req)
struct filp *f;			/* file on which to lock, unlock or test */
int req;			/* F_SETLK, F_SETLKW or F_GETLK */
{
/* Perform the advisory locking required by POSIX.
 *
 * Returns OK on success, SUSPEND when an F_SETLKW caller must wait for a
 * conflicting lock to be released, or an error code: EINVAL (bad argument,
 * bad copy from user space, or not a regular file), EBADF (file not open
 * with the access mode the requested lock type needs), EAGAIN (F_SETLK
 * would conflict), ENOLCK (lock table full).
 */

  int r, ltype, i, conflict = 0, unlocking = 0;
  mode_t mo;
  off_t first, last;
  struct flock flock;
  vir_bytes user_flock;
  struct file_lock *flp, *flp2, *empty;

  /* Fetch the flock structure from user space. */
  user_flock = (vir_bytes) m_in.name1;
  r = sys_datacopy(who, (vir_bytes) user_flock,
	FS_PROC_NR, (vir_bytes) &flock, (phys_bytes) sizeof(flock));
  if (r != OK) return(EINVAL);

  /* Make some error checks. */
  ltype = flock.l_type;
  mo = f->filp_mode;
  if (ltype != F_UNLCK && ltype != F_RDLCK && ltype != F_WRLCK) return(EINVAL);
  if (req == F_GETLK && ltype == F_UNLCK) return(EINVAL);
  if ( (f->filp_ino->i_mode & I_TYPE) != I_REGULAR) return(EINVAL);
  /* A read lock needs the file open for reading, a write lock for writing
   * (F_GETLK only tests, so it is exempt from these access checks).
   */
  if (req != F_GETLK && ltype == F_RDLCK && (mo & R_BIT) == 0) return(EBADF);
  if (req != F_GETLK && ltype == F_WRLCK && (mo & W_BIT) == 0) return(EBADF);

  /* Compute the first and last bytes in the lock region. */
  switch (flock.l_whence) {
	case SEEK_SET:	first = 0; break;
	case SEEK_CUR:	first = f->filp_pos; break;
	case SEEK_END:	first = f->filp_ino->i_size; break;
	default:	return(EINVAL);
  }
  /* Check for overflow when adding the (signed) l_start offset. */
  if (((long)flock.l_start > 0) && ((first + flock.l_start) < first))
	return(EINVAL);
  if (((long)flock.l_start < 0) && ((first + flock.l_start) > first))
	return(EINVAL);
  first = first + flock.l_start;
  last = first + flock.l_len - 1;
  if (flock.l_len == 0) last = MAX_FILE_POS;	/* l_len == 0 means to EOF */
  if (last < first) return(EINVAL);

  /* Check if this region conflicts with any existing lock.  While scanning,
   * remember the first free slot in case a new lock must be recorded.
   */
  empty = (struct file_lock *) 0;
  for (flp = &file_lock[0]; flp < & file_lock[NR_LOCKS]; flp++) {
	if (flp->lock_type == 0) {
		if (empty == (struct file_lock *) 0) empty = flp;
		continue;	/* 0 means unused slot */
	}
	if (flp->lock_inode != f->filp_ino) continue;	/* different file */
	if (last < flp->lock_first) continue;	/* new one is in front */
	if (first > flp->lock_last) continue;	/* new one is afterwards */
	if (ltype == F_RDLCK && flp->lock_type == F_RDLCK) continue;
	/* A process never conflicts with its own locks (its own overlapping
	 * regions are skipped here, except when it is unlocking them).
	 */
	if (ltype != F_UNLCK && flp->lock_pid == fp->fp_pid) continue;

	/* There might be a conflict.  Process it. */
	conflict = 1;
	if (req == F_GETLK) break;	/* flp now points at the conflict */

	/* If we are trying to set a lock, it just failed. */
	if (ltype == F_RDLCK || ltype == F_WRLCK) {
		if (req == F_SETLK) {
			/* For F_SETLK, just report back failure. */
			return(EAGAIN);
		} else {
			/* For F_SETLKW, suspend the process until the lock
			 * holder releases something (see lock_revive).
			 */
			suspend(XLOCK);
			return(SUSPEND);
		}
	}

	/* We are clearing a lock and we found something that overlaps. */
	unlocking = 1;
	if (first <= flp->lock_first && last >= flp->lock_last) {
		/* Unlock region covers the whole lock: drop it entirely. */
		flp->lock_type = 0;	/* mark slot as unused */
		nr_locks--;	/* number of locks is now 1 less */
		continue;
	}

	/* Part of a locked region has been unlocked. */
	if (first <= flp->lock_first) {
		/* Front of the lock removed: shrink it from the left. */
		flp->lock_first = last + 1;
		continue;
	}

	if (last >= flp->lock_last) {
		/* Tail of the lock removed: shrink it from the right. */
		flp->lock_last = first - 1;
		continue;
	}
	
	/* Bad luck. A lock has been split in two by unlocking the middle.
	 * The second half goes in a fresh slot; the original keeps the
	 * front half.  This needs a free table entry, hence ENOLCK.
	 */
	if (nr_locks == NR_LOCKS) return(ENOLCK);
	for (i = 0; i < NR_LOCKS; i++)
		if (file_lock[i].lock_type == 0) break;
	flp2 = &file_lock[i];
	flp2->lock_type = flp->lock_type;
	flp2->lock_pid = flp->lock_pid;
	flp2->lock_inode = flp->lock_inode;
	flp2->lock_first = last + 1;
	flp2->lock_last = flp->lock_last;
	flp->lock_last = first - 1;
	nr_locks++;
  }
  /* Waiters may now be able to acquire their lock; wake them all up. */
  if (unlocking) lock_revive();

  if (req == F_GETLK) {
	if (conflict) {
		/* GETLK and conflict. Report on the conflicting lock. */
		flock.l_type = flp->lock_type;
		flock.l_whence = SEEK_SET;
		flock.l_start = flp->lock_first;
		flock.l_len = flp->lock_last - flp->lock_first + 1;
		flock.l_pid = flp->lock_pid;

	} else {
		/* It is GETLK and there is no conflict. */
		flock.l_type = F_UNLCK;
	}

	/* Copy the flock structure back to the caller. */
	r = sys_datacopy(FS_PROC_NR, (vir_bytes) &flock,
		who, (vir_bytes) user_flock, (phys_bytes) sizeof(flock));
	return(r);
  }

  if (ltype == F_UNLCK) return(OK);	/* unlocked a region with no locks */

  /* There is no conflict.  If space exists, store new lock in the table. */
  if (empty == (struct file_lock *) 0) return(ENOLCK);	/* table full */
  empty->lock_type = ltype;
  empty->lock_pid = fp->fp_pid;
  empty->lock_inode = f->filp_ino;
  empty->lock_first = first;
  empty->lock_last = last;
  nr_locks++;
  return(OK);
}
|
---|
161 |
|
---|
162 | /*===========================================================================*
|
---|
163 | * lock_revive *
|
---|
164 | *===========================================================================*/
|
---|
165 | PUBLIC void lock_revive()
|
---|
166 | {
|
---|
167 | /* Go find all the processes that are waiting for any kind of lock and
|
---|
168 | * revive them all. The ones that are still blocked will block again when
|
---|
169 | * they run. The others will complete. This strategy is a space-time
|
---|
170 | * tradeoff. Figuring out exactly which ones to unblock now would take
|
---|
171 | * extra code, and the only thing it would win would be some performance in
|
---|
172 | * extremely rare circumstances (namely, that somebody actually used
|
---|
173 | * locking).
|
---|
174 | */
|
---|
175 |
|
---|
176 | int task;
|
---|
177 | struct fproc *fptr;
|
---|
178 |
|
---|
179 | for (fptr = &fproc[INIT_PROC_NR + 1]; fptr < &fproc[NR_PROCS]; fptr++){
|
---|
180 | task = -fptr->fp_task;
|
---|
181 | if (fptr->fp_suspended == SUSPENDED && task == XLOCK) {
|
---|
182 | revive( (int) (fptr - fproc), 0);
|
---|
183 | }
|
---|
184 | }
|
---|
185 | }
|
---|