/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"    /* Standard vendor system headers */
#include "afsincludes.h"        /* Afs-based standard headers */
#include "afs/afs_stats.h"      /* statistics */
#include "afs/stds.h"
static struct axscache *afs_axsfreelist = NULL;

#define NAXSs (1000 / sizeof(struct axscache))
static struct xfreelist {
    struct xfreelist *next;
    struct axscache data[NAXSs];
} *xfreemallocs = 0;
static int afs_xaxscnt = 0;
afs_rwlock_t afs_xaxs;

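/*
 * Design note: access cache entries are carved out of xfreelist chunks of
 * roughly 1000 bytes rather than allocated one at a time.  When the
 * per-entry freelist (afs_axsfreelist) runs dry, axs_Alloc() grabs a whole
 * new chunk, threads its NAXSs entries together, and records the chunk on
 * the xfreemallocs chain so shutdown_xscache() can later return the memory.
 * All manipulation of both lists is serialized by the afs_xaxs write lock.
 * (Assuming struct axscache carries only the uid, axess, and next fields
 * used below, NAXSs works out to a few dozen entries per chunk.)
 */
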
/* takes an address of an access cache & uid, returns ptr */
/* PRECONDITION: first field has been checked and doesn't match!
 * INVARIANT: isparent(i,j) ^ isparent(j,i)  (i.e., they switch around)
 */
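/* Note: the precondition above means the caller is expected to test the
 * first (most recently used) entry inline and only call in here on a miss,
 * so this routine starts at the second node.  It examines two nodes per
 * loop iteration and, on a hit, uses the axs_Front() macro (defined
 * elsewhere in the cache manager headers) to splice the entry to the head
 * of the list so that hot entries stay cheap to find on the next lookup.
 */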
struct axscache *
afs_SlowFindAxs(struct axscache **cachep, afs_int32 id)
{
    struct axscache *i, *j;

    j = (*cachep);
    i = j->next;
    while (i) {
        if (i->uid == id) {
            axs_Front(cachep, j, i);    /* maintain LRU queue */
            return (i);
        }

        if ((j = i->next)) {    /* ASSIGNMENT HERE! */
            if (j->uid == id) {
                axs_Front(cachep, i, j);
                return (j);
            }
        } else
            return (NULL);
        i = j->next;
    }
    return (NULL);
}

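/*
 * axs_Alloc() hands back one axscache entry.  The fast path pops the head
 * of afs_axsfreelist; if that list is empty, a fresh xfreelist chunk is
 * allocated, all of its entries are threaded together, the first one is
 * returned to the caller, and the remainder become the new freelist.  The
 * afs_xaxs write lock is held across either path.
 */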
struct axscache *
axs_Alloc(void)
{
    struct axscache *i, *j;
    struct xfreelist *h, *xsp;
    int k;

    ObtainWriteLock(&afs_xaxs, 174);
    if ((i = afs_axsfreelist)) {
        afs_axsfreelist = i->next;
        ReleaseWriteLock(&afs_xaxs);
        return i;
    } else {
        h = afs_osi_Alloc(sizeof(struct xfreelist));
        osi_Assert(h != NULL);
        afs_xaxscnt++;
        xsp = xfreemallocs;
        xfreemallocs = h;
        xfreemallocs->next = xsp;
        i = j = h->data;
        for (k = 0; k < NAXSs - 1; k++, i++) {
            i->uid = -2;
            i->axess = 0;
            i->next = ++j;      /* need j because order of evaluation not defined */
        }
        i->uid = -2;
        i->axess = 0;
        i->next = NULL;
        afs_axsfreelist = (h->data)->next;
    }
    ReleaseWriteLock(&afs_xaxs);
    return (h->data);
}
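
/*
 * A rough sketch of how callers string these entries together (illustrative
 * only; the real consumers live elsewhere in the cache manager, and any name
 * below other than the uid/axess/next fields is assumed for the example):
 *
 *     struct axscache *ac = axs_Alloc();
 *     ac->uid = uid;          // identity whose rights are being cached
 *     ac->axess = rights;     // access bits obtained from the fileserver
 *     ac->next = *cachep;     // prepend to the per-vnode access list
 *     *cachep = ac;
 */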

/*
 * axs_Free() pushes a single entry back onto afs_axsfreelist.  It is a
 * statement macro that takes and releases the afs_xaxs write lock itself,
 * so it must not be invoked with that lock already held.
 */
#define axs_Free(axsp) { \
    ObtainWriteLock(&afs_xaxs, 175); \
    axsp->next = afs_axsfreelist; \
    afs_axsfreelist = axsp; \
    ReleaseWriteLock(&afs_xaxs); \
}

/* I optimize for speed on lookup, and don't give a RIP about delete.
 */
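/* afs_RemoveAxs() unlinks the given entry from a per-vnode access list and
 * returns it to the global freelist via axs_Free().  If the entry is not on
 * the list, the call quietly does nothing.
 */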
void
afs_RemoveAxs(struct axscache **headp, struct axscache *axsp)
{
    struct axscache *i, *j;

    if (*headp && axsp) {       /* is bullet-proofing really necessary? */
        if (*headp == axsp) {   /* most common case, I think */
            *headp = axsp->next;
            axs_Free(axsp);
            return;
        }

        i = *headp;
        j = i->next;

        while (j) {             /* walk two nodes per iteration */
            if (j == axsp) {
                i->next = j->next;      /* unlink axsp */
                axs_Free(axsp);
                return;
            }
            if ((i = j->next)) {        /* ASSIGNMENT HERE! */
                if (i == axsp) {
                    j->next = i->next;  /* unlink axsp */
                    axs_Free(axsp);
                    return;
                }
            } else
                return;         /* hit the end of the list without a match */
            j = i->next;
        }
    }                           /* end of "if neither pointer is NULL" */
    return;                     /* !#@  FAILED to find it! */
}

/*
 * Takes an entire list of access cache structs and prepends them, lock, stock,
 * and barrel, to the front of the freelist.
 */
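/* The walk below only locates the tail of the caller's list; afs_xaxs is
 * taken just for the final splice, so the whole chain moves to the freelist
 * with a single lock acquisition no matter how long the list is.
 */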
void
afs_FreeAllAxs(struct axscache **headp)
{
    struct axscache *i, *j;

    i = *headp;
    j = NULL;

    while (i) {                 /* chase down the list 'til we reach the end */
        j = i->next;
        if (!j) {
            ObtainWriteLock(&afs_xaxs, 176);
            i->next = afs_axsfreelist; /* tack on the freelist to the end */
            afs_axsfreelist = *headp;
            ReleaseWriteLock(&afs_xaxs);
            *headp = NULL;
            return;
        }
        i = j->next;
    }

    if (j) {                    /* we ran off the end of the list... */
        ObtainWriteLock(&afs_xaxs, 177);
        j->next = afs_axsfreelist; /* tack on the freelist to the end */
        afs_axsfreelist = *headp;
        ReleaseWriteLock(&afs_xaxs);
    }
    *headp = NULL;
    return;
}

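/*
 * Called at cache-manager shutdown: re-initializes the afs_xaxs lock and
 * hands every xfreelist chunk back to the OS, leaving both the chunk chain
 * and the entry freelist empty so a warm restart begins from a clean state.
 */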
void
shutdown_xscache(void)
{
    struct xfreelist *xp, *nxp;

    AFS_RWLOCK_INIT(&afs_xaxs, "afs_xaxs");
    xp = xfreemallocs;
    while (xp) {
        nxp = xp->next;
        afs_osi_Free(xp, sizeof(struct xfreelist));
        xp = nxp;
    }
    afs_axsfreelist = NULL;
    xfreemallocs = NULL;
}