11
11
struct ucounts init_ucounts = {
12
12
.ns = & init_user_ns ,
13
13
.uid = GLOBAL_ROOT_UID ,
14
- .count = ATOMIC_INIT (1 ),
14
+ .count = RCUREF_INIT (1 ),
15
15
};
16
16
17
17
#define UCOUNTS_HASHTABLE_BITS 10
@@ -138,7 +138,7 @@ static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid,
138
138
guard (rcu )();
139
139
hlist_nulls_for_each_entry_rcu (ucounts , pos , hashent , node ) {
140
140
if (uid_eq (ucounts -> uid , uid ) && (ucounts -> ns == ns )) {
141
- if (atomic_inc_not_zero (& ucounts -> count ))
141
+ if (rcuref_get (& ucounts -> count ))
142
142
return ucounts ;
143
143
}
144
144
}
@@ -154,13 +154,6 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
154
154
spin_unlock_irq (& ucounts_lock );
155
155
}
156
156
157
- struct ucounts * get_ucounts (struct ucounts * ucounts )
158
- {
159
- if (atomic_inc_not_zero (& ucounts -> count ))
160
- return ucounts ;
161
- return NULL ;
162
- }
163
-
164
157
/*
 * alloc_ucounts() - find or create the ucounts entry for (ns, uid).
 *
 * NOTE(review): partial diff hunk; the allocation of 'new' and the
 * insert/race-resolution tail are outside the visible region - confirm
 * against upstream kernel/ucount.c.
 */
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
	struct hlist_nulls_head *hashent = ucounts_hashentry(ns, uid);

	/* ... allocation of 'new' not visible in this hunk ... */

	new->ns = ns;
	new->uid = uid;
	/*
	 * One reference, owned by the caller.  rcuref_init() replaces the
	 * old atomic_set(); the counter must be valid before the entry
	 * becomes reachable via the hash table.
	 */
	rcuref_init(&new->count, 1);

	spin_lock_irq(&ucounts_lock);
	/*
	 * Re-check under the lock: another task may have inserted the
	 * same (ns, uid) entry while we were allocating.
	 */
	ucounts = find_ucounts(ns, uid, hashent);
	/* ... insert/cleanup tail not visible in this hunk ... */
@@ -196,7 +189,8 @@ void put_ucounts(struct ucounts *ucounts)
196
189
{
197
190
unsigned long flags ;
198
191
199
- if (atomic_dec_and_lock_irqsave (& ucounts -> count , & ucounts_lock , flags )) {
192
+ if (rcuref_put (& ucounts -> count )) {
193
+ spin_lock_irqsave (& ucounts_lock , flags );
200
194
hlist_nulls_del_rcu (& ucounts -> node );
201
195
spin_unlock_irqrestore (& ucounts_lock , flags );
202
196
0 commit comments