@@ -94,7 +94,7 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
                                        struct rt_work *work, rt_tick_t ticks)
 {
     rt_base_t level;
-    rt_err_t err;
+    rt_err_t err = RT_EOK;
 
     level = rt_spin_lock_irqsave(&(queue->spinlock));
 
@@ -113,13 +113,7 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
         {
             /* resume work thread, and do a re-schedule if succeed */
             rt_thread_resume(queue->work_thread);
-            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
         }
-        else
-        {
-            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-        }
-        return RT_EOK;
     }
     else if (ticks < RT_TICK_MAX / 2)
     {
@@ -139,12 +133,14 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
         rt_list_insert_after(queue->delayed_list.prev, &(work->list));
 
         err = rt_timer_start(&(work->timer));
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-
-        return err;
     }
+    else
+    {
+        err = -RT_ERROR;
+    }
+
     rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-    return -RT_ERROR;
+    return err;
 }
 
 static rt_err_t _workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work)
@@ -160,14 +156,14 @@ static rt_err_t _workqueue_cancel_work(struct rt_workqueue *queue, struct rt_wor
     {
         if ((err = rt_timer_stop(&(work->timer))) != RT_EOK)
         {
-            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-            return err;
+            goto exit;
         }
         rt_timer_detach(&(work->timer));
         work->flags &= ~RT_WORK_STATE_SUBMITTING;
     }
     err = queue->work_current != work ? RT_EOK : -RT_EBUSY;
     work->workqueue = RT_NULL;
+exit:
     rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     return err;
 }
@@ -200,12 +196,9 @@ static void _delayed_work_timeout_handler(void *parameter)
     {
         /* resume work thread, and do a re-schedule if succeed */
         rt_thread_resume(queue->work_thread);
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-    }
-    else
-    {
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     }
+
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 }
 
 /**
@@ -358,13 +351,9 @@ rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *wo
     {
         /* resume work thread, and do a re-schedule if succeed */
         rt_thread_resume(queue->work_thread);
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-    }
-    else
-    {
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     }
 
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     return RT_EOK;
 }
 
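A minimal sketch (not part of the patch) of the single-exit shape every hunk above converges on: take the spinlock once, let each branch only record its result in err, then unlock and return at a single point. The function name demo_queue_op is hypothetical; the RT-Thread calls (rt_spin_lock_irqsave, rt_spin_unlock_irqrestore) and error codes are the ones already used in the diff.

#include <rtthread.h>

/* hypothetical helper illustrating the pattern: no branch unlocks or
 * returns on its own, so the spinlock is released at exactly one point */
static rt_err_t demo_queue_op(struct rt_workqueue *queue, struct rt_work *work)
{
    rt_base_t level;
    rt_err_t err = RT_EOK;          /* default result, as in the patched code */

    level = rt_spin_lock_irqsave(&(queue->spinlock));

    if (work->workqueue != queue)
    {
        err = -RT_ERROR;            /* record the error, fall through to unlock */
    }

    /* single unlock and single return for every path */
    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
    return err;
}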