ipc: conserve sequence numbers in ipcmni_extend mode

Rewrite, based on the patch from Waiman Long:

The mixing in of a sequence number into the IPC IDs is probably to avoid
ID reuse in userspace as much as possible.  With ipcmni_extend mode, the
number of usable sequence numbers is greatly reduced leading to higher
chance of ID reuse.

To address this issue, we need to conserve the sequence number space as
much as possible.  Right now, the sequence number is incremented for
every new ID created.  In reality, we only need to increment the
sequence number when the newly allocated ID is not greater than the last
one allocated.  It is in such a case that the new ID may collide with an
existing one.  This is being done irrespective of the ipcmni mode.

In order to avoid any races, the index is first allocated and then the
pointer is replaced.

Changes compared to the initial patch:
 - Handle failures from idr_alloc().
 - Avoid that concurrent operations can see the wrong sequence number.
   (This is achieved by using idr_replace()).
 - IPCMNI_SEQ_SHIFT is not a constant, thus renamed to
   ipcmni_seq_shift().
 - IPCMNI_SEQ_MAX is not a constant, thus renamed to ipcmni_seq_max().

Link: http://lkml.kernel.org/r/20190329204930.21620-2-longman@redhat.com
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Waiman Long <longman@redhat.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: "Eric W . Biederman" <ebiederm@xmission.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kees Cook <keescook@chromium.org>
Cc: "Luis R. Rodriguez" <mcgrof@kernel.org>
Cc: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Manfred Spraul 2019-05-14 15:46:33 -07:00 committed by Linus Torvalds
parent 5ac893b8cb
commit 3278a2c20c
3 changed files with 35 additions and 9 deletions

View File

@ -19,6 +19,7 @@ struct ipc_ids {
struct rw_semaphore rwsem; struct rw_semaphore rwsem;
struct idr ipcs_idr; struct idr ipcs_idr;
int max_idx; int max_idx;
int last_idx; /* For wrap around detection */
#ifdef CONFIG_CHECKPOINT_RESTORE #ifdef CONFIG_CHECKPOINT_RESTORE
int next_id; int next_id;
#endif #endif

View File

@ -119,6 +119,7 @@ void ipc_init_ids(struct ipc_ids *ids)
rhashtable_init(&ids->key_ht, &ipc_kht_params); rhashtable_init(&ids->key_ht, &ipc_kht_params);
idr_init(&ids->ipcs_idr); idr_init(&ids->ipcs_idr);
ids->max_idx = -1; ids->max_idx = -1;
ids->last_idx = -1;
#ifdef CONFIG_CHECKPOINT_RESTORE #ifdef CONFIG_CHECKPOINT_RESTORE
ids->next_id = -1; ids->next_id = -1;
#endif #endif
@ -192,6 +193,10 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
* *
* The caller must own kern_ipc_perm.lock.of the new object. * The caller must own kern_ipc_perm.lock.of the new object.
* On error, the function returns a (negative) error code. * On error, the function returns a (negative) error code.
*
* To conserve sequence number space, especially with extended ipc_mni,
* the sequence number is incremented only when the returned ID is less than
* the last one.
*/ */
static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new) static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new)
{ {
@ -215,17 +220,37 @@ static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new)
*/ */
if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */ if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */
new->seq = ids->seq++;
if (ids->seq > IPCID_SEQ_MAX) /* allocate the idx, with a NULL struct kern_ipc_perm */
idx = idr_alloc(&ids->ipcs_idr, NULL, 0, 0, GFP_NOWAIT);
if (idx >= 0) {
/*
* idx got allocated successfully.
* Now calculate the sequence number and set the
* pointer for real.
*/
if (idx <= ids->last_idx) {
ids->seq++;
if (ids->seq >= ipcid_seq_max())
ids->seq = 0; ids->seq = 0;
idx = idr_alloc(&ids->ipcs_idr, new, 0, 0, GFP_NOWAIT); }
ids->last_idx = idx;
new->seq = ids->seq;
/* no need for smp_wmb(), this is done
* inside idr_replace, as part of
* rcu_assign_pointer
*/
idr_replace(&ids->ipcs_idr, new, idx);
}
} else { } else {
new->seq = ipcid_to_seqx(next_id); new->seq = ipcid_to_seqx(next_id);
idx = idr_alloc(&ids->ipcs_idr, new, ipcid_to_idx(next_id), idx = idr_alloc(&ids->ipcs_idr, new, ipcid_to_idx(next_id),
0, GFP_NOWAIT); 0, GFP_NOWAIT);
} }
if (idx >= 0) if (idx >= 0)
new->id = (new->seq << IPCMNI_SEQ_SHIFT) + idx; new->id = (new->seq << ipcmni_seq_shift()) + idx;
return idx; return idx;
} }

View File

@ -34,13 +34,13 @@
extern int ipc_mni; extern int ipc_mni;
extern int ipc_mni_shift; extern int ipc_mni_shift;
#define IPCMNI_SEQ_SHIFT ipc_mni_shift #define ipcmni_seq_shift() ipc_mni_shift
#define IPCMNI_IDX_MASK ((1 << ipc_mni_shift) - 1) #define IPCMNI_IDX_MASK ((1 << ipc_mni_shift) - 1)
#else /* CONFIG_SYSVIPC_SYSCTL */ #else /* CONFIG_SYSVIPC_SYSCTL */
#define ipc_mni IPCMNI #define ipc_mni IPCMNI
#define IPCMNI_SEQ_SHIFT IPCMNI_SHIFT #define ipcmni_seq_shift() IPCMNI_SHIFT
#define IPCMNI_IDX_MASK ((1 << IPCMNI_SHIFT) - 1) #define IPCMNI_IDX_MASK ((1 << IPCMNI_SHIFT) - 1)
#endif /* CONFIG_SYSVIPC_SYSCTL */ #endif /* CONFIG_SYSVIPC_SYSCTL */
@ -123,8 +123,8 @@ struct pid_namespace *ipc_seq_pid_ns(struct seq_file *);
#define IPC_SHM_IDS 2 #define IPC_SHM_IDS 2
#define ipcid_to_idx(id) ((id) & IPCMNI_IDX_MASK) #define ipcid_to_idx(id) ((id) & IPCMNI_IDX_MASK)
#define ipcid_to_seqx(id) ((id) >> IPCMNI_SEQ_SHIFT) #define ipcid_to_seqx(id) ((id) >> ipcmni_seq_shift())
#define IPCID_SEQ_MAX (INT_MAX >> IPCMNI_SEQ_SHIFT) #define ipcid_seq_max() (INT_MAX >> ipcmni_seq_shift())
/* must be called with ids->rwsem acquired for writing */ /* must be called with ids->rwsem acquired for writing */
int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int); int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);