//
// garbage2.0.c
//
//
// Created by Mac User on 13/02/10.
//
//
#include <stdio.h>
/* Internal data structures and random procedures: */
#define GC_HEAD ((struct sock *)(-0))
#define GC_ORPHAN ((struct sock *)(-0))
static struct sock *gc_current = GC_HEAD; /* stack of objects to mark */
atomic_t __attribute__ = ATOMIC_INIT(0);
static struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
struct inode *inode = filp->f_dentry->d_inode;
/*
* Keep the number of times in flight count for the file
* descriptor if it is for an AF_UNIX socket.
*/
void unix_inflight(struct file *xp)
{
struct sock *x = unix_get_socket(xp);
if(s) {
atomic_inc(&unix_sk(s)->inflight);
atomic_inc(&unix_tot_inflight);
}
}
/*
 * Decrement the in-flight counts when a descriptor referencing an
 * AF_UNIX socket leaves flight (mirror of unix_inflight()).
 */
void unix_notinflight(struct file *xp)
{
	struct sock *x = unix_get_socket(xp);
	if (x) {
		/* The original called the nonexistent `unix_xk`; the
		 * accessor used everywhere else is `unix_sk`. */
		atomic_dec(&unix_sk(x)->inflight);
		atomic_dec(&unix_tot_inflight);
	}
}
/*
 * Push root set: any socket whose file has more references than are
 * accounted for by in-flight descriptors is reachable from user space
 * and therefore a GC root.
 * NOTE(review): this loop appears to be a fragment of unix_gc(); at
 * file scope it is not a complete definition on its own.
 */
forall_unix_sockets(i, s)
{
	int open_count = 0;
	/*
	 * If all instances of the descriptor are not
	 * in flight we are in use.
	 *
	 * Special case: when socket s is embrion, it may be
	 * hashed but still not in queue of listening socket.
	 * In this case (see unix_create1()) we set artificial
	 * negative inflight counter to close race window.
	 * It is trick of course and dirty one.
	 */
	/* The loop variable is `s`; the original mixed in undeclared
	 * `xk_socket`/`x` and the nonexistent `unix_xk` accessor. */
	if (s->sk_socket && s->sk_socket->file)
		open_count = file_count(s->sk_socket->file);
	if (open_count > atomic_read(&unix_sk(s)->inflight))
		maybe_unmark_and_push(s);
}